xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/google/gve/gve_rx.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2019 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include <linux/etherdevice.h>

static void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)];

	block->rx = NULL;
}

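/* Tear down one Rx ring: detach it from its notify block, free the
 * descriptor ring, queue resources and data ring DMA allocations, return
 * the queue page list and free the per-slot page_info array.
 */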
static void gve_rx_free_ring(struct gve_priv *priv, int idx)
{
	struct gve_rx_ring *rx = &priv->rx[idx];
	struct device *dev = &priv->pdev->dev;
	size_t bytes;
	u32 slots;

	gve_rx_remove_from_block(priv, idx);

	bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt;
	dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
	rx->desc.desc_ring = NULL;

	dma_free_coherent(dev, sizeof(*rx->q_resources),
			  rx->q_resources, rx->q_resources_bus);
	rx->q_resources = NULL;

	gve_unassign_qpl(priv, rx->data.qpl->id);
	rx->data.qpl = NULL;
	kvfree(rx->data.page_info);

	slots = rx->mask + 1;
	bytes = sizeof(*rx->data.data_ring) * slots;
	dma_free_coherent(dev, bytes, rx->data.data_ring,
			  rx->data.data_bus);
	rx->data.data_ring = NULL;
	netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}

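/* Point one Rx slot at a buffer: record the page and its kernel mapping in
 * page_info, and publish the buffer's offset within the QPL to the device
 * (big-endian) in the data slot.
 */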
static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
				struct gve_rx_data_slot *slot,
				dma_addr_t addr, struct page *page)
{
	page_info->page = page;
	page_info->page_offset = 0;
	page_info->page_address = page_address(page);
	slot->qpl_offset = cpu_to_be64(addr);
}

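/* Populate every data ring slot with a buffer from the ring's queue page
 * list. Returns the number of slots filled, or -ENOMEM if the page_info
 * array cannot be allocated.
 */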
static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
{
	struct gve_priv *priv = rx->gve;
	u32 slots;
	int i;

	/* Allocate one page per Rx queue slot. Each page is split into two
	 * packet buffers; when possible we "page flip" between the two.
	 */
	slots = rx->mask + 1;

	rx->data.page_info = kvzalloc(slots *
				      sizeof(*rx->data.page_info), GFP_KERNEL);
	if (!rx->data.page_info)
		return -ENOMEM;

	rx->data.qpl = gve_assign_rx_qpl(priv);

	for (i = 0; i < slots; i++) {
		struct page *page = rx->data.qpl->pages[i];
		dma_addr_t addr = i * PAGE_SIZE;

		gve_setup_rx_buffer(&rx->data.page_info[i],
				    &rx->data.data_ring[i], addr, page);
	}

	return slots;
}

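/* Attach this Rx ring to its notification block so the NAPI handler can
 * find it, and remember the block index for later lookups.
 */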
static void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
{
	u32 ntfy_idx = gve_rx_idx_to_ntfy(priv, queue_idx);
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
	struct gve_rx_ring *rx = &priv->rx[queue_idx];

	block->rx = rx;
	rx->ntfy_id = ntfy_idx;
}

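/* Allocate the DMA-coherent data ring, queue resources and descriptor ring
 * for one Rx queue and prefill its buffers. The descriptor ring must be a
 * whole number of pages; anything else is treated as a setup error.
 */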
static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
{
	struct gve_rx_ring *rx = &priv->rx[idx];
	struct device *hdev = &priv->pdev->dev;
	u32 slots, npages;
	int filled_pages;
	size_t bytes;
	int err;

	netif_dbg(priv, drv, priv->dev, "allocating rx ring\n");
	/* Make sure everything is zeroed to start with */
	memset(rx, 0, sizeof(*rx));

	rx->gve = priv;
	rx->q_num = idx;

	slots = priv->rx_pages_per_qpl;
	rx->mask = slots - 1;

	/* alloc rx data ring */
	bytes = sizeof(*rx->data.data_ring) * slots;
	rx->data.data_ring = dma_alloc_coherent(hdev, bytes,
						&rx->data.data_bus,
						GFP_KERNEL);
	if (!rx->data.data_ring)
		return -ENOMEM;
	filled_pages = gve_prefill_rx_pages(rx);
	if (filled_pages < 0) {
		err = -ENOMEM;
		goto abort_with_slots;
	}
	rx->fill_cnt = filled_pages;
	/* Ensure data ring slots (packet buffers) are visible. */
	dma_wmb();

	/* Alloc gve_queue_resources */
	rx->q_resources =
		dma_alloc_coherent(hdev,
				   sizeof(*rx->q_resources),
				   &rx->q_resources_bus,
				   GFP_KERNEL);
	if (!rx->q_resources) {
		err = -ENOMEM;
		goto abort_filled;
	}
	netif_dbg(priv, drv, priv->dev, "rx[%d]->data.data_bus=%lx\n", idx,
		  (unsigned long)rx->data.data_bus);

	/* alloc rx desc ring */
	bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt;
	npages = bytes / PAGE_SIZE;
	if (npages * PAGE_SIZE != bytes) {
		err = -EIO;
		goto abort_with_q_resources;
	}

	rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus,
						GFP_KERNEL);
	if (!rx->desc.desc_ring) {
		err = -ENOMEM;
		goto abort_with_q_resources;
	}
	rx->mask = slots - 1;
	rx->cnt = 0;
	rx->desc.seqno = 1;
	gve_rx_add_to_block(priv, idx);

	return 0;

abort_with_q_resources:
	dma_free_coherent(hdev, sizeof(*rx->q_resources),
			  rx->q_resources, rx->q_resources_bus);
	rx->q_resources = NULL;
abort_filled:
	kvfree(rx->data.page_info);
abort_with_slots:
	bytes = sizeof(*rx->data.data_ring) * slots;
	dma_free_coherent(hdev, bytes, rx->data.data_ring, rx->data.data_bus);
	rx->data.data_ring = NULL;

	return err;
}

int gve_rx_alloc_rings(struct gve_priv *priv)
{
	int err = 0;
	int i;

	for (i = 0; i < priv->rx_cfg.num_queues; i++) {
		err = gve_rx_alloc_ring(priv, i);
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "Failed to alloc rx ring=%d: err=%d\n",
				  i, err);
			break;
		}
	}
	/* Free any rings that were allocated before the error */
	if (err) {
		int j;

		for (j = 0; j < i; j++)
			gve_rx_free_ring(priv, j);
	}
	return err;
}

void gve_rx_free_rings(struct gve_priv *priv)
{
	int i;

	for (i = 0; i < priv->rx_cfg.num_queues; i++)
		gve_rx_free_ring(priv, i);
}

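/* Tell the device how many buffers have been posted by writing fill_cnt to
 * this ring's doorbell register (big-endian MMIO).
 */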
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx)
{
	u32 db_idx = be32_to_cpu(rx->q_resources->db_index);

	iowrite32be(rx->fill_cnt, &priv->db_bar2[db_idx]);
}

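/* Map the descriptor's flag bits onto the kernel's packet hash types. */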
static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
{
	if (likely(pkt_flags & (GVE_RXF_TCP | GVE_RXF_UDP)))
		return PKT_HASH_TYPE_L4;
	if (pkt_flags & (GVE_RXF_IPV4 | GVE_RXF_IPV6))
		return PKT_HASH_TYPE_L3;
	return PKT_HASH_TYPE_L2;
}

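/* Copy a received packet out of its registered buffer into a new linear
 * skb so the buffer can be handed straight back to the device.
 */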
static struct sk_buff *gve_rx_copy(struct gve_rx_ring *rx,
				   struct net_device *dev,
				   struct napi_struct *napi,
				   struct gve_rx_slot_page_info *page_info,
				   u16 len)
{
	struct sk_buff *skb = napi_alloc_skb(napi, len);
	void *va = page_info->page_address + GVE_RX_PAD +
		   page_info->page_offset;

	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, len);

	skb_copy_to_linear_data(skb, va, len);

	skb->protocol = eth_type_trans(skb, dev);

	u64_stats_update_begin(&rx->statss);
	rx->rx_copied_pkt++;
	u64_stats_update_end(&rx->statss);

	return skb;
}

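/* Zero-copy path: attach the packet buffer (half a page) to a GRO frag skb
 * instead of copying it.
 */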
static struct sk_buff *gve_rx_add_frags(struct net_device *dev,
					struct napi_struct *napi,
					struct gve_rx_slot_page_info *page_info,
					u16 len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (unlikely(!skb))
		return NULL;

	skb_add_rx_frag(skb, 0, page_info->page,
			page_info->page_offset +
			GVE_RX_PAD, len, PAGE_SIZE / 2);

	return skb;
}

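/* "Flip" to the other half of the page for the next receive: toggle both
 * the host-side page offset and the device-visible QPL offset.
 */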
static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info,
			     struct gve_rx_data_slot *data_ring)
{
	u64 addr = be64_to_cpu(data_ring->qpl_offset);

	page_info->page_offset ^= PAGE_SIZE / 2;
	addr ^= PAGE_SIZE / 2;
	data_ring->qpl_offset = cpu_to_be64(addr);
}

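/* Handle one completed Rx descriptor: drop it on error, otherwise build an
 * skb by copying or page flipping and hand it to GRO. Returns false only
 * when the page refcount is in a state that warrants a device reset.
 */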
static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
		   netdev_features_t feat, u32 idx)
{
	struct gve_rx_slot_page_info *page_info;
	struct gve_priv *priv = rx->gve;
	struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
	struct net_device *dev = priv->dev;
	struct sk_buff *skb;
	int pagecount;
	u16 len;

	/* drop this packet */
	if (unlikely(rx_desc->flags_seq & GVE_RXF_ERR)) {
		u64_stats_update_begin(&rx->statss);
		rx->rx_desc_err_dropped_pkt++;
		u64_stats_update_end(&rx->statss);
		return true;
	}

	len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD;
	page_info = &rx->data.page_info[idx];
	dma_sync_single_for_cpu(&priv->pdev->dev, rx->data.qpl->page_buses[idx],
				PAGE_SIZE, DMA_FROM_DEVICE);

	/* gvnic can only receive into registered segments. If the buffer
	 * can't be recycled, our only choice is to copy the data out of
	 * it so that we can return it to the device.
	 */

	if (PAGE_SIZE == 4096) {
		if (len <= priv->rx_copybreak) {
			/* Just copy small packets */
			skb = gve_rx_copy(rx, dev, napi, page_info, len);
			u64_stats_update_begin(&rx->statss);
			rx->rx_copybreak_pkt++;
			u64_stats_update_end(&rx->statss);
			goto have_skb;
		}
		if (unlikely(!gve_can_recycle_pages(dev))) {
			skb = gve_rx_copy(rx, dev, napi, page_info, len);
			goto have_skb;
		}
		pagecount = page_count(page_info->page);
		if (pagecount == 1) {
			/* No part of this page is used by any SKBs; we attach
			 * the page fragment to a new SKB and pass it up the
			 * stack.
			 */
			skb = gve_rx_add_frags(dev, napi, page_info, len);
			if (!skb) {
				u64_stats_update_begin(&rx->statss);
				rx->rx_skb_alloc_fail++;
				u64_stats_update_end(&rx->statss);
				return true;
			}
			/* Make sure the kernel stack can't release the page */
			get_page(page_info->page);
			/* "flip" to other packet buffer on this page */
			gve_rx_flip_buff(page_info, &rx->data.data_ring[idx]);
		} else if (pagecount >= 2) {
			/* We have previously passed the other half of this
			 * page up the stack, but it has not yet been freed.
			 */
			skb = gve_rx_copy(rx, dev, napi, page_info, len);
		} else {
			WARN(pagecount < 1, "Pagecount should never be < 1");
			return false;
		}
	} else {
		skb = gve_rx_copy(rx, dev, napi, page_info, len);
	}

have_skb:
	/* Failing to allocate an skb here is not a reset-worthy failure;
	 * just count it and drop the packet.
	 */
	if (!skb) {
		u64_stats_update_begin(&rx->statss);
		rx->rx_skb_alloc_fail++;
		u64_stats_update_end(&rx->statss);
		return true;
	}

	if (likely(feat & NETIF_F_RXCSUM)) {
		/* NIC passes up the partial sum */
		if (rx_desc->csum)
			skb->ip_summed = CHECKSUM_COMPLETE;
		else
			skb->ip_summed = CHECKSUM_NONE;
		skb->csum = csum_unfold(rx_desc->csum);
	}

	/* parse flags & pass relevant info up */
	if (likely(feat & NETIF_F_RXHASH) &&
	    gve_needs_rss(rx_desc->flags_seq))
		skb_set_hash(skb, be32_to_cpu(rx_desc->rss_hash),
			     gve_rss_type(rx_desc->flags_seq));

	if (skb_is_nonlinear(skb))
		napi_gro_frags(napi);
	else
		napi_gro_receive(napi, skb);
	return true;
}

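/* Return true if the device has already written the next descriptor, i.e.
 * its sequence number matches the one the ring expects next.
 */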
static bool gve_rx_work_pending(struct gve_rx_ring *rx)
{
	struct gve_rx_desc *desc;
	__be16 flags_seq;
	u32 next_idx;

	next_idx = rx->cnt & rx->mask;
	desc = rx->desc.desc_ring + next_idx;

	flags_seq = desc->flags_seq;
	/* Make sure we have synchronized the seq no with the device */
	smp_rmb();

	return (GVE_SEQNO(flags_seq) == rx->desc.seqno);
}

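/* Process up to @budget completed descriptors, update the ring counters and
 * stats, ring the doorbell with the new fill count, and report whether more
 * completed descriptors are already waiting.
 */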
bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
		       netdev_features_t feat)
{
	struct gve_priv *priv = rx->gve;
	struct gve_rx_desc *desc;
	u32 cnt = rx->cnt;
	u32 idx = cnt & rx->mask;
	u32 work_done = 0;
	u64 bytes = 0;

	desc = rx->desc.desc_ring + idx;
	while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) &&
	       work_done < budget) {
		netif_info(priv, rx_status, priv->dev,
			   "[%d] idx=%d desc=%p desc->flags_seq=0x%x\n",
			   rx->q_num, idx, desc, desc->flags_seq);
		netif_info(priv, rx_status, priv->dev,
			   "[%d] seqno=%d rx->desc.seqno=%d\n",
			   rx->q_num, GVE_SEQNO(desc->flags_seq),
			   rx->desc.seqno);
		bytes += be16_to_cpu(desc->len) - GVE_RX_PAD;
		if (!gve_rx(rx, desc, feat, idx))
			gve_schedule_reset(priv);
		cnt++;
		idx = cnt & rx->mask;
		desc = rx->desc.desc_ring + idx;
		rx->desc.seqno = gve_next_seqno(rx->desc.seqno);
		work_done++;
	}

	if (!work_done)
		return false;

	u64_stats_update_begin(&rx->statss);
	rx->rpackets += work_done;
	rx->rbytes += bytes;
	u64_stats_update_end(&rx->statss);
	rx->cnt = cnt;
	rx->fill_cnt += work_done;

	gve_rx_write_doorbell(priv, rx);
	return gve_rx_work_pending(rx);
}

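/* NAPI poll entry point for Rx: a budget of 0 means "do all the work",
 * otherwise clean up to @budget descriptors and report whether the ring
 * should be polled again.
 */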
bool gve_rx_poll(struct gve_notify_block *block, int budget)
{
	struct gve_rx_ring *rx = block->rx;
	netdev_features_t feat;
	bool repoll = false;

	feat = block->napi.dev->features;

	/* If budget is 0, do all the work */
	if (budget == 0)
		budget = INT_MAX;

	if (budget > 0)
		repoll |= gve_clean_rx_done(rx, budget, feat);
	else
		repoll |= gve_rx_work_pending(rx);
	return repoll;
}
464