xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/sfc/falcon/rx.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of descriptors to fill at once */
#define EF4_RX_PREFERRED_BATCH 8U

/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EF4_RECYCLE_RING_SIZE_IOMMU 4096
#define EF4_RECYCLE_RING_SIZE_NOIOMMU (2 * EF4_RX_PREFERRED_BATCH)

/* Size of buffer allocated for skb header area. */
#define EF4_SKB_HEADERS  128u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EF4_RX_MAX_FRAGS DIV_ROUND_UP(EF4_MAX_FRAME_LEN(EF4_MAX_MTU), \
				      EF4_RX_USR_BUF_SIZE)

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EF4_RXD_HEAD_ROOM (1 + EF4_RX_MAX_FRAGS)

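/* Return the virtual address of the start of the buffer's packet data,
 * derived from the buffer's page and its offset within that page.
 */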
static inline u8 *ef4_rx_buf_va(struct ef4_rx_buffer *buf)
{
	return page_address(buf->page) + buf->page_offset;
}

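/* Extract the 32-bit RSS hash that the hardware places in the packet
 * prefix.  Where unaligned loads are not efficient, assemble the
 * little-endian value byte by byte instead of dereferencing a possibly
 * misaligned __le32 pointer.
 */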
static inline u32 ef4_rx_buf_hash(struct ef4_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
	const u8 *data = eh + efx->rx_packet_hash_offset;
	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}

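/* Step to the next buffer in the descriptor ring, wrapping back to
 * entry 0 after the last entry (ptr_mask is the ring size minus one).
 */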
static inline struct ef4_rx_buffer *
ef4_rx_buf_next(struct ef4_rx_queue *rx_queue, struct ef4_rx_buffer *rx_buf)
{
	if (unlikely(rx_buf == ef4_rx_buffer(rx_queue, rx_queue->ptr_mask)))
		return ef4_rx_buffer(rx_queue, 0);
	else
		return rx_buf + 1;
}

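/* Make the first len bytes of the buffer visible to the CPU before the
 * packet contents are read; the page itself stays mapped for DMA.
 */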
static inline void ef4_sync_rx_buffer(struct ef4_nic *efx,
				      struct ef4_rx_buffer *rx_buf,
				      unsigned int len)
{
	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
				DMA_FROM_DEVICE);
}

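/* Work out how RX buffers are laid out within each page: the DMA length
 * plus IP alignment is rounded up to EF4_RX_BUF_ALIGNMENT to give the
 * per-buffer step, and for order-0 pages the space left after the
 * struct ef4_rx_page_state header is divided by that step.  As an
 * illustrative example (numbers not taken from this file), a 4096-byte
 * page with a 64-byte state header and a 1792-byte step gives two
 * buffers per page and a truesize of 2048 bytes each.
 */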
void ef4_rx_config_page_split(struct ef4_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
				      EF4_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct ef4_rx_page_state)) /
		 efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EF4_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	struct page *page;
	struct ef4_rx_page_state *state;
	unsigned index;

	if (unlikely(!rx_queue->page_ring))
		return NULL;
	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}

/**
 * ef4_init_rx_buffers - create EF4_RX_PREFERRED_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 * @atomic:		control memory allocation flags
 *
 * This allocates a batch of pages, maps them for DMA, and populates a
 * struct ef4_rx_buffer for each one. Returns a negative error code or
 * 0 on success. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int ef4_init_rx_buffers(struct ef4_rx_queue *rx_queue, bool atomic)
{
	struct ef4_nic *efx = rx_queue->efx;
	struct ef4_rx_buffer *rx_buf;
	struct page *page;
	unsigned int page_offset;
	struct ef4_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	count = 0;
	do {
		page = ef4_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct ef4_rx_page_state);
		page_offset = sizeof(struct ef4_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = ef4_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EF4_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}

/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
static void ef4_unmap_rx_buffer(struct ef4_nic *efx,
				struct ef4_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct ef4_rx_page_state *state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

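/* Drop the page reference held by each of num_bufs consecutive buffers,
 * starting at rx_buf and following the ring order.
 */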
static void ef4_free_rx_buffers(struct ef4_rx_queue *rx_queue,
				struct ef4_rx_buffer *rx_buf,
				unsigned int num_bufs)
{
	do {
		if (rx_buf->page) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
		}
		rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
	} while (--num_bufs);
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void ef4_recycle_rx_page(struct ef4_channel *channel,
				struct ef4_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;
	struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
	struct ef4_nic *efx = rx_queue->efx;
	unsigned index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	ef4_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

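/* Release a buffer during queue teardown: drop the reference held for
 * the buffer itself and, for the final buffer in a page, unmap the page
 * and drop the DMA mapping's reference as well.
 */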
static void ef4_fini_rx_buffer(struct ef4_rx_queue *rx_queue,
			       struct ef4_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE) {
		ef4_unmap_rx_buffer(rx_queue->efx, rx_buf);
		ef4_free_rx_buffers(rx_queue, rx_buf, 1);
	}
	rx_buf->page = NULL;
}

/* Recycle the pages that are used by buffers that have just been received. */
static void ef4_recycle_rx_pages(struct ef4_channel *channel,
				 struct ef4_rx_buffer *rx_buf,
				 unsigned int n_frags)
{
	struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);

	if (unlikely(!rx_queue->page_ring))
		return;

	do {
		ef4_recycle_rx_page(channel, rx_buf);
		rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

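/* Drop a packet that has been flagged for discard: give its pages back
 * to the recycle ring where possible, then free the buffers.
 */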
static void ef4_discard_rx_packet(struct ef4_channel *channel,
				  struct ef4_rx_buffer *rx_buf,
				  unsigned int n_frags)
{
	struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);

	ef4_recycle_rx_pages(channel, rx_buf, n_frags);

	ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
}

/**
 * ef4_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 * @atomic:		control memory allocation flags
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill.  If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here).  In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EF4_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EF4_BUG_ON_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   ef4_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = ef4_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				ef4_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", ef4_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		ef4_nic_notify_rx_desc(rx_queue);
}

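/* Timer callback armed when an atomic refill fails.  It runs outside
 * NAPI context, so it only posts a fill event; the refill itself happens
 * when NAPI next processes that event.
 */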
void ef4_rx_slow_fill(struct timer_list *t)
{
	struct ef4_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);

	/* Post an event to cause NAPI to run and refill the queue */
	ef4_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

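/* Validate the hardware-completed length of a single-fragment packet
 * against the buffer size, flagging the packet for discard (and possibly
 * scheduling a reset) if it is overlength.
 */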
static void ef4_rx_packet__check_len(struct ef4_rx_queue *rx_queue,
				     struct ef4_rx_buffer *rx_buf,
				     int len)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	rx_buf->flags |= EF4_RX_PKT_DISCARD;

	if ((len > rx_buf->len) && EF4_WORKAROUND_8071(efx)) {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  "RX queue %d seriously overlength "
				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
				  ef4_rx_queue_index(rx_queue), len, max_len,
				  efx->type->rx_buffer_padding);
		ef4_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  "RX queue %d overlength RX event "
				  "(0x%x > 0x%x)\n",
				  ef4_rx_queue_index(rx_queue), len, max_len);
	}

	ef4_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void
ef4_rx_packet_gro(struct ef4_channel *channel, struct ef4_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	struct ef4_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		struct ef4_rx_queue *rx_queue;

		rx_queue = ef4_channel_get_rx_queue(channel);
		ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, ef4_rx_buf_hash(efx, eh),
			     PKT_HASH_TYPE_L3);
	skb->ip_summed = ((rx_buf->flags & EF4_RX_PKT_CSUMMED) ?
			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	napi_gro_frags(napi);
}

/* Allocate and construct an SKB around page fragments */
static struct sk_buff *ef4_rx_mk_skb(struct ef4_channel *channel,
				     struct ef4_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct ef4_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev,
			       efx->rx_ip_align + efx->rx_prefix_size +
			       hdr_len);
	if (unlikely(skb == NULL)) {
		atomic_inc(&efx->n_rx_noskb_drops);
		return NULL;
	}

	EF4_BUG_ON_PARANOID(rx_buf->len < hdr_len);

	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
	       efx->rx_prefix_size + hdr_len);
	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
	__skb_put(skb, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buf->page, rx_buf->page_offset,
					   rx_buf->len);
			rx_buf->page = NULL;
			skb->len += rx_buf->len;
			skb->data_len += rx_buf->len;
			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	skb->truesize += n_frags * efx->rx_buffer_truesize;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	skb_mark_napi_id(skb, &channel->napi_str);

	return skb;
}

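/* Handle a received packet.  First half: runs from the event-processing
 * path and does not read the payload; it validates the completion, syncs
 * the DMA mappings, recycles the pages and queues the packet for
 * __ef4_rx_packet().
 */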
void ef4_rx_packet(struct ef4_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{
	struct ef4_nic *efx = rx_queue->efx;
	struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
	struct ef4_rx_buffer *rx_buf;

	rx_queue->rx_packets++;

	rx_buf = ef4_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* Validate the number of fragments and completed length */
	if (n_frags == 1) {
		if (!(flags & EF4_RX_PKT_PREFIX_LEN))
			ef4_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EF4_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
		   unlikely(len > n_frags * efx->rx_dma_len) ||
		   unlikely(!efx->rx_scatter)) {
		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EF4_RX_PKT_DISCARD));
		rx_buf->flags |= EF4_RX_PKT_DISCARD;
	}

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   ef4_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EF4_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EF4_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so.  Process the
	 * previous receive first.
	 */
	if (unlikely(rx_buf->flags & EF4_RX_PKT_DISCARD)) {
		ef4_rx_flush_packet(channel);
		ef4_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}

	if (n_frags == 1 && !(flags & EF4_RX_PKT_PREFIX_LEN))
		rx_buf->len = len;

	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue.
	 */
	ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(ef4_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->rx_prefix_size;
	rx_buf->len -= efx->rx_prefix_size;

	if (n_frags > 1) {
		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */
		unsigned int tail_frags = n_frags - 1;

		for (;;) {
			rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			ef4_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
		}
		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
		ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments have been DMA-synced, so recycle pages. */
	rx_buf = ef4_rx_buffer(rx_queue, index);
	ef4_recycle_rx_pages(channel, rx_buf, n_frags);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	ef4_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}

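/* Deliver a packet as a normal skb: copy the headers into a freshly
 * allocated skb, attach the remaining fragments as pages, and hand the
 * result to the stack (or to the channel's receive_skb hook, if set).
 */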
static void ef4_rx_deliver(struct ef4_channel *channel, u8 *eh,
			   struct ef4_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EF4_SKB_HEADERS);

	skb = ef4_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		struct ef4_rx_queue *rx_queue;

		rx_queue = ef4_channel_get_rx_queue(channel);
		ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);
	if (likely(rx_buf->flags & EF4_RX_PKT_CSUMMED))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up */
	netif_receive_skb(skb);
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __ef4_rx_packet(struct ef4_channel *channel)
{
	struct ef4_nic *efx = channel->efx;
	struct ef4_rx_buffer *rx_buf =
		ef4_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = ef4_rx_buf_va(rx_buf);

	/* Read length from the prefix if necessary.  This already
	 * excludes the length of the prefix itself.
	 */
	if (rx_buf->flags & EF4_RX_PKT_PREFIX_LEN)
		rx_buf->len = le16_to_cpup((__le16 *)
					   (eh + efx->rx_packet_len_offset));

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		struct ef4_rx_queue *rx_queue;

		ef4_loopback_rx_packet(efx, eh, rx_buf->len);
		rx_queue = ef4_channel_get_rx_queue(channel);
		ef4_free_rx_buffers(rx_queue, rx_buf,
				    channel->rx_pkt_n_frags);
		goto out;
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EF4_RX_PKT_CSUMMED;

	if ((rx_buf->flags & EF4_RX_PKT_TCP) && !channel->type->receive_skb)
		ef4_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
	else
		ef4_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}

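/* Allocate the software state for an RX queue.  The ring size is rounded
 * up to a power of two so that a simple mask can be used to wrap the
 * ring pointers.
 */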
int ef4_probe_rx_queue(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EF4_MIN_DMAQ_SIZE);
	EF4_BUG_ON_PARANOID(entries > EF4_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  ef4_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = ef4_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}

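/* Size and allocate the page recycle ring.  Recycling matters most when
 * an IOMMU makes map/unmap expensive, so a much larger ring is used when
 * one is present (always assumed on PPC64).
 */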
static void ef4_init_rx_recycle_ring(struct ef4_nic *efx,
				     struct ef4_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
#else
	if (iommu_present(&pci_bus_type))
		bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	if (!rx_queue->page_ring)
		rx_queue->page_ptr_mask = 0;
	else
		rx_queue->page_ptr_mask = page_ring_size - 1;
}

void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, max_trigger;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", ef4_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	ef4_init_rx_recycle_ring(efx, rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EF4_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Set up RX descriptor ring */
	ef4_nic_init_rx(rx_queue);
}

void ef4_fini_rx_queue(struct ef4_rx_queue *rx_queue)
{
	int i;
	struct ef4_nic *efx = rx_queue->efx;
	struct ef4_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", ef4_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned index = i & rx_queue->ptr_mask;
			rx_buf = ef4_rx_buffer(rx_queue, index);
			ef4_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	/* Unmap and release the pages in the recycle ring. Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct ef4_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

void ef4_remove_rx_queue(struct ef4_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", ef4_rx_queue_index(rx_queue));

	ef4_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");

#ifdef CONFIG_RFS_ACCEL

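/* ndo_rx_flow_steer handler for accelerated RFS: dissect the flow, build
 * a matching RX filter spec and insert it so that further packets of
 * this flow are steered to rxq_index.  Returns the filter index on
 * success, which is also recorded for later expiry checks.
 */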
int ef4_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct ef4_channel *channel;
	struct ef4_filter_spec spec;
	struct flow_keys fk;
	int rc;

	if (flow_id == RPS_FLOW_ID_INVALID)
		return -EINVAL;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;
	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
		return -EPROTONOSUPPORT;

	ef4_filter_init_rx(&spec, EF4_FILTER_PRI_HINT,
			   efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	spec.match_flags =
		EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
		EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT |
		EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT;
	spec.ether_type = fk.basic.n_proto;
	spec.ip_proto = fk.basic.ip_proto;

	if (fk.basic.n_proto == htons(ETH_P_IP)) {
		spec.rem_host[0] = fk.addrs.v4addrs.src;
		spec.loc_host[0] = fk.addrs.v4addrs.dst;
	} else {
		memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr));
		memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr));
	}

	spec.rem_port = fk.ports.src;
	spec.loc_port = fk.ports.dst;

	rc = efx->type->filter_rfs_insert(efx, &spec);
	if (rc < 0)
		return rc;

	/* Remember this so we can check whether to expire the filter later */
	channel = ef4_get_channel(efx, rxq_index);
	channel->rps_flow_id[rc] = flow_id;
	++channel->rfs_filters_added;

	if (spec.ether_type == htons(ETH_P_IP))
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
			   spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
			   ntohs(spec.loc_port), rxq_index, flow_id, rc);
	else
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
			   spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
			   ntohs(spec.loc_port), rxq_index, flow_id, rc);

	return rc;
}

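/* Scan up to quota RFS filters, expiring any that the NIC reports as
 * idle.  The scan position persists in efx between calls so that all
 * filters are eventually checked.  Returns false if the filter lock was
 * contended, in which case the caller should retry later.
 */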
bool __ef4_filter_rfs_expire(struct ef4_nic *efx, unsigned int quota)
{
	bool (*expire_one)(struct ef4_nic *efx, u32 flow_id, unsigned int index);
	unsigned int channel_idx, index, size;
	u32 flow_id;

	if (!spin_trylock_bh(&efx->filter_lock))
		return false;

	expire_one = efx->type->filter_rfs_expire_one;
	channel_idx = efx->rps_expire_channel;
	index = efx->rps_expire_index;
	size = efx->type->max_rx_ip_filters;
	while (quota--) {
		struct ef4_channel *channel = ef4_get_channel(efx, channel_idx);
		flow_id = channel->rps_flow_id[index];

		if (flow_id != RPS_FLOW_ID_INVALID &&
		    expire_one(efx, flow_id, index)) {
			netif_info(efx, rx_status, efx->net_dev,
				   "expired filter %d [queue %u flow %u]\n",
				   index, channel_idx, flow_id);
			channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
		}
		if (++index == size) {
			if (++channel_idx == efx->n_channels)
				channel_idx = 0;
			index = 0;
		}
	}
	efx->rps_expire_channel = channel_idx;
	efx->rps_expire_index = index;

	spin_unlock_bh(&efx->filter_lock);
	return true;
}

#endif /* CONFIG_RFS_ACCEL */

/**
 * ef4_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range.  Otherwise %false.
 */
bool ef4_filter_is_mc_recipient(const struct ef4_filter_spec *spec)
{
	if (!(spec->flags & EF4_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EF4_FILTER_RX_DMAQ_ID_DROP)
		return false;

	if (spec->match_flags &
	    (EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_LOC_HOST)) ==
	    (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}
978