// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/crc32.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"

/* Falcon-architecture (SFC4000) support */

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
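
/* Note (illustrative): the *_ORDER values appear to encode the entry
 * counts as 8 << order (8 << 1 == 16 for TX, 8 << 3 == 64 for RX),
 * matching the descriptor cache size fields programmed elsewhere in
 * the driver.
 */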

/* If EF4_MAX_INT_ERRORS internal errors occur within
 * EF4_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EF4_INT_ERROR_EXPIRE 3600
#define EF4_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EF4_RX_FLUSH_COUNT 4

/* Driver generated events */
#define _EF4_CHANNEL_MAGIC_TEST		0x000101
#define _EF4_CHANNEL_MAGIC_FILL		0x000102
#define _EF4_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EF4_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EF4_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EF4_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EF4_CHANNEL_MAGIC_TEST(_channel)				\
	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EF4_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_FILL,			\
			   ef4_rx_queue_index(_rx_queue))
#define EF4_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_RX_DRAIN,			\
			   ef4_rx_queue_index(_rx_queue))
#define EF4_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)
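
/* Worked example (illustrative): a TX drain event for TX queue 3
 * encodes as _EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_TX_DRAIN, 3)
 * == (0x000104 << 8) | 3 == 0x00010403.  _EF4_CHANNEL_MAGIC_CODE()
 * then recovers 0x000104, and the low 8 bits carry the queue number.
 * This assumes _data fits in 8 bits, which holds for the channel and
 * queue indices used above.
 */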

static void ef4_farch_magic_event(struct ef4_channel *channel, u32 magic);

/**************************************************************************
 *
 * Hardware access
 *
 **************************************************************************/

static inline void ef4_write_buf_tbl(struct ef4_nic *efx, ef4_qword_t *value,
				     unsigned int index)
{
	ef4_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

static bool ef4_masked_compare_oword(const ef4_oword_t *a, const ef4_oword_t *b,
				     const ef4_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

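/* Example (illustrative): with a->u64[0] == 0x5, b->u64[0] == 0x7 and
 * mask->u64[0] == 0x2, the XOR exposes the differing bits (0x5 ^ 0x7
 * == 0x2) and the mask keeps only the bits under test, so the owords
 * compare as different; with mask->u64[0] == 0x1 the same pair
 * compares as equal.
 */
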
int ef4_farch_test_registers(struct ef4_nic *efx,
			     const struct ef4_farch_register_test *regs,
			     size_t n_regs)
{
	unsigned address = 0;
	int i, j;
	ef4_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EF4_INVERT_OWORD(imask);

		ef4_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EF4_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EF4_AND_OWORD(reg, original, mask);
			EF4_SET_OWORD32(reg, j, j, 1);

			ef4_writeo(efx, &reg, address);
			ef4_reado(efx, &buf, address);

			if (ef4_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EF4_OR_OWORD(reg, original, mask);
			EF4_SET_OWORD32(reg, j, j, 0);

			ef4_writeo(efx, &reg, address);
			ef4_reado(efx, &buf, address);

			if (ef4_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		ef4_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EF4_OWORD_FMT" read "EF4_OWORD_FMT
		  " at address 0x%x mask "EF4_OWORD_FMT"\n", EF4_OWORD_VAL(reg),
		  EF4_OWORD_VAL(buf), address, EF4_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * ef4_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
ef4_init_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
{
	ef4_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EF4_BUG_ON_PARANOID(!buffer->buf.addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->buf.dma_addr + (i * EF4_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EF4_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		ef4_write_buf_tbl(efx, &buf_desc, index);
	}
}

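/* Worked example (illustrative): assuming EF4_BUF_SIZE == 4096 (the
 * 4KB buffers noted below), a buffer at dma_addr 0x12345000 gives
 * FRF_AZ_BUF_ADR_FBUF == 0x12345000 >> 12 == 0x12345, i.e. each
 * buffer table entry stores a 4K-page number rather than a byte
 * address, and entry i of a multi-page buffer stores that page
 * number plus i.
 */
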
/* Unmaps a buffer and clears the buffer table entries */
static void
ef4_fini_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
{
	ef4_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EF4_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	ef4_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int ef4_alloc_special_buffer(struct ef4_nic *efx,
				    struct ef4_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EF4_BUF_SIZE);

	if (ef4_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
		return -ENOMEM;
	buffer->entries = len / EF4_BUF_SIZE;
	BUG_ON(buffer->buf.dma_addr & (EF4_BUF_SIZE - 1));

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	return 0;
}

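/* Sizing example (illustrative): assuming sizeof(ef4_qword_t) == 8, a
 * 512-entry descriptor ring needs 4096 bytes, which ALIGN() leaves at
 * one EF4_BUF_SIZE page (entries == 1); a 1024-entry ring needs 8192
 * bytes and so consumes two consecutive buffer table IDs.
 */
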
static void
ef4_free_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
{
	if (!buffer->buf.addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, buffer->buf.len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	ef4_nic_free_buffer(efx, &buffer->buf);
	buffer->entries = 0;
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void ef4_farch_notify_tx_desc(struct ef4_tx_queue *tx_queue)
{
	unsigned write_ptr;
	ef4_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EF4_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	ef4_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void ef4_farch_push_tx_desc(struct ef4_tx_queue *tx_queue,
					  const ef4_qword_t *txd)
{
	unsigned write_ptr;
	ef4_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EF4_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	ef4_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}


/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void ef4_farch_tx_write(struct ef4_tx_queue *tx_queue)
{
	struct ef4_tx_buffer *buffer;
	ef4_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	tx_queue->xmit_more_available = false;
	if (unlikely(tx_queue->write_count == tx_queue->insert_count))
		return;

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = ef4_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		EF4_BUG_ON_PARANOID(buffer->flags & EF4_TX_BUF_OPTION);

		/* Create TX descriptor ring entry */
		BUILD_BUG_ON(EF4_TX_BUF_CONT != 1);
		EF4_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT,
				     buffer->flags & EF4_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (ef4_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = ef4_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		ef4_farch_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		ef4_farch_notify_tx_desc(tx_queue);
	}
}

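/* Design note (illustrative): the push path writes the first new
 * descriptor inline with the doorbell via ef4_farch_push_tx_desc(),
 * which presumably saves the NIC a host-memory descriptor fetch on
 * lightly loaded queues; otherwise only the write pointer is notified
 * and the hardware fetches the descriptors itself.  The eligibility
 * test lives in ef4_nic_may_push_tx_desc().
 */
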
unsigned int ef4_farch_tx_limit_len(struct ef4_tx_queue *tx_queue,
				    dma_addr_t dma_addr, unsigned int len)
{
	/* Don't cross 4K boundaries with descriptors. */
	unsigned int limit = (~dma_addr & (EF4_PAGE_SIZE - 1)) + 1;

	len = min(limit, len);

	if (EF4_WORKAROUND_5391(tx_queue->efx) && (dma_addr & 0xf))
		len = min_t(unsigned int, len, 512 - (dma_addr & 0xf));

	return len;
}

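/* Worked example (illustrative): assuming EF4_PAGE_SIZE == 4096, a
 * descriptor at dma_addr == 0x1ff0 gets limit == (~0x1ff0 & 0xfff) + 1
 * == 0xf + 1 == 16, i.e. it is clamped to the 16 bytes left before the
 * 4K boundary.  With workaround 5391 active and dma_addr == 0x1004,
 * len is additionally capped at 512 - 4 == 508 bytes.
 */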

/* Allocate hardware resources for a TX queue */
int ef4_farch_tx_probe(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return ef4_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(ef4_qword_t));
}

void ef4_farch_tx_init(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	ef4_oword_t reg;

	/* Pin TX descriptor ring */
	ef4_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EF4_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		int csum = tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD;
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	ef4_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EF4_MAX_TX_QUEUES > 128);

		ef4_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD)
			__clear_bit_le(tx_queue->queue, &reg);
		else
			__set_bit_le(tx_queue->queue, &reg);
		ef4_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		EF4_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		ef4_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}

static void ef4_farch_flush_tx_queue(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	ef4_oword_t tx_flush_descq;

	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
	atomic_set(&tx_queue->flush_outstanding, 1);

	EF4_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	ef4_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void ef4_farch_tx_fini(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	ef4_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EF4_ZERO_OWORD(tx_desc_ptr);
	ef4_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	ef4_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void ef4_farch_tx_remove(struct ef4_tx_queue *tx_queue)
{
	ef4_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* This creates an entry in the RX descriptor queue */
static inline void
ef4_farch_build_rx_desc(struct ef4_rx_queue *rx_queue, unsigned index)
{
	struct ef4_rx_buffer *rx_buf;
	ef4_qword_t *rxd;

	rxd = ef4_rx_desc(rx_queue, index);
	rx_buf = ef4_rx_buffer(rx_queue, index);
	EF4_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void ef4_farch_rx_write(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	ef4_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		ef4_farch_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EF4_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	ef4_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			ef4_rx_queue_index(rx_queue));
}

int ef4_farch_rx_probe(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return ef4_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(ef4_qword_t));
}

void ef4_farch_rx_init(struct ef4_rx_queue *rx_queue)
{
	ef4_oword_t rx_desc_ptr;
	struct ef4_nic *efx = rx_queue->efx;
	bool is_b0 = ef4_nic_rev(efx) >= EF4_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;
	bool jumbo_en;

	/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
	 * DMA to continue after a PCIe page boundary (and scattering
	 * is not possible).  In Falcon B0 and Siena, it enables
	 * scatter.
	 */
	jumbo_en = !is_b0 || efx->rx_scatter;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  ef4_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->scatter_n = 0;

	/* Pin RX descriptor ring */
	ef4_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EF4_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      ef4_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      ef4_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
			      FRF_AZ_RX_DESCQ_EN, 1);
	ef4_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 ef4_rx_queue_index(rx_queue));
}

static void ef4_farch_flush_rx_queue(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	ef4_oword_t rx_flush_descq;

	EF4_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     ef4_rx_queue_index(rx_queue));
	ef4_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void ef4_farch_rx_fini(struct ef4_rx_queue *rx_queue)
{
	ef4_oword_t rx_desc_ptr;
	struct ef4_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EF4_ZERO_OWORD(rx_desc_ptr);
	ef4_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 ef4_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	ef4_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void ef4_farch_rx_remove(struct ef4_rx_queue *rx_queue)
{
	ef4_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

/* ef4_farch_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool ef4_farch_flush_wake(struct ef4_nic *efx)
{
	/* Ensure that all updates are visible to ef4_farch_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->active_queues) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EF4_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}

static bool ef4_check_tx_flush_complete(struct ef4_nic *efx)
{
	bool i = true;
	ef4_oword_t txd_ptr_tbl;
	struct ef4_channel *channel;
	struct ef4_tx_queue *tx_queue;

	ef4_for_each_channel(channel, efx) {
		ef4_for_each_channel_tx_queue(tx_queue, channel) {
			ef4_reado_table(efx, &txd_ptr_tbl,
					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
			if (EF4_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_FLUSH) ||
			    EF4_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_EN)) {
				netif_dbg(efx, hw, efx->net_dev,
					  "flush did not complete on TXQ %d\n",
					  tx_queue->queue);
				i = false;
			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
						  1, 0)) {
				/* The flush is complete, but we didn't
				 * receive a flush completion event
				 */
				netif_dbg(efx, hw, efx->net_dev,
					  "flush complete on TXQ %d, so drain "
					  "the queue\n", tx_queue->queue);
				/* Don't need to increment active_queues as it
				 * has already been incremented for the queues
				 * which did not drain
				 */
				ef4_farch_magic_event(channel,
						      EF4_CHANNEL_MAGIC_TX_DRAIN(
							      tx_queue));
			}
		}
	}

	return i;
}

/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
static int ef4_farch_do_flush(struct ef4_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct ef4_channel *channel;
	struct ef4_rx_queue *rx_queue;
	struct ef4_tx_queue *tx_queue;
	int rc = 0;

	ef4_for_each_channel(channel, efx) {
		ef4_for_each_channel_tx_queue(tx_queue, channel) {
			ef4_farch_flush_tx_queue(tx_queue);
		}
		ef4_for_each_channel_rx_queue(rx_queue, channel) {
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->active_queues) > 0) {
		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		ef4_for_each_channel(channel, efx) {
			ef4_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EF4_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					ef4_farch_flush_rx_queue(rx_queue);
				}
			}
		}

		timeout = wait_event_timeout(efx->flush_wq,
					     ef4_farch_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->active_queues) &&
	    !ef4_check_tx_flush_complete(efx)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->active_queues),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->active_queues, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	return rc;
}

int ef4_farch_fini_dmaq(struct ef4_nic *efx)
{
	struct ef4_channel *channel;
	struct ef4_tx_queue *tx_queue;
	struct ef4_rx_queue *rx_queue;
	int rc = 0;

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		/* Only perform flush if DMA is enabled */
		if (efx->pci_dev->is_busmaster) {
			efx->type->prepare_flush(efx);
			rc = ef4_farch_do_flush(efx);
			efx->type->finish_flush(efx);
		}

		ef4_for_each_channel(channel, efx) {
			ef4_for_each_channel_rx_queue(rx_queue, channel)
				ef4_farch_rx_fini(rx_queue);
			ef4_for_each_channel_tx_queue(tx_queue, channel)
				ef4_farch_tx_fini(tx_queue);
		}
	}

	return rc;
}

/* Reset queue and flush accounting after FLR
 *
 * One possible cause of FLR recovery is that DMA may be failing (e.g. if
 * bus mastering was disabled), in which case we don't receive (RXQ) flush
 * completion events.  This means that efx->rxq_flush_outstanding remained
 * at 4 after the FLR; also, efx->active_queues was non-zero (as no flush
 * completion events were received, and we didn't go through
 * ef4_check_tx_flush_complete()).  If we don't fix this up, on the next
 * call to ef4_realloc_channels() we won't flush any RX queues because
 * efx->rxq_flush_outstanding is at the limit of 4 for batched flush
 * requests; and efx->active_queues gets messed up because we keep
 * incrementing for the newly initialised queues, but it never went to
 * zero previously.  Then we get a timeout every time we try to restart
 * the queues, as it doesn't go back to zero when we should be flushing
 * the queues.
 */
void ef4_farch_finish_flr(struct ef4_nic *efx)
{
	atomic_set(&efx->rxq_flush_pending, 0);
	atomic_set(&efx->rxq_flush_outstanding, 0);
	atomic_set(&efx->active_queues, 0);
}


/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void ef4_farch_ev_read_ack(struct ef4_channel *channel)
{
	ef4_dword_t reg;
	struct ef4_nic *efx = channel->efx;

	EF4_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);

	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
	 * of 4 bytes, but it is really 16 bytes just like later revisions.
	 */
	ef4_writed(efx, &reg,
		   efx->type->evq_rptr_tbl_base +
		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
}

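/* Worked example (illustrative): taking the comment above at face
 * value (16-byte step), channel 3 acknowledges events with a dword
 * write at evq_rptr_tbl_base + FR_BZ_EVQ_RPTR_STEP * 3 == base + 48.
 */
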
/* Use HW to insert a SW defined event */
void ef4_farch_generate_event(struct ef4_nic *efx, unsigned int evq,
			      ef4_qword_t *event)
{
	ef4_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EF4_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	ef4_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

static void ef4_farch_magic_event(struct ef4_channel *channel, u32 magic)
{
	ef4_qword_t event;

	EF4_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	ef4_farch_generate_event(channel->efx, channel->channel, &event);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct ef4_tx_queue *tx_queue;
	struct ef4_nic *efx = channel->efx;
	int tx_packets = 0;

	if (unlikely(READ_ONCE(efx->reset_pending)))
		return 0;

	if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = ef4_channel_get_tx_queue(
			channel, tx_ev_q_label % EF4_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		ef4_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = ef4_channel_get_tx_queue(
			channel, tx_ev_q_label % EF4_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		ef4_farch_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
		ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EF4_QWORD_FMT"\n", channel->channel,
			  EF4_QWORD_VAL(*event));
	}

	return tx_packets;
}

/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 ef4_farch_handle_rx_not_ok(struct ef4_rx_queue *rx_queue,
				      const ef4_qword_t *event)
{
	struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
	struct ef4_nic *efx = rx_queue->efx;
	bool __maybe_unused rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_pause_frm;

	rx_ev_tobe_disc = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_buf_owner_id_err = EF4_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EF4_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EF4_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) ?
			  0 : EF4_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);


	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef DEBUG
	{
	/* Every error apart from tobe_disc and pause_frm */

	bool rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
				rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
				rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EF4_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  ef4_rx_queue_index(rx_queue), EF4_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
	}
#endif

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EF4_RX_PKT_DISCARD : 0;
}

/* Handle receive events that are not in-order. Return true if this
 * can be handled as a partial packet discard, false if it's more
 * serious.
 */
static bool
ef4_farch_handle_rx_bad_index(struct ef4_rx_queue *rx_queue, unsigned index)
{
	struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
	struct ef4_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	if (rx_queue->scatter_n &&
	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
		      rx_queue->ptr_mask)) {
		++channel->n_rx_nodesc_trunc;
		return true;
	}

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	ef4_schedule_reset(efx, EF4_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
	return false;
}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
978*4882a593Smuzhiyun static void
ef4_farch_handle_rx_event(struct ef4_channel * channel,const ef4_qword_t * event)979*4882a593Smuzhiyun ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event)
980*4882a593Smuzhiyun {
981*4882a593Smuzhiyun 	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
982*4882a593Smuzhiyun 	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
983*4882a593Smuzhiyun 	unsigned expected_ptr;
984*4882a593Smuzhiyun 	bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
985*4882a593Smuzhiyun 	u16 flags;
986*4882a593Smuzhiyun 	struct ef4_rx_queue *rx_queue;
987*4882a593Smuzhiyun 	struct ef4_nic *efx = channel->efx;
988*4882a593Smuzhiyun 
989*4882a593Smuzhiyun 	if (unlikely(READ_ONCE(efx->reset_pending)))
990*4882a593Smuzhiyun 		return;
991*4882a593Smuzhiyun 
992*4882a593Smuzhiyun 	rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
993*4882a593Smuzhiyun 	rx_ev_sop = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
994*4882a593Smuzhiyun 	WARN_ON(EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
995*4882a593Smuzhiyun 		channel->channel);
996*4882a593Smuzhiyun 
997*4882a593Smuzhiyun 	rx_queue = ef4_channel_get_rx_queue(channel);
998*4882a593Smuzhiyun 
999*4882a593Smuzhiyun 	rx_ev_desc_ptr = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
1000*4882a593Smuzhiyun 	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
1001*4882a593Smuzhiyun 			rx_queue->ptr_mask);
1002*4882a593Smuzhiyun 
1003*4882a593Smuzhiyun 	/* Check for partial drops and other errors */
1004*4882a593Smuzhiyun 	if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
1005*4882a593Smuzhiyun 	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
1006*4882a593Smuzhiyun 		if (rx_ev_desc_ptr != expected_ptr &&
1007*4882a593Smuzhiyun 		    !ef4_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
1008*4882a593Smuzhiyun 			return;
1009*4882a593Smuzhiyun 
1010*4882a593Smuzhiyun 		/* Discard all pending fragments */
1011*4882a593Smuzhiyun 		if (rx_queue->scatter_n) {
1012*4882a593Smuzhiyun 			ef4_rx_packet(
1013*4882a593Smuzhiyun 				rx_queue,
1014*4882a593Smuzhiyun 				rx_queue->removed_count & rx_queue->ptr_mask,
1015*4882a593Smuzhiyun 				rx_queue->scatter_n, 0, EF4_RX_PKT_DISCARD);
1016*4882a593Smuzhiyun 			rx_queue->removed_count += rx_queue->scatter_n;
1017*4882a593Smuzhiyun 			rx_queue->scatter_n = 0;
1018*4882a593Smuzhiyun 		}
1019*4882a593Smuzhiyun 
1020*4882a593Smuzhiyun 		/* Return if there is no new fragment */
1021*4882a593Smuzhiyun 		if (rx_ev_desc_ptr != expected_ptr)
1022*4882a593Smuzhiyun 			return;
1023*4882a593Smuzhiyun 
1024*4882a593Smuzhiyun 		/* Discard new fragment if not SOP */
1025*4882a593Smuzhiyun 		if (!rx_ev_sop) {
1026*4882a593Smuzhiyun 			ef4_rx_packet(
1027*4882a593Smuzhiyun 				rx_queue,
1028*4882a593Smuzhiyun 				rx_queue->removed_count & rx_queue->ptr_mask,
1029*4882a593Smuzhiyun 				1, 0, EF4_RX_PKT_DISCARD);
1030*4882a593Smuzhiyun 			++rx_queue->removed_count;
1031*4882a593Smuzhiyun 			return;
1032*4882a593Smuzhiyun 		}
1033*4882a593Smuzhiyun 	}
1034*4882a593Smuzhiyun 
1035*4882a593Smuzhiyun 	++rx_queue->scatter_n;
1036*4882a593Smuzhiyun 	if (rx_ev_cont)
1037*4882a593Smuzhiyun 		return;
1038*4882a593Smuzhiyun 
1039*4882a593Smuzhiyun 	rx_ev_byte_cnt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
1040*4882a593Smuzhiyun 	rx_ev_pkt_ok = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
1041*4882a593Smuzhiyun 	rx_ev_hdr_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
1042*4882a593Smuzhiyun 
1043*4882a593Smuzhiyun 	if (likely(rx_ev_pkt_ok)) {
1044*4882a593Smuzhiyun 		/* If packet is marked as OK then we can rely on the
1045*4882a593Smuzhiyun 		 * hardware checksum and classification.
1046*4882a593Smuzhiyun 		 */
1047*4882a593Smuzhiyun 		flags = 0;
1048*4882a593Smuzhiyun 		switch (rx_ev_hdr_type) {
1049*4882a593Smuzhiyun 		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
1050*4882a593Smuzhiyun 			flags |= EF4_RX_PKT_TCP;
1051*4882a593Smuzhiyun 			fallthrough;
1052*4882a593Smuzhiyun 		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
1053*4882a593Smuzhiyun 			flags |= EF4_RX_PKT_CSUMMED;
1054*4882a593Smuzhiyun 			fallthrough;
1055*4882a593Smuzhiyun 		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
1056*4882a593Smuzhiyun 		case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
1057*4882a593Smuzhiyun 			break;
1058*4882a593Smuzhiyun 		}
1059*4882a593Smuzhiyun 	} else {
1060*4882a593Smuzhiyun 		flags = ef4_farch_handle_rx_not_ok(rx_queue, event);
1061*4882a593Smuzhiyun 	}
1062*4882a593Smuzhiyun 
1063*4882a593Smuzhiyun 	/* Detect multicast packets that didn't match the filter */
1064*4882a593Smuzhiyun 	rx_ev_mcast_pkt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
1065*4882a593Smuzhiyun 	if (rx_ev_mcast_pkt) {
1066*4882a593Smuzhiyun 		unsigned int rx_ev_mcast_hash_match =
1067*4882a593Smuzhiyun 			EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
1068*4882a593Smuzhiyun 
1069*4882a593Smuzhiyun 		if (unlikely(!rx_ev_mcast_hash_match)) {
1070*4882a593Smuzhiyun 			++channel->n_rx_mcast_mismatch;
1071*4882a593Smuzhiyun 			flags |= EF4_RX_PKT_DISCARD;
1072*4882a593Smuzhiyun 		}
1073*4882a593Smuzhiyun 	}
1074*4882a593Smuzhiyun 
1075*4882a593Smuzhiyun 	channel->irq_mod_score += 2;
1076*4882a593Smuzhiyun 
1077*4882a593Smuzhiyun 	/* Handle received packet */
1078*4882a593Smuzhiyun 	ef4_rx_packet(rx_queue,
1079*4882a593Smuzhiyun 		      rx_queue->removed_count & rx_queue->ptr_mask,
1080*4882a593Smuzhiyun 		      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
1081*4882a593Smuzhiyun 	rx_queue->removed_count += rx_queue->scatter_n;
1082*4882a593Smuzhiyun 	rx_queue->scatter_n = 0;
1083*4882a593Smuzhiyun }
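/* Worked example (illustrative only): a frame spanning three RX
 * descriptors arrives as two events with FSF_AZ_RX_EV_JUMBO_CONT set
 * (scatter_n grows to 2) followed by one event without it.  Only that
 * final event reaches ef4_rx_packet(), which then consumes all three
 * descriptors (scatter_n == 3) in a single call.
 */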
1084*4882a593Smuzhiyun 
1085*4882a593Smuzhiyun /* If this flush done event corresponds to a &struct ef4_tx_queue, then
1086*4882a593Smuzhiyun  * send an %EF4_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
1087*4882a593Smuzhiyun  * of all transmit completions.
1088*4882a593Smuzhiyun  */
1089*4882a593Smuzhiyun static void
1090*4882a593Smuzhiyun ef4_farch_handle_tx_flush_done(struct ef4_nic *efx, ef4_qword_t *event)
1091*4882a593Smuzhiyun {
1092*4882a593Smuzhiyun 	struct ef4_tx_queue *tx_queue;
1093*4882a593Smuzhiyun 	int qid;
1094*4882a593Smuzhiyun 
1095*4882a593Smuzhiyun 	qid = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1096*4882a593Smuzhiyun 	if (qid < EF4_TXQ_TYPES * efx->n_tx_channels) {
1097*4882a593Smuzhiyun 		tx_queue = ef4_get_tx_queue(efx, qid / EF4_TXQ_TYPES,
1098*4882a593Smuzhiyun 					    qid % EF4_TXQ_TYPES);
1099*4882a593Smuzhiyun 		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
1100*4882a593Smuzhiyun 			ef4_farch_magic_event(tx_queue->channel,
1101*4882a593Smuzhiyun 					      EF4_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
1102*4882a593Smuzhiyun 		}
1103*4882a593Smuzhiyun 	}
1104*4882a593Smuzhiyun }
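/* Worked example (illustrative only; EF4_TXQ_TYPES == 4 is assumed here,
 * the real constant lives in net_driver.h): a flush-done event carrying
 * qid == 9 decodes to channel 9 / 4 == 2 and queue type 9 % 4 == 1,
 * i.e. the second TX queue type on channel 2.
 */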
1105*4882a593Smuzhiyun 
1106*4882a593Smuzhiyun /* If this flush done event corresponds to a &struct ef4_rx_queue: If the flush
1107*4882a593Smuzhiyun  * was successful then send an %EF4_CHANNEL_MAGIC_RX_DRAIN, otherwise add
1108*4882a593Smuzhiyun  * the RX queue back to the mask of RX queues in need of flushing.
1109*4882a593Smuzhiyun  */
1110*4882a593Smuzhiyun static void
1111*4882a593Smuzhiyun ef4_farch_handle_rx_flush_done(struct ef4_nic *efx, ef4_qword_t *event)
1112*4882a593Smuzhiyun {
1113*4882a593Smuzhiyun 	struct ef4_channel *channel;
1114*4882a593Smuzhiyun 	struct ef4_rx_queue *rx_queue;
1115*4882a593Smuzhiyun 	int qid;
1116*4882a593Smuzhiyun 	bool failed;
1117*4882a593Smuzhiyun 
1118*4882a593Smuzhiyun 	qid = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1119*4882a593Smuzhiyun 	failed = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1120*4882a593Smuzhiyun 	if (qid >= efx->n_channels)
1121*4882a593Smuzhiyun 		return;
1122*4882a593Smuzhiyun 	channel = ef4_get_channel(efx, qid);
1123*4882a593Smuzhiyun 	if (!ef4_channel_has_rx_queue(channel))
1124*4882a593Smuzhiyun 		return;
1125*4882a593Smuzhiyun 	rx_queue = ef4_channel_get_rx_queue(channel);
1126*4882a593Smuzhiyun 
1127*4882a593Smuzhiyun 	if (failed) {
1128*4882a593Smuzhiyun 		netif_info(efx, hw, efx->net_dev,
1129*4882a593Smuzhiyun 			   "RXQ %d flush retry\n", qid);
1130*4882a593Smuzhiyun 		rx_queue->flush_pending = true;
1131*4882a593Smuzhiyun 		atomic_inc(&efx->rxq_flush_pending);
1132*4882a593Smuzhiyun 	} else {
1133*4882a593Smuzhiyun 		ef4_farch_magic_event(ef4_rx_queue_channel(rx_queue),
1134*4882a593Smuzhiyun 				      EF4_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
1135*4882a593Smuzhiyun 	}
1136*4882a593Smuzhiyun 	atomic_dec(&efx->rxq_flush_outstanding);
1137*4882a593Smuzhiyun 	if (ef4_farch_flush_wake(efx))
1138*4882a593Smuzhiyun 		wake_up(&efx->flush_wq);
1139*4882a593Smuzhiyun }
1140*4882a593Smuzhiyun 
1141*4882a593Smuzhiyun static void
1142*4882a593Smuzhiyun ef4_farch_handle_drain_event(struct ef4_channel *channel)
1143*4882a593Smuzhiyun {
1144*4882a593Smuzhiyun 	struct ef4_nic *efx = channel->efx;
1145*4882a593Smuzhiyun 
1146*4882a593Smuzhiyun 	WARN_ON(atomic_read(&efx->active_queues) == 0);
1147*4882a593Smuzhiyun 	atomic_dec(&efx->active_queues);
1148*4882a593Smuzhiyun 	if (ef4_farch_flush_wake(efx))
1149*4882a593Smuzhiyun 		wake_up(&efx->flush_wq);
1150*4882a593Smuzhiyun }
1151*4882a593Smuzhiyun 
1152*4882a593Smuzhiyun static void ef4_farch_handle_generated_event(struct ef4_channel *channel,
1153*4882a593Smuzhiyun 					     ef4_qword_t *event)
1154*4882a593Smuzhiyun {
1155*4882a593Smuzhiyun 	struct ef4_nic *efx = channel->efx;
1156*4882a593Smuzhiyun 	struct ef4_rx_queue *rx_queue =
1157*4882a593Smuzhiyun 		ef4_channel_has_rx_queue(channel) ?
1158*4882a593Smuzhiyun 		ef4_channel_get_rx_queue(channel) : NULL;
1159*4882a593Smuzhiyun 	unsigned magic, code;
1160*4882a593Smuzhiyun 
1161*4882a593Smuzhiyun 	magic = EF4_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
1162*4882a593Smuzhiyun 	code = _EF4_CHANNEL_MAGIC_CODE(magic);
1163*4882a593Smuzhiyun 
1164*4882a593Smuzhiyun 	if (magic == EF4_CHANNEL_MAGIC_TEST(channel)) {
1165*4882a593Smuzhiyun 		channel->event_test_cpu = raw_smp_processor_id();
1166*4882a593Smuzhiyun 	} else if (rx_queue && magic == EF4_CHANNEL_MAGIC_FILL(rx_queue)) {
1167*4882a593Smuzhiyun 		/* The queue must be empty, so we won't receive any rx
1168*4882a593Smuzhiyun 		 * events, so ef4_process_channel() won't refill the
1169*4882a593Smuzhiyun 		 * queue. Refill it here */
1170*4882a593Smuzhiyun 		ef4_fast_push_rx_descriptors(rx_queue, true);
1171*4882a593Smuzhiyun 	} else if (rx_queue && magic == EF4_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
1172*4882a593Smuzhiyun 		ef4_farch_handle_drain_event(channel);
1173*4882a593Smuzhiyun 	} else if (code == _EF4_CHANNEL_MAGIC_TX_DRAIN) {
1174*4882a593Smuzhiyun 		ef4_farch_handle_drain_event(channel);
1175*4882a593Smuzhiyun 	} else {
1176*4882a593Smuzhiyun 		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
1177*4882a593Smuzhiyun 			  "generated event "EF4_QWORD_FMT"\n",
1178*4882a593Smuzhiyun 			  channel->channel, EF4_QWORD_VAL(*event));
1179*4882a593Smuzhiyun 	}
1180*4882a593Smuzhiyun }
1181*4882a593Smuzhiyun 
1182*4882a593Smuzhiyun static void
1183*4882a593Smuzhiyun ef4_farch_handle_driver_event(struct ef4_channel *channel, ef4_qword_t *event)
1184*4882a593Smuzhiyun {
1185*4882a593Smuzhiyun 	struct ef4_nic *efx = channel->efx;
1186*4882a593Smuzhiyun 	unsigned int ev_sub_code;
1187*4882a593Smuzhiyun 	unsigned int ev_sub_data;
1188*4882a593Smuzhiyun 
1189*4882a593Smuzhiyun 	ev_sub_code = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
1190*4882a593Smuzhiyun 	ev_sub_data = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1191*4882a593Smuzhiyun 
1192*4882a593Smuzhiyun 	switch (ev_sub_code) {
1193*4882a593Smuzhiyun 	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
1194*4882a593Smuzhiyun 		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
1195*4882a593Smuzhiyun 			   channel->channel, ev_sub_data);
1196*4882a593Smuzhiyun 		ef4_farch_handle_tx_flush_done(efx, event);
1197*4882a593Smuzhiyun 		break;
1198*4882a593Smuzhiyun 	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
1199*4882a593Smuzhiyun 		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
1200*4882a593Smuzhiyun 			   channel->channel, ev_sub_data);
1201*4882a593Smuzhiyun 		ef4_farch_handle_rx_flush_done(efx, event);
1202*4882a593Smuzhiyun 		break;
1203*4882a593Smuzhiyun 	case FSE_AZ_EVQ_INIT_DONE_EV:
1204*4882a593Smuzhiyun 		netif_dbg(efx, hw, efx->net_dev,
1205*4882a593Smuzhiyun 			  "channel %d EVQ %d initialised\n",
1206*4882a593Smuzhiyun 			  channel->channel, ev_sub_data);
1207*4882a593Smuzhiyun 		break;
1208*4882a593Smuzhiyun 	case FSE_AZ_SRM_UPD_DONE_EV:
1209*4882a593Smuzhiyun 		netif_vdbg(efx, hw, efx->net_dev,
1210*4882a593Smuzhiyun 			   "channel %d SRAM update done\n", channel->channel);
1211*4882a593Smuzhiyun 		break;
1212*4882a593Smuzhiyun 	case FSE_AZ_WAKE_UP_EV:
1213*4882a593Smuzhiyun 		netif_vdbg(efx, hw, efx->net_dev,
1214*4882a593Smuzhiyun 			   "channel %d RXQ %d wakeup event\n",
1215*4882a593Smuzhiyun 			   channel->channel, ev_sub_data);
1216*4882a593Smuzhiyun 		break;
1217*4882a593Smuzhiyun 	case FSE_AZ_TIMER_EV:
1218*4882a593Smuzhiyun 		netif_vdbg(efx, hw, efx->net_dev,
1219*4882a593Smuzhiyun 			   "channel %d RX queue %d timer expired\n",
1220*4882a593Smuzhiyun 			   channel->channel, ev_sub_data);
1221*4882a593Smuzhiyun 		break;
1222*4882a593Smuzhiyun 	case FSE_AA_RX_RECOVER_EV:
1223*4882a593Smuzhiyun 		netif_err(efx, rx_err, efx->net_dev,
1224*4882a593Smuzhiyun 			  "channel %d seen DRIVER RX_RESET event. "
1225*4882a593Smuzhiyun 			  "Resetting.\n", channel->channel);
1226*4882a593Smuzhiyun 		atomic_inc(&efx->rx_reset);
1227*4882a593Smuzhiyun 		ef4_schedule_reset(efx,
1228*4882a593Smuzhiyun 				   EF4_WORKAROUND_6555(efx) ?
1229*4882a593Smuzhiyun 				   RESET_TYPE_RX_RECOVERY :
1230*4882a593Smuzhiyun 				   RESET_TYPE_DISABLE);
1231*4882a593Smuzhiyun 		break;
1232*4882a593Smuzhiyun 	case FSE_BZ_RX_DSC_ERROR_EV:
1233*4882a593Smuzhiyun 		netif_err(efx, rx_err, efx->net_dev,
1234*4882a593Smuzhiyun 			  "RX DMA Q %d reports descriptor fetch error."
1235*4882a593Smuzhiyun 			  " RX Q %d is disabled.\n", ev_sub_data,
1236*4882a593Smuzhiyun 			  ev_sub_data);
1237*4882a593Smuzhiyun 		ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
1238*4882a593Smuzhiyun 		break;
1239*4882a593Smuzhiyun 	case FSE_BZ_TX_DSC_ERROR_EV:
1240*4882a593Smuzhiyun 		netif_err(efx, tx_err, efx->net_dev,
1241*4882a593Smuzhiyun 			  "TX DMA Q %d reports descriptor fetch error."
1242*4882a593Smuzhiyun 			  " TX Q %d is disabled.\n", ev_sub_data,
1243*4882a593Smuzhiyun 			  ev_sub_data);
1244*4882a593Smuzhiyun 		ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
1245*4882a593Smuzhiyun 		break;
1246*4882a593Smuzhiyun 	default:
1247*4882a593Smuzhiyun 		netif_vdbg(efx, hw, efx->net_dev,
1248*4882a593Smuzhiyun 			   "channel %d unknown driver event code %d "
1249*4882a593Smuzhiyun 			   "data %04x\n", channel->channel, ev_sub_code,
1250*4882a593Smuzhiyun 			   ev_sub_data);
1251*4882a593Smuzhiyun 		break;
1252*4882a593Smuzhiyun 	}
1253*4882a593Smuzhiyun }
1254*4882a593Smuzhiyun 
1255*4882a593Smuzhiyun int ef4_farch_ev_process(struct ef4_channel *channel, int budget)
1256*4882a593Smuzhiyun {
1257*4882a593Smuzhiyun 	struct ef4_nic *efx = channel->efx;
1258*4882a593Smuzhiyun 	unsigned int read_ptr;
1259*4882a593Smuzhiyun 	ef4_qword_t event, *p_event;
1260*4882a593Smuzhiyun 	int ev_code;
1261*4882a593Smuzhiyun 	int tx_packets = 0;
1262*4882a593Smuzhiyun 	int spent = 0;
1263*4882a593Smuzhiyun 
1264*4882a593Smuzhiyun 	if (budget <= 0)
1265*4882a593Smuzhiyun 		return spent;
1266*4882a593Smuzhiyun 
1267*4882a593Smuzhiyun 	read_ptr = channel->eventq_read_ptr;
1268*4882a593Smuzhiyun 
1269*4882a593Smuzhiyun 	for (;;) {
1270*4882a593Smuzhiyun 		p_event = ef4_event(channel, read_ptr);
1271*4882a593Smuzhiyun 		event = *p_event;
1272*4882a593Smuzhiyun 
1273*4882a593Smuzhiyun 		if (!ef4_event_present(&event))
1274*4882a593Smuzhiyun 			/* End of events */
1275*4882a593Smuzhiyun 			break;
1276*4882a593Smuzhiyun 
1277*4882a593Smuzhiyun 		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
1278*4882a593Smuzhiyun 			   "channel %d event is "EF4_QWORD_FMT"\n",
1279*4882a593Smuzhiyun 			   channel->channel, EF4_QWORD_VAL(event));
1280*4882a593Smuzhiyun 
1281*4882a593Smuzhiyun 		/* Clear this event by marking it all ones */
1282*4882a593Smuzhiyun 		EF4_SET_QWORD(*p_event);
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun 		++read_ptr;
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun 		ev_code = EF4_QWORD_FIELD(event, FSF_AZ_EV_CODE);
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun 		switch (ev_code) {
1289*4882a593Smuzhiyun 		case FSE_AZ_EV_CODE_RX_EV:
1290*4882a593Smuzhiyun 			ef4_farch_handle_rx_event(channel, &event);
1291*4882a593Smuzhiyun 			if (++spent == budget)
1292*4882a593Smuzhiyun 				goto out;
1293*4882a593Smuzhiyun 			break;
1294*4882a593Smuzhiyun 		case FSE_AZ_EV_CODE_TX_EV:
1295*4882a593Smuzhiyun 			tx_packets += ef4_farch_handle_tx_event(channel,
1296*4882a593Smuzhiyun 								&event);
1297*4882a593Smuzhiyun 			if (tx_packets > efx->txq_entries) {
1298*4882a593Smuzhiyun 				spent = budget;
1299*4882a593Smuzhiyun 				goto out;
1300*4882a593Smuzhiyun 			}
1301*4882a593Smuzhiyun 			break;
1302*4882a593Smuzhiyun 		case FSE_AZ_EV_CODE_DRV_GEN_EV:
1303*4882a593Smuzhiyun 			ef4_farch_handle_generated_event(channel, &event);
1304*4882a593Smuzhiyun 			break;
1305*4882a593Smuzhiyun 		case FSE_AZ_EV_CODE_DRIVER_EV:
1306*4882a593Smuzhiyun 			ef4_farch_handle_driver_event(channel, &event);
1307*4882a593Smuzhiyun 			break;
1308*4882a593Smuzhiyun 		case FSE_AZ_EV_CODE_GLOBAL_EV:
1309*4882a593Smuzhiyun 			if (efx->type->handle_global_event &&
1310*4882a593Smuzhiyun 			    efx->type->handle_global_event(channel, &event))
1311*4882a593Smuzhiyun 				break;
1312*4882a593Smuzhiyun 			fallthrough;
1313*4882a593Smuzhiyun 		default:
1314*4882a593Smuzhiyun 			netif_err(channel->efx, hw, channel->efx->net_dev,
1315*4882a593Smuzhiyun 				  "channel %d unknown event type %d (data "
1316*4882a593Smuzhiyun 				  EF4_QWORD_FMT ")\n", channel->channel,
1317*4882a593Smuzhiyun 				  ev_code, EF4_QWORD_VAL(event));
1318*4882a593Smuzhiyun 		}
1319*4882a593Smuzhiyun 	}
1320*4882a593Smuzhiyun 
1321*4882a593Smuzhiyun out:
1322*4882a593Smuzhiyun 	channel->eventq_read_ptr = read_ptr;
1323*4882a593Smuzhiyun 	return spent;
1324*4882a593Smuzhiyun }
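/* Minimal usage sketch (hypothetical, not the driver's real poll
 * handler): an event-queue consumer would typically pass its NAPI
 * budget straight through and rearm the event queue only once the
 * budget is not exhausted.  The napi_str member name is assumed to
 * match the driver's channel layout.
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
	struct ef4_channel *channel =
		container_of(napi, struct ef4_channel, napi_str);
	int spent = ef4_farch_ev_process(channel, budget);

	if (spent < budget) {
		napi_complete_done(napi, spent);
		ef4_farch_ev_read_ack(channel);	/* rearm the event queue */
	}
	return spent;
}
#endif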
1325*4882a593Smuzhiyun 
1326*4882a593Smuzhiyun /* Allocate buffer table entries for event queue */
1327*4882a593Smuzhiyun int ef4_farch_ev_probe(struct ef4_channel *channel)
1328*4882a593Smuzhiyun {
1329*4882a593Smuzhiyun 	struct ef4_nic *efx = channel->efx;
1330*4882a593Smuzhiyun 	unsigned entries;
1331*4882a593Smuzhiyun 
1332*4882a593Smuzhiyun 	entries = channel->eventq_mask + 1;
1333*4882a593Smuzhiyun 	return ef4_alloc_special_buffer(efx, &channel->eventq,
1334*4882a593Smuzhiyun 					entries * sizeof(ef4_qword_t));
1335*4882a593Smuzhiyun }
1336*4882a593Smuzhiyun 
1337*4882a593Smuzhiyun int ef4_farch_ev_init(struct ef4_channel *channel)
1338*4882a593Smuzhiyun {
1339*4882a593Smuzhiyun 	ef4_oword_t reg;
1340*4882a593Smuzhiyun 	struct ef4_nic *efx = channel->efx;
1341*4882a593Smuzhiyun 
1342*4882a593Smuzhiyun 	netif_dbg(efx, hw, efx->net_dev,
1343*4882a593Smuzhiyun 		  "channel %d event queue in special buffers %d-%d\n",
1344*4882a593Smuzhiyun 		  channel->channel, channel->eventq.index,
1345*4882a593Smuzhiyun 		  channel->eventq.index + channel->eventq.entries - 1);
1346*4882a593Smuzhiyun 
1347*4882a593Smuzhiyun 	/* Pin event queue buffer */
1348*4882a593Smuzhiyun 	ef4_init_special_buffer(efx, &channel->eventq);
1349*4882a593Smuzhiyun 
1350*4882a593Smuzhiyun 	/* Fill event queue with all ones (i.e. empty events) */
1351*4882a593Smuzhiyun 	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
1352*4882a593Smuzhiyun 
1353*4882a593Smuzhiyun 	/* Push event queue to card */
1354*4882a593Smuzhiyun 	EF4_POPULATE_OWORD_3(reg,
1355*4882a593Smuzhiyun 			     FRF_AZ_EVQ_EN, 1,
1356*4882a593Smuzhiyun 			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
1357*4882a593Smuzhiyun 			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
1358*4882a593Smuzhiyun 	ef4_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1359*4882a593Smuzhiyun 			 channel->channel);
1360*4882a593Smuzhiyun 
1361*4882a593Smuzhiyun 	return 0;
1362*4882a593Smuzhiyun }
1363*4882a593Smuzhiyun 
1364*4882a593Smuzhiyun void ef4_farch_ev_fini(struct ef4_channel *channel)
1365*4882a593Smuzhiyun {
1366*4882a593Smuzhiyun 	ef4_oword_t reg;
1367*4882a593Smuzhiyun 	struct ef4_nic *efx = channel->efx;
1368*4882a593Smuzhiyun 
1369*4882a593Smuzhiyun 	/* Remove event queue from card */
1370*4882a593Smuzhiyun 	EF4_ZERO_OWORD(reg);
1371*4882a593Smuzhiyun 	ef4_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1372*4882a593Smuzhiyun 			 channel->channel);
1373*4882a593Smuzhiyun 
1374*4882a593Smuzhiyun 	/* Unpin event queue */
1375*4882a593Smuzhiyun 	ef4_fini_special_buffer(efx, &channel->eventq);
1376*4882a593Smuzhiyun }
1377*4882a593Smuzhiyun 
1378*4882a593Smuzhiyun /* Free buffers backing event queue */
1379*4882a593Smuzhiyun void ef4_farch_ev_remove(struct ef4_channel *channel)
1380*4882a593Smuzhiyun {
1381*4882a593Smuzhiyun 	ef4_free_special_buffer(channel->efx, &channel->eventq);
1382*4882a593Smuzhiyun }
1383*4882a593Smuzhiyun 
1384*4882a593Smuzhiyun 
1385*4882a593Smuzhiyun void ef4_farch_ev_test_generate(struct ef4_channel *channel)
1386*4882a593Smuzhiyun {
1387*4882a593Smuzhiyun 	ef4_farch_magic_event(channel, EF4_CHANNEL_MAGIC_TEST(channel));
1388*4882a593Smuzhiyun }
1389*4882a593Smuzhiyun 
1390*4882a593Smuzhiyun void ef4_farch_rx_defer_refill(struct ef4_rx_queue *rx_queue)
1391*4882a593Smuzhiyun {
1392*4882a593Smuzhiyun 	ef4_farch_magic_event(ef4_rx_queue_channel(rx_queue),
1393*4882a593Smuzhiyun 			      EF4_CHANNEL_MAGIC_FILL(rx_queue));
1394*4882a593Smuzhiyun }
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun /**************************************************************************
1397*4882a593Smuzhiyun  *
1398*4882a593Smuzhiyun  * Hardware interrupts
1399*4882a593Smuzhiyun  * The hardware interrupt handler does very little work; all the event
1400*4882a593Smuzhiyun  * queue processing is carried out by per-channel tasklets.
1401*4882a593Smuzhiyun  *
1402*4882a593Smuzhiyun  **************************************************************************/
1403*4882a593Smuzhiyun 
1404*4882a593Smuzhiyun /* Enable/disable/generate interrupts */
1405*4882a593Smuzhiyun static inline void ef4_farch_interrupts(struct ef4_nic *efx,
1406*4882a593Smuzhiyun 				      bool enabled, bool force)
1407*4882a593Smuzhiyun {
1408*4882a593Smuzhiyun 	ef4_oword_t int_en_reg_ker;
1409*4882a593Smuzhiyun 
1410*4882a593Smuzhiyun 	EF4_POPULATE_OWORD_3(int_en_reg_ker,
1411*4882a593Smuzhiyun 			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
1412*4882a593Smuzhiyun 			     FRF_AZ_KER_INT_KER, force,
1413*4882a593Smuzhiyun 			     FRF_AZ_DRV_INT_EN_KER, enabled);
1414*4882a593Smuzhiyun 	ef4_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
1415*4882a593Smuzhiyun }
1416*4882a593Smuzhiyun 
1417*4882a593Smuzhiyun void ef4_farch_irq_enable_master(struct ef4_nic *efx)
1418*4882a593Smuzhiyun {
1419*4882a593Smuzhiyun 	EF4_ZERO_OWORD(*((ef4_oword_t *) efx->irq_status.addr));
1420*4882a593Smuzhiyun 	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1421*4882a593Smuzhiyun 
1422*4882a593Smuzhiyun 	ef4_farch_interrupts(efx, true, false);
1423*4882a593Smuzhiyun }
1424*4882a593Smuzhiyun 
1425*4882a593Smuzhiyun void ef4_farch_irq_disable_master(struct ef4_nic *efx)
1426*4882a593Smuzhiyun {
1427*4882a593Smuzhiyun 	/* Disable interrupts */
1428*4882a593Smuzhiyun 	ef4_farch_interrupts(efx, false, false);
1429*4882a593Smuzhiyun }
1430*4882a593Smuzhiyun 
1431*4882a593Smuzhiyun /* Generate a test interrupt.
1432*4882a593Smuzhiyun  * Interrupts must already have been enabled, otherwise nasty things
1433*4882a593Smuzhiyun  * may happen.
1434*4882a593Smuzhiyun  */
1435*4882a593Smuzhiyun int ef4_farch_irq_test_generate(struct ef4_nic *efx)
1436*4882a593Smuzhiyun {
1437*4882a593Smuzhiyun 	ef4_farch_interrupts(efx, true, true);
1438*4882a593Smuzhiyun 	return 0;
1439*4882a593Smuzhiyun }
1440*4882a593Smuzhiyun 
1441*4882a593Smuzhiyun /* Process a fatal interrupt
1442*4882a593Smuzhiyun  * Disable bus mastering ASAP and schedule a reset
1443*4882a593Smuzhiyun  */
1444*4882a593Smuzhiyun irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx)
1445*4882a593Smuzhiyun {
1446*4882a593Smuzhiyun 	struct falcon_nic_data *nic_data = efx->nic_data;
1447*4882a593Smuzhiyun 	ef4_oword_t *int_ker = efx->irq_status.addr;
1448*4882a593Smuzhiyun 	ef4_oword_t fatal_intr;
1449*4882a593Smuzhiyun 	int error, mem_perr;
1450*4882a593Smuzhiyun 
1451*4882a593Smuzhiyun 	ef4_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1452*4882a593Smuzhiyun 	error = EF4_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
1453*4882a593Smuzhiyun 
1454*4882a593Smuzhiyun 	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EF4_OWORD_FMT" status "
1455*4882a593Smuzhiyun 		  EF4_OWORD_FMT ": %s\n", EF4_OWORD_VAL(*int_ker),
1456*4882a593Smuzhiyun 		  EF4_OWORD_VAL(fatal_intr),
1457*4882a593Smuzhiyun 		  error ? "disabling bus mastering" : "no recognised error");
1458*4882a593Smuzhiyun 
1459*4882a593Smuzhiyun 	/* If this is a memory parity error, dump which blocks are offending */
1460*4882a593Smuzhiyun 	mem_perr = (EF4_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
1461*4882a593Smuzhiyun 		    EF4_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
1462*4882a593Smuzhiyun 	if (mem_perr) {
1463*4882a593Smuzhiyun 		ef4_oword_t reg;
1464*4882a593Smuzhiyun 		ef4_reado(efx, &reg, FR_AZ_MEM_STAT);
1465*4882a593Smuzhiyun 		netif_err(efx, hw, efx->net_dev,
1466*4882a593Smuzhiyun 			  "SYSTEM ERROR: memory parity error "EF4_OWORD_FMT"\n",
1467*4882a593Smuzhiyun 			  EF4_OWORD_VAL(reg));
1468*4882a593Smuzhiyun 	}
1469*4882a593Smuzhiyun 
1470*4882a593Smuzhiyun 	/* Disable both devices */
1471*4882a593Smuzhiyun 	pci_clear_master(efx->pci_dev);
1472*4882a593Smuzhiyun 	if (ef4_nic_is_dual_func(efx))
1473*4882a593Smuzhiyun 		pci_clear_master(nic_data->pci_dev2);
1474*4882a593Smuzhiyun 	ef4_farch_irq_disable_master(efx);
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun 	/* Count errors and reset or disable the NIC accordingly */
1477*4882a593Smuzhiyun 	if (efx->int_error_count == 0 ||
1478*4882a593Smuzhiyun 	    time_after(jiffies, efx->int_error_expire)) {
1479*4882a593Smuzhiyun 		efx->int_error_count = 0;
1480*4882a593Smuzhiyun 		efx->int_error_expire =
1481*4882a593Smuzhiyun 			jiffies + EF4_INT_ERROR_EXPIRE * HZ;
1482*4882a593Smuzhiyun 	}
1483*4882a593Smuzhiyun 	if (++efx->int_error_count < EF4_MAX_INT_ERRORS) {
1484*4882a593Smuzhiyun 		netif_err(efx, hw, efx->net_dev,
1485*4882a593Smuzhiyun 			  "SYSTEM ERROR - reset scheduled\n");
1486*4882a593Smuzhiyun 		ef4_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1487*4882a593Smuzhiyun 	} else {
1488*4882a593Smuzhiyun 		netif_err(efx, hw, efx->net_dev,
1489*4882a593Smuzhiyun 			  "SYSTEM ERROR - max number of errors seen. "
1490*4882a593Smuzhiyun 			  "NIC will be disabled\n");
1491*4882a593Smuzhiyun 		ef4_schedule_reset(efx, RESET_TYPE_DISABLE);
1492*4882a593Smuzhiyun 	}
1493*4882a593Smuzhiyun 
1494*4882a593Smuzhiyun 	return IRQ_HANDLED;
1495*4882a593Smuzhiyun }
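/* Illustrative timeline (documentation only): with EF4_MAX_INT_ERRORS == 5
 * and EF4_INT_ERROR_EXPIRE == 3600, up to four fatal errors within an
 * hour each schedule RESET_TYPE_INT_ERROR; the fifth inside the same
 * window disables the NIC, while an error arriving after the window has
 * expired restarts the count from one.
 */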
1496*4882a593Smuzhiyun 
1497*4882a593Smuzhiyun /* Handle a legacy interrupt
1498*4882a593Smuzhiyun  * Acknowledges the interrupt and schedules event queue processing.
1499*4882a593Smuzhiyun  */
1500*4882a593Smuzhiyun irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id)
1501*4882a593Smuzhiyun {
1502*4882a593Smuzhiyun 	struct ef4_nic *efx = dev_id;
1503*4882a593Smuzhiyun 	bool soft_enabled = READ_ONCE(efx->irq_soft_enabled);
1504*4882a593Smuzhiyun 	ef4_oword_t *int_ker = efx->irq_status.addr;
1505*4882a593Smuzhiyun 	irqreturn_t result = IRQ_NONE;
1506*4882a593Smuzhiyun 	struct ef4_channel *channel;
1507*4882a593Smuzhiyun 	ef4_dword_t reg;
1508*4882a593Smuzhiyun 	u32 queues;
1509*4882a593Smuzhiyun 	int syserr;
1510*4882a593Smuzhiyun 
1511*4882a593Smuzhiyun 	/* Read the ISR which also ACKs the interrupts */
1512*4882a593Smuzhiyun 	ef4_readd(efx, &reg, FR_BZ_INT_ISR0);
1513*4882a593Smuzhiyun 	queues = EF4_EXTRACT_DWORD(reg, 0, 31);
1514*4882a593Smuzhiyun 
1515*4882a593Smuzhiyun 	/* Legacy interrupts are disabled too late by the EEH kernel
1516*4882a593Smuzhiyun 	 * code. Disable them earlier.
1517*4882a593Smuzhiyun 	 * If an EEH error occurred, the read will have returned all ones.
1518*4882a593Smuzhiyun 	 */
1519*4882a593Smuzhiyun 	if (EF4_DWORD_IS_ALL_ONES(reg) && ef4_try_recovery(efx) &&
1520*4882a593Smuzhiyun 	    !efx->eeh_disabled_legacy_irq) {
1521*4882a593Smuzhiyun 		disable_irq_nosync(efx->legacy_irq);
1522*4882a593Smuzhiyun 		efx->eeh_disabled_legacy_irq = true;
1523*4882a593Smuzhiyun 	}
1524*4882a593Smuzhiyun 
1525*4882a593Smuzhiyun 	/* Handle non-event-queue sources */
1526*4882a593Smuzhiyun 	if (queues & (1U << efx->irq_level) && soft_enabled) {
1527*4882a593Smuzhiyun 		syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1528*4882a593Smuzhiyun 		if (unlikely(syserr))
1529*4882a593Smuzhiyun 			return ef4_farch_fatal_interrupt(efx);
1530*4882a593Smuzhiyun 		efx->last_irq_cpu = raw_smp_processor_id();
1531*4882a593Smuzhiyun 	}
1532*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun 	if (queues != 0) {
1534*4882a593Smuzhiyun 		efx->irq_zero_count = 0;
1535*4882a593Smuzhiyun 
1536*4882a593Smuzhiyun 		/* Schedule processing of any interrupting queues */
1537*4882a593Smuzhiyun 		if (likely(soft_enabled)) {
1538*4882a593Smuzhiyun 			ef4_for_each_channel(channel, efx) {
1539*4882a593Smuzhiyun 				if (queues & 1)
1540*4882a593Smuzhiyun 					ef4_schedule_channel_irq(channel);
1541*4882a593Smuzhiyun 				queues >>= 1;
1542*4882a593Smuzhiyun 			}
1543*4882a593Smuzhiyun 		}
1544*4882a593Smuzhiyun 		result = IRQ_HANDLED;
1545*4882a593Smuzhiyun 
1546*4882a593Smuzhiyun 	} else {
1547*4882a593Smuzhiyun 		ef4_qword_t *event;
1548*4882a593Smuzhiyun 
1549*4882a593Smuzhiyun 		/* Legacy ISR read can return zero once (SF bug 15783) */
1550*4882a593Smuzhiyun 
1551*4882a593Smuzhiyun 		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
1552*4882a593Smuzhiyun 		 * because this might be a shared interrupt. */
1553*4882a593Smuzhiyun 		if (efx->irq_zero_count++ == 0)
1554*4882a593Smuzhiyun 			result = IRQ_HANDLED;
1555*4882a593Smuzhiyun 
1556*4882a593Smuzhiyun 		/* Ensure we schedule or rearm all event queues */
1557*4882a593Smuzhiyun 		if (likely(soft_enabled)) {
1558*4882a593Smuzhiyun 			ef4_for_each_channel(channel, efx) {
1559*4882a593Smuzhiyun 				event = ef4_event(channel,
1560*4882a593Smuzhiyun 						  channel->eventq_read_ptr);
1561*4882a593Smuzhiyun 				if (ef4_event_present(event))
1562*4882a593Smuzhiyun 					ef4_schedule_channel_irq(channel);
1563*4882a593Smuzhiyun 				else
1564*4882a593Smuzhiyun 					ef4_farch_ev_read_ack(channel);
1565*4882a593Smuzhiyun 			}
1566*4882a593Smuzhiyun 		}
1567*4882a593Smuzhiyun 	}
1568*4882a593Smuzhiyun 
1569*4882a593Smuzhiyun 	if (result == IRQ_HANDLED)
1570*4882a593Smuzhiyun 		netif_vdbg(efx, intr, efx->net_dev,
1571*4882a593Smuzhiyun 			   "IRQ %d on CPU %d status " EF4_DWORD_FMT "\n",
1572*4882a593Smuzhiyun 			   irq, raw_smp_processor_id(), EF4_DWORD_VAL(reg));
1573*4882a593Smuzhiyun 
1574*4882a593Smuzhiyun 	return result;
1575*4882a593Smuzhiyun }
1576*4882a593Smuzhiyun 
1577*4882a593Smuzhiyun /* Handle an MSI interrupt
1578*4882a593Smuzhiyun  *
1579*4882a593Smuzhiyun  * Handle an MSI hardware interrupt.  This routine schedules event
1580*4882a593Smuzhiyun  * queue processing.  No interrupt acknowledgement cycle is necessary.
1581*4882a593Smuzhiyun  * Also, we never need to check that the interrupt is for us, since
1582*4882a593Smuzhiyun  * MSI interrupts cannot be shared.
1583*4882a593Smuzhiyun  */
1584*4882a593Smuzhiyun irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id)
1585*4882a593Smuzhiyun {
1586*4882a593Smuzhiyun 	struct ef4_msi_context *context = dev_id;
1587*4882a593Smuzhiyun 	struct ef4_nic *efx = context->efx;
1588*4882a593Smuzhiyun 	ef4_oword_t *int_ker = efx->irq_status.addr;
1589*4882a593Smuzhiyun 	int syserr;
1590*4882a593Smuzhiyun 
1591*4882a593Smuzhiyun 	netif_vdbg(efx, intr, efx->net_dev,
1592*4882a593Smuzhiyun 		   "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
1593*4882a593Smuzhiyun 		   irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));
1594*4882a593Smuzhiyun 
1595*4882a593Smuzhiyun 	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
1596*4882a593Smuzhiyun 		return IRQ_HANDLED;
1597*4882a593Smuzhiyun 
1598*4882a593Smuzhiyun 	/* Handle non-event-queue sources */
1599*4882a593Smuzhiyun 	if (context->index == efx->irq_level) {
1600*4882a593Smuzhiyun 		syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1601*4882a593Smuzhiyun 		if (unlikely(syserr))
1602*4882a593Smuzhiyun 			return ef4_farch_fatal_interrupt(efx);
1603*4882a593Smuzhiyun 		efx->last_irq_cpu = raw_smp_processor_id();
1604*4882a593Smuzhiyun 	}
1605*4882a593Smuzhiyun 
1606*4882a593Smuzhiyun 	/* Schedule processing of the channel */
1607*4882a593Smuzhiyun 	ef4_schedule_channel_irq(efx->channel[context->index]);
1608*4882a593Smuzhiyun 
1609*4882a593Smuzhiyun 	return IRQ_HANDLED;
1610*4882a593Smuzhiyun }
1611*4882a593Smuzhiyun 
1612*4882a593Smuzhiyun /* Set up the RSS indirection table.
1613*4882a593Smuzhiyun  * This maps from the hash value of the packet to the RXQ.
1614*4882a593Smuzhiyun  */
1615*4882a593Smuzhiyun void ef4_farch_rx_push_indir_table(struct ef4_nic *efx)
1616*4882a593Smuzhiyun {
1617*4882a593Smuzhiyun 	size_t i = 0;
1618*4882a593Smuzhiyun 	ef4_dword_t dword;
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun 	BUG_ON(ef4_nic_rev(efx) < EF4_REV_FALCON_B0);
1621*4882a593Smuzhiyun 
1622*4882a593Smuzhiyun 	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1623*4882a593Smuzhiyun 		     FR_BZ_RX_INDIRECTION_TBL_ROWS);
1624*4882a593Smuzhiyun 
1625*4882a593Smuzhiyun 	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
1626*4882a593Smuzhiyun 		EF4_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1627*4882a593Smuzhiyun 				     efx->rx_indir_table[i]);
1628*4882a593Smuzhiyun 		ef4_writed(efx, &dword,
1629*4882a593Smuzhiyun 			   FR_BZ_RX_INDIRECTION_TBL +
1630*4882a593Smuzhiyun 			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
1631*4882a593Smuzhiyun 	}
1632*4882a593Smuzhiyun }
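/* Hypothetical sketch (not part of this driver): before pushing the
 * table, a caller could spread the hash buckets evenly across the RX
 * channels using the stock ethtool helper.  n_rx_channels is assumed
 * to be the active RX channel count.
 */
#if 0
static void example_fill_indir_table(struct ef4_nic *efx)
{
	size_t i;

	/* Round-robin assignment: bucket i -> queue i % n_rx_channels */
	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
		efx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->n_rx_channels);
	ef4_farch_rx_push_indir_table(efx);
}
#endif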
1633*4882a593Smuzhiyun 
1634*4882a593Smuzhiyun /* Looks at available SRAM resources and works out how many queues we
1635*4882a593Smuzhiyun  * can support, and where things like descriptor caches should live.
1636*4882a593Smuzhiyun  *
1637*4882a593Smuzhiyun  * SRAM is split up as follows:
1638*4882a593Smuzhiyun  * 0                          buftbl entries for channels
1639*4882a593Smuzhiyun  * efx->vf_buftbl_base        buftbl entries for SR-IOV
1640*4882a593Smuzhiyun  * efx->rx_dc_base            RX descriptor caches
1641*4882a593Smuzhiyun  * efx->tx_dc_base            TX descriptor caches
1642*4882a593Smuzhiyun  */
1643*4882a593Smuzhiyun void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw)
1644*4882a593Smuzhiyun {
1645*4882a593Smuzhiyun 	unsigned vi_count;
1646*4882a593Smuzhiyun 
1647*4882a593Smuzhiyun 	/* Account for the buffer table entries backing the datapath channels
1648*4882a593Smuzhiyun 	 * and the descriptor caches for those channels.
1649*4882a593Smuzhiyun 	 */
1650*4882a593Smuzhiyun 	vi_count = max(efx->n_channels, efx->n_tx_channels * EF4_TXQ_TYPES);
1651*4882a593Smuzhiyun 
1652*4882a593Smuzhiyun 	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
1653*4882a593Smuzhiyun 	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
1654*4882a593Smuzhiyun }
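/* Worked example (illustrative numbers only): with vi_count == 32,
 * TX_DC_ENTRIES == 16 and RX_DC_ENTRIES == 64, a limit of
 * sram_lim_qw == 8192 yields tx_dc_base == 8192 - 32 * 16 == 7680 and
 * rx_dc_base == 7680 - 32 * 64 == 5632, leaving everything below
 * rx_dc_base for buffer table entries.
 */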
1655*4882a593Smuzhiyun 
1656*4882a593Smuzhiyun u32 ef4_farch_fpga_ver(struct ef4_nic *efx)
1657*4882a593Smuzhiyun {
1658*4882a593Smuzhiyun 	ef4_oword_t altera_build;
1659*4882a593Smuzhiyun 	ef4_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1660*4882a593Smuzhiyun 	return EF4_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
1661*4882a593Smuzhiyun }
1662*4882a593Smuzhiyun 
1663*4882a593Smuzhiyun void ef4_farch_init_common(struct ef4_nic *efx)
1664*4882a593Smuzhiyun {
1665*4882a593Smuzhiyun 	ef4_oword_t temp;
1666*4882a593Smuzhiyun 
1667*4882a593Smuzhiyun 	/* Set positions of descriptor caches in SRAM. */
1668*4882a593Smuzhiyun 	EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
1669*4882a593Smuzhiyun 	ef4_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
1670*4882a593Smuzhiyun 	EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
1671*4882a593Smuzhiyun 	ef4_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1672*4882a593Smuzhiyun 
1673*4882a593Smuzhiyun 	/* Set TX descriptor cache size. */
1674*4882a593Smuzhiyun 	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
1675*4882a593Smuzhiyun 	EF4_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
1676*4882a593Smuzhiyun 	ef4_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
1677*4882a593Smuzhiyun 
1678*4882a593Smuzhiyun 	/* Set RX descriptor cache size.  Set low watermark to size-8, as
1679*4882a593Smuzhiyun 	 * this allows most efficient prefetching.
1680*4882a593Smuzhiyun 	 */
1681*4882a593Smuzhiyun 	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
1682*4882a593Smuzhiyun 	EF4_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
1683*4882a593Smuzhiyun 	ef4_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
1684*4882a593Smuzhiyun 	EF4_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
1685*4882a593Smuzhiyun 	ef4_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
1686*4882a593Smuzhiyun 
1687*4882a593Smuzhiyun 	/* Program INT_KER address */
1688*4882a593Smuzhiyun 	EF4_POPULATE_OWORD_2(temp,
1689*4882a593Smuzhiyun 			     FRF_AZ_NORM_INT_VEC_DIS_KER,
1690*4882a593Smuzhiyun 			     EF4_INT_MODE_USE_MSI(efx),
1691*4882a593Smuzhiyun 			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1692*4882a593Smuzhiyun 	ef4_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1693*4882a593Smuzhiyun 
1694*4882a593Smuzhiyun 	/* Use a valid MSI-X vector */
1695*4882a593Smuzhiyun 	efx->irq_level = 0;
1696*4882a593Smuzhiyun 
1697*4882a593Smuzhiyun 	/* Enable all the genuinely fatal interrupts.  (They are still
1698*4882a593Smuzhiyun 	 * masked by the overall interrupt mask, controlled by
1699*4882a593Smuzhiyun 	 * falcon_interrupts()).
1700*4882a593Smuzhiyun 	 *
1701*4882a593Smuzhiyun 	 * Note: All other fatal interrupts are enabled
1702*4882a593Smuzhiyun 	 */
1703*4882a593Smuzhiyun 	EF4_POPULATE_OWORD_3(temp,
1704*4882a593Smuzhiyun 			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
1705*4882a593Smuzhiyun 			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
1706*4882a593Smuzhiyun 			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
1707*4882a593Smuzhiyun 	EF4_INVERT_OWORD(temp);
1708*4882a593Smuzhiyun 	ef4_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1709*4882a593Smuzhiyun 
1710*4882a593Smuzhiyun 	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
1711*4882a593Smuzhiyun 	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
1712*4882a593Smuzhiyun 	 */
1713*4882a593Smuzhiyun 	ef4_reado(efx, &temp, FR_AZ_TX_RESERVED);
1714*4882a593Smuzhiyun 	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
1715*4882a593Smuzhiyun 	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
1716*4882a593Smuzhiyun 	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
1717*4882a593Smuzhiyun 	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
1718*4882a593Smuzhiyun 	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
1719*4882a593Smuzhiyun 	/* Enable SW_EV to inherit in char driver - assume harmless here */
1720*4882a593Smuzhiyun 	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
1721*4882a593Smuzhiyun 	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
1722*4882a593Smuzhiyun 	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
1723*4882a593Smuzhiyun 	/* Disable hardware watchdog which can misfire */
1724*4882a593Smuzhiyun 	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
1725*4882a593Smuzhiyun 	/* Squash TX of packets of 16 bytes or less */
1726*4882a593Smuzhiyun 	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0)
1727*4882a593Smuzhiyun 		EF4_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1728*4882a593Smuzhiyun 	ef4_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1729*4882a593Smuzhiyun 
1730*4882a593Smuzhiyun 	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
1731*4882a593Smuzhiyun 		EF4_POPULATE_OWORD_4(temp,
1732*4882a593Smuzhiyun 				     /* Default values */
1733*4882a593Smuzhiyun 				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
1734*4882a593Smuzhiyun 				     FRF_BZ_TX_PACE_SB_AF, 0xb,
1735*4882a593Smuzhiyun 				     FRF_BZ_TX_PACE_FB_BASE, 0,
1736*4882a593Smuzhiyun 				     /* Allow large pace values in the
1737*4882a593Smuzhiyun 				      * fast bin. */
1738*4882a593Smuzhiyun 				     FRF_BZ_TX_PACE_BIN_TH,
1739*4882a593Smuzhiyun 				     FFE_BZ_TX_PACE_RESERVED);
1740*4882a593Smuzhiyun 		ef4_writeo(efx, &temp, FR_BZ_TX_PACE);
1741*4882a593Smuzhiyun 	}
1742*4882a593Smuzhiyun }
1743*4882a593Smuzhiyun 
1744*4882a593Smuzhiyun /**************************************************************************
1745*4882a593Smuzhiyun  *
1746*4882a593Smuzhiyun  * Filter tables
1747*4882a593Smuzhiyun  *
1748*4882a593Smuzhiyun  **************************************************************************
1749*4882a593Smuzhiyun  */
1750*4882a593Smuzhiyun 
1751*4882a593Smuzhiyun /* "Fudge factors" - difference between programmed value and actual depth.
1752*4882a593Smuzhiyun  * Due to the pipelined implementation, we need to program the H/W with a
1753*4882a593Smuzhiyun  * value that is larger than the hop limit we want.
1754*4882a593Smuzhiyun  */
1755*4882a593Smuzhiyun #define EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
1756*4882a593Smuzhiyun #define EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1
1757*4882a593Smuzhiyun 
1758*4882a593Smuzhiyun /* Hard maximum search limit.  Hardware will time-out beyond 200-something.
1759*4882a593Smuzhiyun  * We also need to avoid infinite loops in ef4_farch_filter_search() when the
1760*4882a593Smuzhiyun  * table is full.
1761*4882a593Smuzhiyun  */
1762*4882a593Smuzhiyun #define EF4_FARCH_FILTER_CTL_SRCH_MAX 200
1763*4882a593Smuzhiyun 
1764*4882a593Smuzhiyun /* Don't try very hard to find space for performance hints, as this is
1765*4882a593Smuzhiyun  * counter-productive. */
1766*4882a593Smuzhiyun #define EF4_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
1767*4882a593Smuzhiyun 
1768*4882a593Smuzhiyun enum ef4_farch_filter_type {
1769*4882a593Smuzhiyun 	EF4_FARCH_FILTER_TCP_FULL = 0,
1770*4882a593Smuzhiyun 	EF4_FARCH_FILTER_TCP_WILD,
1771*4882a593Smuzhiyun 	EF4_FARCH_FILTER_UDP_FULL,
1772*4882a593Smuzhiyun 	EF4_FARCH_FILTER_UDP_WILD,
1773*4882a593Smuzhiyun 	EF4_FARCH_FILTER_MAC_FULL = 4,
1774*4882a593Smuzhiyun 	EF4_FARCH_FILTER_MAC_WILD,
1775*4882a593Smuzhiyun 	EF4_FARCH_FILTER_UC_DEF = 8,
1776*4882a593Smuzhiyun 	EF4_FARCH_FILTER_MC_DEF,
1777*4882a593Smuzhiyun 	EF4_FARCH_FILTER_TYPE_COUNT,		/* number of specific types */
1778*4882a593Smuzhiyun };
1779*4882a593Smuzhiyun 
1780*4882a593Smuzhiyun enum ef4_farch_filter_table_id {
1781*4882a593Smuzhiyun 	EF4_FARCH_FILTER_TABLE_RX_IP = 0,
1782*4882a593Smuzhiyun 	EF4_FARCH_FILTER_TABLE_RX_MAC,
1783*4882a593Smuzhiyun 	EF4_FARCH_FILTER_TABLE_RX_DEF,
1784*4882a593Smuzhiyun 	EF4_FARCH_FILTER_TABLE_TX_MAC,
1785*4882a593Smuzhiyun 	EF4_FARCH_FILTER_TABLE_COUNT,
1786*4882a593Smuzhiyun };
1787*4882a593Smuzhiyun 
1788*4882a593Smuzhiyun enum ef4_farch_filter_index {
1789*4882a593Smuzhiyun 	EF4_FARCH_FILTER_INDEX_UC_DEF,
1790*4882a593Smuzhiyun 	EF4_FARCH_FILTER_INDEX_MC_DEF,
1791*4882a593Smuzhiyun 	EF4_FARCH_FILTER_SIZE_RX_DEF,
1792*4882a593Smuzhiyun };
1793*4882a593Smuzhiyun 
1794*4882a593Smuzhiyun struct ef4_farch_filter_spec {
1795*4882a593Smuzhiyun 	u8	type:4;
1796*4882a593Smuzhiyun 	u8	priority:4;
1797*4882a593Smuzhiyun 	u8	flags;
1798*4882a593Smuzhiyun 	u16	dmaq_id;
1799*4882a593Smuzhiyun 	u32	data[3];
1800*4882a593Smuzhiyun };
1801*4882a593Smuzhiyun 
1802*4882a593Smuzhiyun struct ef4_farch_filter_table {
1803*4882a593Smuzhiyun 	enum ef4_farch_filter_table_id id;
1804*4882a593Smuzhiyun 	u32		offset;		/* address of table relative to BAR */
1805*4882a593Smuzhiyun 	unsigned	size;		/* number of entries */
1806*4882a593Smuzhiyun 	unsigned	step;		/* step between entries */
1807*4882a593Smuzhiyun 	unsigned	used;		/* number currently used */
1808*4882a593Smuzhiyun 	unsigned long	*used_bitmap;
1809*4882a593Smuzhiyun 	struct ef4_farch_filter_spec *spec;
1810*4882a593Smuzhiyun 	unsigned	search_limit[EF4_FARCH_FILTER_TYPE_COUNT];
1811*4882a593Smuzhiyun };
1812*4882a593Smuzhiyun 
1813*4882a593Smuzhiyun struct ef4_farch_filter_state {
1814*4882a593Smuzhiyun 	struct ef4_farch_filter_table table[EF4_FARCH_FILTER_TABLE_COUNT];
1815*4882a593Smuzhiyun };
1816*4882a593Smuzhiyun 
1817*4882a593Smuzhiyun static void
1818*4882a593Smuzhiyun ef4_farch_filter_table_clear_entry(struct ef4_nic *efx,
1819*4882a593Smuzhiyun 				   struct ef4_farch_filter_table *table,
1820*4882a593Smuzhiyun 				   unsigned int filter_idx);
1821*4882a593Smuzhiyun 
1822*4882a593Smuzhiyun /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
1823*4882a593Smuzhiyun  * key derived from the n-tuple.  The initial LFSR state is 0xffff. */
1824*4882a593Smuzhiyun static u16 ef4_farch_filter_hash(u32 key)
1825*4882a593Smuzhiyun {
1826*4882a593Smuzhiyun 	u16 tmp;
1827*4882a593Smuzhiyun 
1828*4882a593Smuzhiyun 	/* First 16 rounds */
1829*4882a593Smuzhiyun 	tmp = 0x1fff ^ key >> 16;
1830*4882a593Smuzhiyun 	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
1831*4882a593Smuzhiyun 	tmp = tmp ^ tmp >> 9;
1832*4882a593Smuzhiyun 	/* Last 16 rounds */
1833*4882a593Smuzhiyun 	tmp = tmp ^ tmp << 13 ^ key;
1834*4882a593Smuzhiyun 	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
1835*4882a593Smuzhiyun 	return tmp ^ tmp >> 9;
1836*4882a593Smuzhiyun }
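/* Bit-serial reference (a sketch for documentation only, written from
 * the comment's description of the LFSR; the exact bit ordering is an
 * assumption and has not been verified against hardware): clock a
 * Galois-form x^16 + x^3 + 1 LFSR once per key bit, MSB first, from
 * the initial state 0xffff.
 */
#if 0
static u16 example_filter_hash_serial(u32 key)
{
	u16 state = 0xffff;
	int i;

	for (i = 31; i >= 0; i--) {
		u16 fb = ((state >> 15) ^ (key >> i)) & 1;

		state <<= 1;
		if (fb)
			state ^= (1 << 3) | 1;	/* taps for x^3 and x^0 */
	}
	return state;
}
#endif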
1837*4882a593Smuzhiyun 
1838*4882a593Smuzhiyun /* To allow for hash collisions, filter search continues at these
1839*4882a593Smuzhiyun  * increments from the first possible entry selected by the hash. */
1840*4882a593Smuzhiyun static u16 ef4_farch_filter_increment(u32 key)
1841*4882a593Smuzhiyun {
1842*4882a593Smuzhiyun 	return key * 2 - 1;
1843*4882a593Smuzhiyun }
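/* Illustrative note (documentation only): for a given key the candidate
 * slots are hash, hash + incr, hash + 2 * incr, ... taken modulo the
 * table size.  key * 2 - 1 is always odd, so with a power-of-two table
 * the stride is co-prime with the size and the probe sequence visits
 * every slot before it repeats.
 */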
1844*4882a593Smuzhiyun 
1845*4882a593Smuzhiyun static enum ef4_farch_filter_table_id
1846*4882a593Smuzhiyun ef4_farch_filter_spec_table_id(const struct ef4_farch_filter_spec *spec)
1847*4882a593Smuzhiyun {
1848*4882a593Smuzhiyun 	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
1849*4882a593Smuzhiyun 		     (EF4_FARCH_FILTER_TCP_FULL >> 2));
1850*4882a593Smuzhiyun 	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
1851*4882a593Smuzhiyun 		     (EF4_FARCH_FILTER_TCP_WILD >> 2));
1852*4882a593Smuzhiyun 	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
1853*4882a593Smuzhiyun 		     (EF4_FARCH_FILTER_UDP_FULL >> 2));
1854*4882a593Smuzhiyun 	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
1855*4882a593Smuzhiyun 		     (EF4_FARCH_FILTER_UDP_WILD >> 2));
1856*4882a593Smuzhiyun 	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_MAC !=
1857*4882a593Smuzhiyun 		     (EF4_FARCH_FILTER_MAC_FULL >> 2));
1858*4882a593Smuzhiyun 	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_MAC !=
1859*4882a593Smuzhiyun 		     (EF4_FARCH_FILTER_MAC_WILD >> 2));
1860*4882a593Smuzhiyun 	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_TX_MAC !=
1861*4882a593Smuzhiyun 		     EF4_FARCH_FILTER_TABLE_RX_MAC + 2);
1862*4882a593Smuzhiyun 	return (spec->type >> 2) + ((spec->flags & EF4_FILTER_FLAG_TX) ? 2 : 0);
1863*4882a593Smuzhiyun }
1864*4882a593Smuzhiyun 
1865*4882a593Smuzhiyun static void ef4_farch_filter_push_rx_config(struct ef4_nic *efx)
1866*4882a593Smuzhiyun {
1867*4882a593Smuzhiyun 	struct ef4_farch_filter_state *state = efx->filter_state;
1868*4882a593Smuzhiyun 	struct ef4_farch_filter_table *table;
1869*4882a593Smuzhiyun 	ef4_oword_t filter_ctl;
1870*4882a593Smuzhiyun 
1871*4882a593Smuzhiyun 	ef4_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
1872*4882a593Smuzhiyun 
1873*4882a593Smuzhiyun 	table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
1874*4882a593Smuzhiyun 	EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
1875*4882a593Smuzhiyun 			    table->search_limit[EF4_FARCH_FILTER_TCP_FULL] +
1876*4882a593Smuzhiyun 			    EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1877*4882a593Smuzhiyun 	EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
1878*4882a593Smuzhiyun 			    table->search_limit[EF4_FARCH_FILTER_TCP_WILD] +
1879*4882a593Smuzhiyun 			    EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1880*4882a593Smuzhiyun 	EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
1881*4882a593Smuzhiyun 			    table->search_limit[EF4_FARCH_FILTER_UDP_FULL] +
1882*4882a593Smuzhiyun 			    EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1883*4882a593Smuzhiyun 	EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
1884*4882a593Smuzhiyun 			    table->search_limit[EF4_FARCH_FILTER_UDP_WILD] +
1885*4882a593Smuzhiyun 			    EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1886*4882a593Smuzhiyun 
1887*4882a593Smuzhiyun 	table = &state->table[EF4_FARCH_FILTER_TABLE_RX_MAC];
1888*4882a593Smuzhiyun 	if (table->size) {
1889*4882a593Smuzhiyun 		EF4_SET_OWORD_FIELD(
1890*4882a593Smuzhiyun 			filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
1891*4882a593Smuzhiyun 			table->search_limit[EF4_FARCH_FILTER_MAC_FULL] +
1892*4882a593Smuzhiyun 			EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1893*4882a593Smuzhiyun 		EF4_SET_OWORD_FIELD(
1894*4882a593Smuzhiyun 			filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
1895*4882a593Smuzhiyun 			table->search_limit[EF4_FARCH_FILTER_MAC_WILD] +
1896*4882a593Smuzhiyun 			EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1897*4882a593Smuzhiyun 	}
1898*4882a593Smuzhiyun 
1899*4882a593Smuzhiyun 	table = &state->table[EF4_FARCH_FILTER_TABLE_RX_DEF];
1900*4882a593Smuzhiyun 	if (table->size) {
1901*4882a593Smuzhiyun 		EF4_SET_OWORD_FIELD(
1902*4882a593Smuzhiyun 			filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
1903*4882a593Smuzhiyun 			table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
1904*4882a593Smuzhiyun 		EF4_SET_OWORD_FIELD(
1905*4882a593Smuzhiyun 			filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
1906*4882a593Smuzhiyun 			!!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags &
1907*4882a593Smuzhiyun 			   EF4_FILTER_FLAG_RX_RSS));
1908*4882a593Smuzhiyun 		EF4_SET_OWORD_FIELD(
1909*4882a593Smuzhiyun 			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
1910*4882a593Smuzhiyun 			table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
1911*4882a593Smuzhiyun 		EF4_SET_OWORD_FIELD(
1912*4882a593Smuzhiyun 			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
1913*4882a593Smuzhiyun 			!!(table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags &
1914*4882a593Smuzhiyun 			   EF4_FILTER_FLAG_RX_RSS));
1915*4882a593Smuzhiyun 
1916*4882a593Smuzhiyun 		/* There is a single bit to enable RX scatter for all
1917*4882a593Smuzhiyun 		 * unmatched packets.  Only set it if scatter is
1918*4882a593Smuzhiyun 		 * enabled in both filter specs.
1919*4882a593Smuzhiyun 		 */
1920*4882a593Smuzhiyun 		EF4_SET_OWORD_FIELD(
1921*4882a593Smuzhiyun 			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
1922*4882a593Smuzhiyun 			!!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags &
1923*4882a593Smuzhiyun 			   table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags &
1924*4882a593Smuzhiyun 			   EF4_FILTER_FLAG_RX_SCATTER));
1925*4882a593Smuzhiyun 	} else if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
1926*4882a593Smuzhiyun 		/* We don't expose 'default' filters because unmatched
1927*4882a593Smuzhiyun 		 * packets always go to the queue number found in the
1928*4882a593Smuzhiyun 		 * RSS table.  But we still need to set the RX scatter
1929*4882a593Smuzhiyun 		 * bit here.
1930*4882a593Smuzhiyun 		 */
1931*4882a593Smuzhiyun 		EF4_SET_OWORD_FIELD(
1932*4882a593Smuzhiyun 			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
1933*4882a593Smuzhiyun 			efx->rx_scatter);
1934*4882a593Smuzhiyun 	}
1935*4882a593Smuzhiyun 
1936*4882a593Smuzhiyun 	ef4_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
1937*4882a593Smuzhiyun }
1938*4882a593Smuzhiyun 
1939*4882a593Smuzhiyun static void ef4_farch_filter_push_tx_limits(struct ef4_nic *efx)
1940*4882a593Smuzhiyun {
1941*4882a593Smuzhiyun 	struct ef4_farch_filter_state *state = efx->filter_state;
1942*4882a593Smuzhiyun 	struct ef4_farch_filter_table *table;
1943*4882a593Smuzhiyun 	ef4_oword_t tx_cfg;
1944*4882a593Smuzhiyun 
1945*4882a593Smuzhiyun 	ef4_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
1946*4882a593Smuzhiyun 
1947*4882a593Smuzhiyun 	table = &state->table[EF4_FARCH_FILTER_TABLE_TX_MAC];
1948*4882a593Smuzhiyun 	if (table->size) {
1949*4882a593Smuzhiyun 		EF4_SET_OWORD_FIELD(
1950*4882a593Smuzhiyun 			tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
1951*4882a593Smuzhiyun 			table->search_limit[EF4_FARCH_FILTER_MAC_FULL] +
1952*4882a593Smuzhiyun 			EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1953*4882a593Smuzhiyun 		EF4_SET_OWORD_FIELD(
1954*4882a593Smuzhiyun 			tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
1955*4882a593Smuzhiyun 			table->search_limit[EF4_FARCH_FILTER_MAC_WILD] +
1956*4882a593Smuzhiyun 			EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1957*4882a593Smuzhiyun 	}
1958*4882a593Smuzhiyun 
1959*4882a593Smuzhiyun 	ef4_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
1960*4882a593Smuzhiyun }
1961*4882a593Smuzhiyun 
1962*4882a593Smuzhiyun static int
1963*4882a593Smuzhiyun ef4_farch_filter_from_gen_spec(struct ef4_farch_filter_spec *spec,
1964*4882a593Smuzhiyun 			       const struct ef4_filter_spec *gen_spec)
1965*4882a593Smuzhiyun {
1966*4882a593Smuzhiyun 	bool is_full = false;
1967*4882a593Smuzhiyun 
1968*4882a593Smuzhiyun 	if ((gen_spec->flags & EF4_FILTER_FLAG_RX_RSS) &&
1969*4882a593Smuzhiyun 	    gen_spec->rss_context != EF4_FILTER_RSS_CONTEXT_DEFAULT)
1970*4882a593Smuzhiyun 		return -EINVAL;
1971*4882a593Smuzhiyun 
1972*4882a593Smuzhiyun 	spec->priority = gen_spec->priority;
1973*4882a593Smuzhiyun 	spec->flags = gen_spec->flags;
1974*4882a593Smuzhiyun 	spec->dmaq_id = gen_spec->dmaq_id;
1975*4882a593Smuzhiyun 
1976*4882a593Smuzhiyun 	switch (gen_spec->match_flags) {
1977*4882a593Smuzhiyun 	case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
1978*4882a593Smuzhiyun 	      EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT |
1979*4882a593Smuzhiyun 	      EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT):
1980*4882a593Smuzhiyun 		is_full = true;
1981*4882a593Smuzhiyun 		fallthrough;
1982*4882a593Smuzhiyun 	case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
1983*4882a593Smuzhiyun 	      EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT): {
1984*4882a593Smuzhiyun 		__be32 rhost, host1, host2;
1985*4882a593Smuzhiyun 		__be16 rport, port1, port2;
1986*4882a593Smuzhiyun 
1987*4882a593Smuzhiyun 		EF4_BUG_ON_PARANOID(!(gen_spec->flags & EF4_FILTER_FLAG_RX));
1988*4882a593Smuzhiyun 
1989*4882a593Smuzhiyun 		if (gen_spec->ether_type != htons(ETH_P_IP))
1990*4882a593Smuzhiyun 			return -EPROTONOSUPPORT;
1991*4882a593Smuzhiyun 		if (gen_spec->loc_port == 0 ||
1992*4882a593Smuzhiyun 		    (is_full && gen_spec->rem_port == 0))
1993*4882a593Smuzhiyun 			return -EADDRNOTAVAIL;
1994*4882a593Smuzhiyun 		switch (gen_spec->ip_proto) {
1995*4882a593Smuzhiyun 		case IPPROTO_TCP:
1996*4882a593Smuzhiyun 			spec->type = (is_full ? EF4_FARCH_FILTER_TCP_FULL :
1997*4882a593Smuzhiyun 				      EF4_FARCH_FILTER_TCP_WILD);
1998*4882a593Smuzhiyun 			break;
1999*4882a593Smuzhiyun 		case IPPROTO_UDP:
2000*4882a593Smuzhiyun 			spec->type = (is_full ? EF4_FARCH_FILTER_UDP_FULL :
2001*4882a593Smuzhiyun 				      EF4_FARCH_FILTER_UDP_WILD);
2002*4882a593Smuzhiyun 			break;
2003*4882a593Smuzhiyun 		default:
2004*4882a593Smuzhiyun 			return -EPROTONOSUPPORT;
2005*4882a593Smuzhiyun 		}
2006*4882a593Smuzhiyun 
2007*4882a593Smuzhiyun 		/* Filter is constructed in terms of source and destination,
2008*4882a593Smuzhiyun 		 * with the odd wrinkle that the ports are swapped in a UDP
2009*4882a593Smuzhiyun 		 * wildcard filter.  We need to convert from local and remote
2010*4882a593Smuzhiyun 		 * (= zero for wildcard) addresses.
2011*4882a593Smuzhiyun 		 */
2012*4882a593Smuzhiyun 		rhost = is_full ? gen_spec->rem_host[0] : 0;
2013*4882a593Smuzhiyun 		rport = is_full ? gen_spec->rem_port : 0;
2014*4882a593Smuzhiyun 		host1 = rhost;
2015*4882a593Smuzhiyun 		host2 = gen_spec->loc_host[0];
2016*4882a593Smuzhiyun 		if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
2017*4882a593Smuzhiyun 			port1 = gen_spec->loc_port;
2018*4882a593Smuzhiyun 			port2 = rport;
2019*4882a593Smuzhiyun 		} else {
2020*4882a593Smuzhiyun 			port1 = rport;
2021*4882a593Smuzhiyun 			port2 = gen_spec->loc_port;
2022*4882a593Smuzhiyun 		}
2023*4882a593Smuzhiyun 		spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
2024*4882a593Smuzhiyun 		spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
2025*4882a593Smuzhiyun 		spec->data[2] = ntohl(host2);
2026*4882a593Smuzhiyun 
2027*4882a593Smuzhiyun 		break;
2028*4882a593Smuzhiyun 	}
2029*4882a593Smuzhiyun 
2030*4882a593Smuzhiyun 	case EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_OUTER_VID:
2031*4882a593Smuzhiyun 		is_full = true;
2032*4882a593Smuzhiyun 		fallthrough;
2033*4882a593Smuzhiyun 	case EF4_FILTER_MATCH_LOC_MAC:
2034*4882a593Smuzhiyun 		spec->type = (is_full ? EF4_FARCH_FILTER_MAC_FULL :
2035*4882a593Smuzhiyun 			      EF4_FARCH_FILTER_MAC_WILD);
2036*4882a593Smuzhiyun 		spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
2037*4882a593Smuzhiyun 		spec->data[1] = (gen_spec->loc_mac[2] << 24 |
2038*4882a593Smuzhiyun 				 gen_spec->loc_mac[3] << 16 |
2039*4882a593Smuzhiyun 				 gen_spec->loc_mac[4] << 8 |
2040*4882a593Smuzhiyun 				 gen_spec->loc_mac[5]);
2041*4882a593Smuzhiyun 		spec->data[2] = (gen_spec->loc_mac[0] << 8 |
2042*4882a593Smuzhiyun 				 gen_spec->loc_mac[1]);
2043*4882a593Smuzhiyun 		break;
2044*4882a593Smuzhiyun 
2045*4882a593Smuzhiyun 	case EF4_FILTER_MATCH_LOC_MAC_IG:
2046*4882a593Smuzhiyun 		spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
2047*4882a593Smuzhiyun 			      EF4_FARCH_FILTER_MC_DEF :
2048*4882a593Smuzhiyun 			      EF4_FARCH_FILTER_UC_DEF);
2049*4882a593Smuzhiyun 		memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
2050*4882a593Smuzhiyun 		break;
2051*4882a593Smuzhiyun 
2052*4882a593Smuzhiyun 	default:
2053*4882a593Smuzhiyun 		return -EPROTONOSUPPORT;
2054*4882a593Smuzhiyun 	}
2055*4882a593Smuzhiyun 
2056*4882a593Smuzhiyun 	return 0;
2057*4882a593Smuzhiyun }
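/* Worked example (illustrative addresses only): a full TCP match for
 * remote 192.168.0.1:1000 to local 10.0.0.2:80 packs, per the layout
 * above, as
 *	data[0] = ntohl(rhost) << 16 | ntohs(rport) = 0x000103e8
 *	data[1] = ntohs(lport) << 16 | ntohl(rhost) >> 16 = 0x0050c0a8
 *	data[2] = ntohl(lhost) = 0x0a000002
 */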
2058*4882a593Smuzhiyun 
2059*4882a593Smuzhiyun static void
2060*4882a593Smuzhiyun ef4_farch_filter_to_gen_spec(struct ef4_filter_spec *gen_spec,
2061*4882a593Smuzhiyun 			     const struct ef4_farch_filter_spec *spec)
2062*4882a593Smuzhiyun {
2063*4882a593Smuzhiyun 	bool is_full = false;
2064*4882a593Smuzhiyun 
2065*4882a593Smuzhiyun 	/* *gen_spec should be completely initialised, to be consistent
2066*4882a593Smuzhiyun 	 * with ef4_filter_init_{rx,tx}() and in case we want to copy
2067*4882a593Smuzhiyun 	 * it back to userland.
2068*4882a593Smuzhiyun 	 */
2069*4882a593Smuzhiyun 	memset(gen_spec, 0, sizeof(*gen_spec));
2070*4882a593Smuzhiyun 
2071*4882a593Smuzhiyun 	gen_spec->priority = spec->priority;
2072*4882a593Smuzhiyun 	gen_spec->flags = spec->flags;
2073*4882a593Smuzhiyun 	gen_spec->dmaq_id = spec->dmaq_id;
2074*4882a593Smuzhiyun 
2075*4882a593Smuzhiyun 	switch (spec->type) {
2076*4882a593Smuzhiyun 	case EF4_FARCH_FILTER_TCP_FULL:
2077*4882a593Smuzhiyun 	case EF4_FARCH_FILTER_UDP_FULL:
2078*4882a593Smuzhiyun 		is_full = true;
2079*4882a593Smuzhiyun 		fallthrough;
2080*4882a593Smuzhiyun 	case EF4_FARCH_FILTER_TCP_WILD:
2081*4882a593Smuzhiyun 	case EF4_FARCH_FILTER_UDP_WILD: {
2082*4882a593Smuzhiyun 		__be32 host1, host2;
2083*4882a593Smuzhiyun 		__be16 port1, port2;
2084*4882a593Smuzhiyun 
2085*4882a593Smuzhiyun 		gen_spec->match_flags =
2086*4882a593Smuzhiyun 			EF4_FILTER_MATCH_ETHER_TYPE |
2087*4882a593Smuzhiyun 			EF4_FILTER_MATCH_IP_PROTO |
2088*4882a593Smuzhiyun 			EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT;
2089*4882a593Smuzhiyun 		if (is_full)
2090*4882a593Smuzhiyun 			gen_spec->match_flags |= (EF4_FILTER_MATCH_REM_HOST |
2091*4882a593Smuzhiyun 						  EF4_FILTER_MATCH_REM_PORT);
2092*4882a593Smuzhiyun 		gen_spec->ether_type = htons(ETH_P_IP);
2093*4882a593Smuzhiyun 		gen_spec->ip_proto =
2094*4882a593Smuzhiyun 			(spec->type == EF4_FARCH_FILTER_TCP_FULL ||
2095*4882a593Smuzhiyun 			 spec->type == EF4_FARCH_FILTER_TCP_WILD) ?
2096*4882a593Smuzhiyun 			IPPROTO_TCP : IPPROTO_UDP;
2097*4882a593Smuzhiyun 
2098*4882a593Smuzhiyun 		host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
2099*4882a593Smuzhiyun 		port1 = htons(spec->data[0]);
2100*4882a593Smuzhiyun 		host2 = htonl(spec->data[2]);
2101*4882a593Smuzhiyun 		port2 = htons(spec->data[1] >> 16);
2102*4882a593Smuzhiyun 		if (spec->flags & EF4_FILTER_FLAG_TX) {
2103*4882a593Smuzhiyun 			gen_spec->loc_host[0] = host1;
2104*4882a593Smuzhiyun 			gen_spec->rem_host[0] = host2;
2105*4882a593Smuzhiyun 		} else {
2106*4882a593Smuzhiyun 			gen_spec->loc_host[0] = host2;
2107*4882a593Smuzhiyun 			gen_spec->rem_host[0] = host1;
2108*4882a593Smuzhiyun 		}
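		/* Port order depends both on direction (TX filters swap the
		 * local/remote mapping) and on the UDP wildcard quirk noted
		 * in ef4_farch_filter_from_gen_spec(), hence the XOR below.
		 */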
2109*4882a593Smuzhiyun 		if (!!(gen_spec->flags & EF4_FILTER_FLAG_TX) ^
2110*4882a593Smuzhiyun 		    (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
2111*4882a593Smuzhiyun 			gen_spec->loc_port = port1;
2112*4882a593Smuzhiyun 			gen_spec->rem_port = port2;
2113*4882a593Smuzhiyun 		} else {
2114*4882a593Smuzhiyun 			gen_spec->loc_port = port2;
2115*4882a593Smuzhiyun 			gen_spec->rem_port = port1;
2116*4882a593Smuzhiyun 		}
2117*4882a593Smuzhiyun 
2118*4882a593Smuzhiyun 		break;
2119*4882a593Smuzhiyun 	}
2120*4882a593Smuzhiyun 
2121*4882a593Smuzhiyun 	case EF4_FARCH_FILTER_MAC_FULL:
2122*4882a593Smuzhiyun 		is_full = true;
2123*4882a593Smuzhiyun 		fallthrough;
2124*4882a593Smuzhiyun 	case EF4_FARCH_FILTER_MAC_WILD:
2125*4882a593Smuzhiyun 		gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC;
2126*4882a593Smuzhiyun 		if (is_full)
2127*4882a593Smuzhiyun 			gen_spec->match_flags |= EF4_FILTER_MATCH_OUTER_VID;
2128*4882a593Smuzhiyun 		gen_spec->loc_mac[0] = spec->data[2] >> 8;
2129*4882a593Smuzhiyun 		gen_spec->loc_mac[1] = spec->data[2];
2130*4882a593Smuzhiyun 		gen_spec->loc_mac[2] = spec->data[1] >> 24;
2131*4882a593Smuzhiyun 		gen_spec->loc_mac[3] = spec->data[1] >> 16;
2132*4882a593Smuzhiyun 		gen_spec->loc_mac[4] = spec->data[1] >> 8;
2133*4882a593Smuzhiyun 		gen_spec->loc_mac[5] = spec->data[1];
2134*4882a593Smuzhiyun 		gen_spec->outer_vid = htons(spec->data[0]);
2135*4882a593Smuzhiyun 		break;
2136*4882a593Smuzhiyun 
2137*4882a593Smuzhiyun 	case EF4_FARCH_FILTER_UC_DEF:
2138*4882a593Smuzhiyun 	case EF4_FARCH_FILTER_MC_DEF:
2139*4882a593Smuzhiyun 		gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC_IG;
2140*4882a593Smuzhiyun 		gen_spec->loc_mac[0] = spec->type == EF4_FARCH_FILTER_MC_DEF;
2141*4882a593Smuzhiyun 		break;
2142*4882a593Smuzhiyun 
2143*4882a593Smuzhiyun 	default:
2144*4882a593Smuzhiyun 		WARN_ON(1);
2145*4882a593Smuzhiyun 		break;
2146*4882a593Smuzhiyun 	}
2147*4882a593Smuzhiyun }
2148*4882a593Smuzhiyun 
2149*4882a593Smuzhiyun static void
2150*4882a593Smuzhiyun ef4_farch_filter_init_rx_auto(struct ef4_nic *efx,
2151*4882a593Smuzhiyun 			      struct ef4_farch_filter_spec *spec)
2152*4882a593Smuzhiyun {
2153*4882a593Smuzhiyun 	/* If there's only one channel then disable RSS for non-VF
2154*4882a593Smuzhiyun 	 * traffic, thereby allowing VFs to use RSS when the PF can't.
2155*4882a593Smuzhiyun 	 */
2156*4882a593Smuzhiyun 	spec->priority = EF4_FILTER_PRI_AUTO;
2157*4882a593Smuzhiyun 	spec->flags = (EF4_FILTER_FLAG_RX |
2158*4882a593Smuzhiyun 		       (ef4_rss_enabled(efx) ? EF4_FILTER_FLAG_RX_RSS : 0) |
2159*4882a593Smuzhiyun 		       (efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0));
2160*4882a593Smuzhiyun 	spec->dmaq_id = 0;
2161*4882a593Smuzhiyun }
2162*4882a593Smuzhiyun 
2163*4882a593Smuzhiyun /* Build a filter entry and return its n-tuple key. */
2164*4882a593Smuzhiyun static u32 ef4_farch_filter_build(ef4_oword_t *filter,
2165*4882a593Smuzhiyun 				  struct ef4_farch_filter_spec *spec)
2166*4882a593Smuzhiyun {
2167*4882a593Smuzhiyun 	u32 data3;
2168*4882a593Smuzhiyun 
2169*4882a593Smuzhiyun 	switch (ef4_farch_filter_spec_table_id(spec)) {
2170*4882a593Smuzhiyun 	case EF4_FARCH_FILTER_TABLE_RX_IP: {
2171*4882a593Smuzhiyun 		bool is_udp = (spec->type == EF4_FARCH_FILTER_UDP_FULL ||
2172*4882a593Smuzhiyun 			       spec->type == EF4_FARCH_FILTER_UDP_WILD);
2173*4882a593Smuzhiyun 		EF4_POPULATE_OWORD_7(
2174*4882a593Smuzhiyun 			*filter,
2175*4882a593Smuzhiyun 			FRF_BZ_RSS_EN,
2176*4882a593Smuzhiyun 			!!(spec->flags & EF4_FILTER_FLAG_RX_RSS),
2177*4882a593Smuzhiyun 			FRF_BZ_SCATTER_EN,
2178*4882a593Smuzhiyun 			!!(spec->flags & EF4_FILTER_FLAG_RX_SCATTER),
2179*4882a593Smuzhiyun 			FRF_BZ_TCP_UDP, is_udp,
2180*4882a593Smuzhiyun 			FRF_BZ_RXQ_ID, spec->dmaq_id,
2181*4882a593Smuzhiyun 			EF4_DWORD_2, spec->data[2],
2182*4882a593Smuzhiyun 			EF4_DWORD_1, spec->data[1],
2183*4882a593Smuzhiyun 			EF4_DWORD_0, spec->data[0]);
2184*4882a593Smuzhiyun 		data3 = is_udp;
2185*4882a593Smuzhiyun 		break;
2186*4882a593Smuzhiyun 	}
2187*4882a593Smuzhiyun 
2188*4882a593Smuzhiyun 	case EF4_FARCH_FILTER_TABLE_RX_MAC: {
2189*4882a593Smuzhiyun 		bool is_wild = spec->type == EF4_FARCH_FILTER_MAC_WILD;
2190*4882a593Smuzhiyun 		EF4_POPULATE_OWORD_7(
2191*4882a593Smuzhiyun 			*filter,
2192*4882a593Smuzhiyun 			FRF_CZ_RMFT_RSS_EN,
2193*4882a593Smuzhiyun 			!!(spec->flags & EF4_FILTER_FLAG_RX_RSS),
2194*4882a593Smuzhiyun 			FRF_CZ_RMFT_SCATTER_EN,
2195*4882a593Smuzhiyun 			!!(spec->flags & EF4_FILTER_FLAG_RX_SCATTER),
2196*4882a593Smuzhiyun 			FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
2197*4882a593Smuzhiyun 			FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
2198*4882a593Smuzhiyun 			FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
2199*4882a593Smuzhiyun 			FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
2200*4882a593Smuzhiyun 			FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
2201*4882a593Smuzhiyun 		data3 = is_wild;
2202*4882a593Smuzhiyun 		break;
2203*4882a593Smuzhiyun 	}
2204*4882a593Smuzhiyun 
2205*4882a593Smuzhiyun 	case EF4_FARCH_FILTER_TABLE_TX_MAC: {
2206*4882a593Smuzhiyun 		bool is_wild = spec->type == EF4_FARCH_FILTER_MAC_WILD;
2207*4882a593Smuzhiyun 		EF4_POPULATE_OWORD_5(*filter,
2208*4882a593Smuzhiyun 				     FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
2209*4882a593Smuzhiyun 				     FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
2210*4882a593Smuzhiyun 				     FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
2211*4882a593Smuzhiyun 				     FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
2212*4882a593Smuzhiyun 				     FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
2213*4882a593Smuzhiyun 		data3 = is_wild | spec->dmaq_id << 1;
2214*4882a593Smuzhiyun 		break;
2215*4882a593Smuzhiyun 	}
2216*4882a593Smuzhiyun 
2217*4882a593Smuzhiyun 	default:
2218*4882a593Smuzhiyun 		BUG();
2219*4882a593Smuzhiyun 	}
2220*4882a593Smuzhiyun 
2221*4882a593Smuzhiyun 	return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
2222*4882a593Smuzhiyun }
2223*4882a593Smuzhiyun 
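/* Illustration (assumed values): for the UDP wildcard example above,
 * data = { 0x00000035, 0, 0xc0a80001 } and data3 = is_udp = 1, so the
 * n-tuple key is 0x35 ^ 0 ^ 0xc0a80001 ^ 1 = 0xc0a80035.
 */
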
2224*4882a593Smuzhiyun static bool ef4_farch_filter_equal(const struct ef4_farch_filter_spec *left,
2225*4882a593Smuzhiyun 				   const struct ef4_farch_filter_spec *right)
2226*4882a593Smuzhiyun {
2227*4882a593Smuzhiyun 	if (left->type != right->type ||
2228*4882a593Smuzhiyun 	    memcmp(left->data, right->data, sizeof(left->data)))
2229*4882a593Smuzhiyun 		return false;
2230*4882a593Smuzhiyun 
2231*4882a593Smuzhiyun 	if (left->flags & EF4_FILTER_FLAG_TX &&
2232*4882a593Smuzhiyun 	    left->dmaq_id != right->dmaq_id)
2233*4882a593Smuzhiyun 		return false;
2234*4882a593Smuzhiyun 
2235*4882a593Smuzhiyun 	return true;
2236*4882a593Smuzhiyun }
2237*4882a593Smuzhiyun 
2238*4882a593Smuzhiyun /*
2239*4882a593Smuzhiyun  * Construct/deconstruct external filter IDs.  At least the RX filter
2240*4882a593Smuzhiyun  * IDs must be ordered by matching priority, for RX NFC semantics.
2241*4882a593Smuzhiyun  *
2242*4882a593Smuzhiyun  * Deconstruction needs to be robust against invalid IDs so that
2243*4882a593Smuzhiyun  * ef4_filter_remove_id_safe() and ef4_filter_get_filter_safe() can
2244*4882a593Smuzhiyun  * accept user-provided IDs.
2245*4882a593Smuzhiyun  */
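
/* For example (illustrative): a TCP wildcard RX filter (match priority
 * 1 in the table below) at table index 5 gets external ID
 * (1 << EF4_FARCH_FILTER_INDEX_WIDTH) | 5 = 0x2005; TX ranges start at
 * EF4_FARCH_FILTER_MATCH_PRI_COUNT.
 */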
2246*4882a593Smuzhiyun 
2247*4882a593Smuzhiyun #define EF4_FARCH_FILTER_MATCH_PRI_COUNT	5
2248*4882a593Smuzhiyun 
2249*4882a593Smuzhiyun static const u8 ef4_farch_filter_type_match_pri[EF4_FARCH_FILTER_TYPE_COUNT] = {
2250*4882a593Smuzhiyun 	[EF4_FARCH_FILTER_TCP_FULL]	= 0,
2251*4882a593Smuzhiyun 	[EF4_FARCH_FILTER_UDP_FULL]	= 0,
2252*4882a593Smuzhiyun 	[EF4_FARCH_FILTER_TCP_WILD]	= 1,
2253*4882a593Smuzhiyun 	[EF4_FARCH_FILTER_UDP_WILD]	= 1,
2254*4882a593Smuzhiyun 	[EF4_FARCH_FILTER_MAC_FULL]	= 2,
2255*4882a593Smuzhiyun 	[EF4_FARCH_FILTER_MAC_WILD]	= 3,
2256*4882a593Smuzhiyun 	[EF4_FARCH_FILTER_UC_DEF]	= 4,
2257*4882a593Smuzhiyun 	[EF4_FARCH_FILTER_MC_DEF]	= 4,
2258*4882a593Smuzhiyun };
2259*4882a593Smuzhiyun 
2260*4882a593Smuzhiyun static const enum ef4_farch_filter_table_id ef4_farch_filter_range_table[] = {
2261*4882a593Smuzhiyun 	EF4_FARCH_FILTER_TABLE_RX_IP,	/* RX match pri 0 */
2262*4882a593Smuzhiyun 	EF4_FARCH_FILTER_TABLE_RX_IP,
2263*4882a593Smuzhiyun 	EF4_FARCH_FILTER_TABLE_RX_MAC,
2264*4882a593Smuzhiyun 	EF4_FARCH_FILTER_TABLE_RX_MAC,
2265*4882a593Smuzhiyun 	EF4_FARCH_FILTER_TABLE_RX_DEF,	/* RX match pri 4 */
2266*4882a593Smuzhiyun 	EF4_FARCH_FILTER_TABLE_TX_MAC,	/* TX match pri 0 */
2267*4882a593Smuzhiyun 	EF4_FARCH_FILTER_TABLE_TX_MAC,	/* TX match pri 1 */
2268*4882a593Smuzhiyun };
2269*4882a593Smuzhiyun 
2270*4882a593Smuzhiyun #define EF4_FARCH_FILTER_INDEX_WIDTH 13
2271*4882a593Smuzhiyun #define EF4_FARCH_FILTER_INDEX_MASK ((1 << EF4_FARCH_FILTER_INDEX_WIDTH) - 1)
2272*4882a593Smuzhiyun 
2273*4882a593Smuzhiyun static inline u32
2274*4882a593Smuzhiyun ef4_farch_filter_make_id(const struct ef4_farch_filter_spec *spec,
2275*4882a593Smuzhiyun 			 unsigned int index)
2276*4882a593Smuzhiyun {
2277*4882a593Smuzhiyun 	unsigned int range;
2278*4882a593Smuzhiyun 
2279*4882a593Smuzhiyun 	range = ef4_farch_filter_type_match_pri[spec->type];
2280*4882a593Smuzhiyun 	if (!(spec->flags & EF4_FILTER_FLAG_RX))
2281*4882a593Smuzhiyun 		range += EF4_FARCH_FILTER_MATCH_PRI_COUNT;
2282*4882a593Smuzhiyun 
2283*4882a593Smuzhiyun 	return range << EF4_FARCH_FILTER_INDEX_WIDTH | index;
2284*4882a593Smuzhiyun }
2285*4882a593Smuzhiyun 
2286*4882a593Smuzhiyun static inline enum ef4_farch_filter_table_id
2287*4882a593Smuzhiyun ef4_farch_filter_id_table_id(u32 id)
2288*4882a593Smuzhiyun {
2289*4882a593Smuzhiyun 	unsigned int range = id >> EF4_FARCH_FILTER_INDEX_WIDTH;
2290*4882a593Smuzhiyun 
2291*4882a593Smuzhiyun 	if (range < ARRAY_SIZE(ef4_farch_filter_range_table))
2292*4882a593Smuzhiyun 		return ef4_farch_filter_range_table[range];
2293*4882a593Smuzhiyun 	else
2294*4882a593Smuzhiyun 		return EF4_FARCH_FILTER_TABLE_COUNT; /* invalid */
2295*4882a593Smuzhiyun }
2296*4882a593Smuzhiyun 
2297*4882a593Smuzhiyun static inline unsigned int ef4_farch_filter_id_index(u32 id)
2298*4882a593Smuzhiyun {
2299*4882a593Smuzhiyun 	return id & EF4_FARCH_FILTER_INDEX_MASK;
2300*4882a593Smuzhiyun }
2301*4882a593Smuzhiyun 
2302*4882a593Smuzhiyun u32 ef4_farch_filter_get_rx_id_limit(struct ef4_nic *efx)
2303*4882a593Smuzhiyun {
2304*4882a593Smuzhiyun 	struct ef4_farch_filter_state *state = efx->filter_state;
2305*4882a593Smuzhiyun 	unsigned int range = EF4_FARCH_FILTER_MATCH_PRI_COUNT - 1;
2306*4882a593Smuzhiyun 	enum ef4_farch_filter_table_id table_id;
2307*4882a593Smuzhiyun 
2308*4882a593Smuzhiyun 	do {
2309*4882a593Smuzhiyun 		table_id = ef4_farch_filter_range_table[range];
2310*4882a593Smuzhiyun 		if (state->table[table_id].size != 0)
2311*4882a593Smuzhiyun 			return range << EF4_FARCH_FILTER_INDEX_WIDTH |
2312*4882a593Smuzhiyun 				state->table[table_id].size;
2313*4882a593Smuzhiyun 	} while (range--);
2314*4882a593Smuzhiyun 
2315*4882a593Smuzhiyun 	return 0;
2316*4882a593Smuzhiyun }
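
/* e.g. if RX_DEF were the highest-numbered non-empty RX table, with
 * size 2 (hypothetical values), the loop above would return
 * (4 << EF4_FARCH_FILTER_INDEX_WIDTH) | 2 = 0x8002.
 */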
2317*4882a593Smuzhiyun 
2318*4882a593Smuzhiyun s32 ef4_farch_filter_insert(struct ef4_nic *efx,
2319*4882a593Smuzhiyun 			    struct ef4_filter_spec *gen_spec,
2320*4882a593Smuzhiyun 			    bool replace_equal)
2321*4882a593Smuzhiyun {
2322*4882a593Smuzhiyun 	struct ef4_farch_filter_state *state = efx->filter_state;
2323*4882a593Smuzhiyun 	struct ef4_farch_filter_table *table;
2324*4882a593Smuzhiyun 	struct ef4_farch_filter_spec spec;
2325*4882a593Smuzhiyun 	ef4_oword_t filter;
2326*4882a593Smuzhiyun 	int rep_index, ins_index;
2327*4882a593Smuzhiyun 	unsigned int depth = 0;
2328*4882a593Smuzhiyun 	int rc;
2329*4882a593Smuzhiyun 
2330*4882a593Smuzhiyun 	rc = ef4_farch_filter_from_gen_spec(&spec, gen_spec);
2331*4882a593Smuzhiyun 	if (rc)
2332*4882a593Smuzhiyun 		return rc;
2333*4882a593Smuzhiyun 
2334*4882a593Smuzhiyun 	table = &state->table[ef4_farch_filter_spec_table_id(&spec)];
2335*4882a593Smuzhiyun 	if (table->size == 0)
2336*4882a593Smuzhiyun 		return -EINVAL;
2337*4882a593Smuzhiyun 
2338*4882a593Smuzhiyun 	netif_vdbg(efx, hw, efx->net_dev,
2339*4882a593Smuzhiyun 		   "%s: type %d search_limit=%d", __func__, spec.type,
2340*4882a593Smuzhiyun 		   table->search_limit[spec.type]);
2341*4882a593Smuzhiyun 
2342*4882a593Smuzhiyun 	if (table->id == EF4_FARCH_FILTER_TABLE_RX_DEF) {
2343*4882a593Smuzhiyun 		/* One filter spec per type */
2344*4882a593Smuzhiyun 		BUILD_BUG_ON(EF4_FARCH_FILTER_INDEX_UC_DEF != 0);
2345*4882a593Smuzhiyun 		BUILD_BUG_ON(EF4_FARCH_FILTER_INDEX_MC_DEF !=
2346*4882a593Smuzhiyun 			     EF4_FARCH_FILTER_MC_DEF - EF4_FARCH_FILTER_UC_DEF);
2347*4882a593Smuzhiyun 		rep_index = spec.type - EF4_FARCH_FILTER_UC_DEF;
2348*4882a593Smuzhiyun 		ins_index = rep_index;
2349*4882a593Smuzhiyun 
2350*4882a593Smuzhiyun 		spin_lock_bh(&efx->filter_lock);
2351*4882a593Smuzhiyun 	} else {
2352*4882a593Smuzhiyun 		/* Search concurrently for
2353*4882a593Smuzhiyun 		 * (1) a filter to be replaced (rep_index): any filter
2354*4882a593Smuzhiyun 		 *     with the same match values, up to the current
2355*4882a593Smuzhiyun 		 *     search depth for this type, and
2356*4882a593Smuzhiyun 		 * (2) the insertion point (ins_index): (1) or any
2357*4882a593Smuzhiyun 		 *     free slot before it or up to the maximum search
2358*4882a593Smuzhiyun 		 *     depth for this priority
2359*4882a593Smuzhiyun 		 * We fail if we cannot find (2).
2360*4882a593Smuzhiyun 		 *
2361*4882a593Smuzhiyun 		 * We can stop once either
2362*4882a593Smuzhiyun 		 * (a) we find (1), in which case we have definitely
2363*4882a593Smuzhiyun 		 *     found (2) as well; or
2364*4882a593Smuzhiyun 		 * (b) we have searched exhaustively for (1), and have
2365*4882a593Smuzhiyun 		 *     either found (2) or searched exhaustively for it
2366*4882a593Smuzhiyun 		 */
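		/* Illustration: starting from slot i = hash & (size - 1),
		 * probes visit i, i + incr, i + 2 * incr, ... (each masked
		 * by size - 1); "depth" counts steps along this sequence.
		 */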
2367*4882a593Smuzhiyun 		u32 key = ef4_farch_filter_build(&filter, &spec);
2368*4882a593Smuzhiyun 		unsigned int hash = ef4_farch_filter_hash(key);
2369*4882a593Smuzhiyun 		unsigned int incr = ef4_farch_filter_increment(key);
2370*4882a593Smuzhiyun 		unsigned int max_rep_depth = table->search_limit[spec.type];
2371*4882a593Smuzhiyun 		unsigned int max_ins_depth =
2372*4882a593Smuzhiyun 			spec.priority <= EF4_FILTER_PRI_HINT ?
2373*4882a593Smuzhiyun 			EF4_FARCH_FILTER_CTL_SRCH_HINT_MAX :
2374*4882a593Smuzhiyun 			EF4_FARCH_FILTER_CTL_SRCH_MAX;
2375*4882a593Smuzhiyun 		unsigned int i = hash & (table->size - 1);
2376*4882a593Smuzhiyun 
2377*4882a593Smuzhiyun 		ins_index = -1;
2378*4882a593Smuzhiyun 		depth = 1;
2379*4882a593Smuzhiyun 
2380*4882a593Smuzhiyun 		spin_lock_bh(&efx->filter_lock);
2381*4882a593Smuzhiyun 
2382*4882a593Smuzhiyun 		for (;;) {
2383*4882a593Smuzhiyun 			if (!test_bit(i, table->used_bitmap)) {
2384*4882a593Smuzhiyun 				if (ins_index < 0)
2385*4882a593Smuzhiyun 					ins_index = i;
2386*4882a593Smuzhiyun 			} else if (ef4_farch_filter_equal(&spec,
2387*4882a593Smuzhiyun 							  &table->spec[i])) {
2388*4882a593Smuzhiyun 				/* Case (a) */
2389*4882a593Smuzhiyun 				if (ins_index < 0)
2390*4882a593Smuzhiyun 					ins_index = i;
2391*4882a593Smuzhiyun 				rep_index = i;
2392*4882a593Smuzhiyun 				break;
2393*4882a593Smuzhiyun 			}
2394*4882a593Smuzhiyun 
2395*4882a593Smuzhiyun 			if (depth >= max_rep_depth &&
2396*4882a593Smuzhiyun 			    (ins_index >= 0 || depth >= max_ins_depth)) {
2397*4882a593Smuzhiyun 				/* Case (b) */
2398*4882a593Smuzhiyun 				if (ins_index < 0) {
2399*4882a593Smuzhiyun 					rc = -EBUSY;
2400*4882a593Smuzhiyun 					goto out;
2401*4882a593Smuzhiyun 				}
2402*4882a593Smuzhiyun 				rep_index = -1;
2403*4882a593Smuzhiyun 				break;
2404*4882a593Smuzhiyun 			}
2405*4882a593Smuzhiyun 
2406*4882a593Smuzhiyun 			i = (i + incr) & (table->size - 1);
2407*4882a593Smuzhiyun 			++depth;
2408*4882a593Smuzhiyun 		}
2409*4882a593Smuzhiyun 	}
2410*4882a593Smuzhiyun 
2411*4882a593Smuzhiyun 	/* If we found a filter to be replaced, check whether we
2412*4882a593Smuzhiyun 	 * should do so
2413*4882a593Smuzhiyun 	 */
2414*4882a593Smuzhiyun 	if (rep_index >= 0) {
2415*4882a593Smuzhiyun 		struct ef4_farch_filter_spec *saved_spec =
2416*4882a593Smuzhiyun 			&table->spec[rep_index];
2417*4882a593Smuzhiyun 
2418*4882a593Smuzhiyun 		if (spec.priority == saved_spec->priority && !replace_equal) {
2419*4882a593Smuzhiyun 			rc = -EEXIST;
2420*4882a593Smuzhiyun 			goto out;
2421*4882a593Smuzhiyun 		}
2422*4882a593Smuzhiyun 		if (spec.priority < saved_spec->priority) {
2423*4882a593Smuzhiyun 			rc = -EPERM;
2424*4882a593Smuzhiyun 			goto out;
2425*4882a593Smuzhiyun 		}
2426*4882a593Smuzhiyun 		if (saved_spec->priority == EF4_FILTER_PRI_AUTO ||
2427*4882a593Smuzhiyun 		    saved_spec->flags & EF4_FILTER_FLAG_RX_OVER_AUTO)
2428*4882a593Smuzhiyun 			spec.flags |= EF4_FILTER_FLAG_RX_OVER_AUTO;
2429*4882a593Smuzhiyun 	}
2430*4882a593Smuzhiyun 
2431*4882a593Smuzhiyun 	/* Insert the filter */
2432*4882a593Smuzhiyun 	if (ins_index != rep_index) {
2433*4882a593Smuzhiyun 		__set_bit(ins_index, table->used_bitmap);
2434*4882a593Smuzhiyun 		++table->used;
2435*4882a593Smuzhiyun 	}
2436*4882a593Smuzhiyun 	table->spec[ins_index] = spec;
2437*4882a593Smuzhiyun 
2438*4882a593Smuzhiyun 	if (table->id == EF4_FARCH_FILTER_TABLE_RX_DEF) {
2439*4882a593Smuzhiyun 		ef4_farch_filter_push_rx_config(efx);
2440*4882a593Smuzhiyun 	} else {
2441*4882a593Smuzhiyun 		if (table->search_limit[spec.type] < depth) {
2442*4882a593Smuzhiyun 			table->search_limit[spec.type] = depth;
2443*4882a593Smuzhiyun 			if (spec.flags & EF4_FILTER_FLAG_TX)
2444*4882a593Smuzhiyun 				ef4_farch_filter_push_tx_limits(efx);
2445*4882a593Smuzhiyun 			else
2446*4882a593Smuzhiyun 				ef4_farch_filter_push_rx_config(efx);
2447*4882a593Smuzhiyun 		}
2448*4882a593Smuzhiyun 
2449*4882a593Smuzhiyun 		ef4_writeo(efx, &filter,
2450*4882a593Smuzhiyun 			   table->offset + table->step * ins_index);
2451*4882a593Smuzhiyun 
2452*4882a593Smuzhiyun 		/* If we were able to replace a filter by inserting
2453*4882a593Smuzhiyun 		 * at a lower depth, clear the replaced filter
2454*4882a593Smuzhiyun 		 */
2455*4882a593Smuzhiyun 		if (ins_index != rep_index && rep_index >= 0)
2456*4882a593Smuzhiyun 			ef4_farch_filter_table_clear_entry(efx, table,
2457*4882a593Smuzhiyun 							   rep_index);
2458*4882a593Smuzhiyun 	}
2459*4882a593Smuzhiyun 
2460*4882a593Smuzhiyun 	netif_vdbg(efx, hw, efx->net_dev,
2461*4882a593Smuzhiyun 		   "%s: filter type %d index %d rxq %u set",
2462*4882a593Smuzhiyun 		   __func__, spec.type, ins_index, spec.dmaq_id);
2463*4882a593Smuzhiyun 	rc = ef4_farch_filter_make_id(&spec, ins_index);
2464*4882a593Smuzhiyun 
2465*4882a593Smuzhiyun out:
2466*4882a593Smuzhiyun 	spin_unlock_bh(&efx->filter_lock);
2467*4882a593Smuzhiyun 	return rc;
2468*4882a593Smuzhiyun }
2469*4882a593Smuzhiyun 
2470*4882a593Smuzhiyun static void
2471*4882a593Smuzhiyun ef4_farch_filter_table_clear_entry(struct ef4_nic *efx,
2472*4882a593Smuzhiyun 				   struct ef4_farch_filter_table *table,
2473*4882a593Smuzhiyun 				   unsigned int filter_idx)
2474*4882a593Smuzhiyun {
2475*4882a593Smuzhiyun 	static ef4_oword_t filter;
2476*4882a593Smuzhiyun 
2477*4882a593Smuzhiyun 	EF4_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
2478*4882a593Smuzhiyun 	BUG_ON(table->offset == 0); /* can't clear MAC default filters */
2479*4882a593Smuzhiyun 
2480*4882a593Smuzhiyun 	__clear_bit(filter_idx, table->used_bitmap);
2481*4882a593Smuzhiyun 	--table->used;
2482*4882a593Smuzhiyun 	memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
2483*4882a593Smuzhiyun 
2484*4882a593Smuzhiyun 	ef4_writeo(efx, &filter, table->offset + table->step * filter_idx);
2485*4882a593Smuzhiyun 
2486*4882a593Smuzhiyun 	/* If this filter required a greater search depth than
2487*4882a593Smuzhiyun 	 * any other, the search limit for its type can now be
2488*4882a593Smuzhiyun 	 * decreased.  However, it is hard to determine that
2489*4882a593Smuzhiyun 	 * unless the table has become completely empty - in
2490*4882a593Smuzhiyun 	 * which case, all its search limits can be set to 0.
2491*4882a593Smuzhiyun 	 */
2492*4882a593Smuzhiyun 	if (unlikely(table->used == 0)) {
2493*4882a593Smuzhiyun 		memset(table->search_limit, 0, sizeof(table->search_limit));
2494*4882a593Smuzhiyun 		if (table->id == EF4_FARCH_FILTER_TABLE_TX_MAC)
2495*4882a593Smuzhiyun 			ef4_farch_filter_push_tx_limits(efx);
2496*4882a593Smuzhiyun 		else
2497*4882a593Smuzhiyun 			ef4_farch_filter_push_rx_config(efx);
2498*4882a593Smuzhiyun 	}
2499*4882a593Smuzhiyun }
2500*4882a593Smuzhiyun 
2501*4882a593Smuzhiyun static int ef4_farch_filter_remove(struct ef4_nic *efx,
2502*4882a593Smuzhiyun 				   struct ef4_farch_filter_table *table,
2503*4882a593Smuzhiyun 				   unsigned int filter_idx,
2504*4882a593Smuzhiyun 				   enum ef4_filter_priority priority)
2505*4882a593Smuzhiyun {
2506*4882a593Smuzhiyun 	struct ef4_farch_filter_spec *spec = &table->spec[filter_idx];
2507*4882a593Smuzhiyun 
2508*4882a593Smuzhiyun 	if (!test_bit(filter_idx, table->used_bitmap) ||
2509*4882a593Smuzhiyun 	    spec->priority != priority)
2510*4882a593Smuzhiyun 		return -ENOENT;
2511*4882a593Smuzhiyun 
2512*4882a593Smuzhiyun 	if (spec->flags & EF4_FILTER_FLAG_RX_OVER_AUTO) {
2513*4882a593Smuzhiyun 		ef4_farch_filter_init_rx_auto(efx, spec);
2514*4882a593Smuzhiyun 		ef4_farch_filter_push_rx_config(efx);
2515*4882a593Smuzhiyun 	} else {
2516*4882a593Smuzhiyun 		ef4_farch_filter_table_clear_entry(efx, table, filter_idx);
2517*4882a593Smuzhiyun 	}
2518*4882a593Smuzhiyun 
2519*4882a593Smuzhiyun 	return 0;
2520*4882a593Smuzhiyun }
2521*4882a593Smuzhiyun 
2522*4882a593Smuzhiyun int ef4_farch_filter_remove_safe(struct ef4_nic *efx,
2523*4882a593Smuzhiyun 				 enum ef4_filter_priority priority,
2524*4882a593Smuzhiyun 				 u32 filter_id)
2525*4882a593Smuzhiyun {
2526*4882a593Smuzhiyun 	struct ef4_farch_filter_state *state = efx->filter_state;
2527*4882a593Smuzhiyun 	enum ef4_farch_filter_table_id table_id;
2528*4882a593Smuzhiyun 	struct ef4_farch_filter_table *table;
2529*4882a593Smuzhiyun 	unsigned int filter_idx;
2530*4882a593Smuzhiyun 	int rc;
2531*4882a593Smuzhiyun 
2532*4882a593Smuzhiyun 	table_id = ef4_farch_filter_id_table_id(filter_id);
2533*4882a593Smuzhiyun 	if ((unsigned int)table_id >= EF4_FARCH_FILTER_TABLE_COUNT)
2534*4882a593Smuzhiyun 		return -ENOENT;
2535*4882a593Smuzhiyun 	table = &state->table[table_id];
2536*4882a593Smuzhiyun 
2537*4882a593Smuzhiyun 	filter_idx = ef4_farch_filter_id_index(filter_id);
2538*4882a593Smuzhiyun 	if (filter_idx >= table->size)
2539*4882a593Smuzhiyun 		return -ENOENT;
2540*4882a593Smuzhiyun 
2541*4882a593Smuzhiyun 	spin_lock_bh(&efx->filter_lock);
2542*4882a593Smuzhiyun 	rc = ef4_farch_filter_remove(efx, table, filter_idx, priority);
2543*4882a593Smuzhiyun 	spin_unlock_bh(&efx->filter_lock);
2544*4882a593Smuzhiyun 
2545*4882a593Smuzhiyun 	return rc;
2546*4882a593Smuzhiyun }
2547*4882a593Smuzhiyun 
2548*4882a593Smuzhiyun int ef4_farch_filter_get_safe(struct ef4_nic *efx,
2549*4882a593Smuzhiyun 			      enum ef4_filter_priority priority,
2550*4882a593Smuzhiyun 			      u32 filter_id, struct ef4_filter_spec *spec_buf)
2551*4882a593Smuzhiyun {
2552*4882a593Smuzhiyun 	struct ef4_farch_filter_state *state = efx->filter_state;
2553*4882a593Smuzhiyun 	enum ef4_farch_filter_table_id table_id;
2554*4882a593Smuzhiyun 	struct ef4_farch_filter_table *table;
2555*4882a593Smuzhiyun 	struct ef4_farch_filter_spec *spec;
2556*4882a593Smuzhiyun 	unsigned int filter_idx;
2557*4882a593Smuzhiyun 	int rc;
2558*4882a593Smuzhiyun 
2559*4882a593Smuzhiyun 	table_id = ef4_farch_filter_id_table_id(filter_id);
2560*4882a593Smuzhiyun 	if ((unsigned int)table_id >= EF4_FARCH_FILTER_TABLE_COUNT)
2561*4882a593Smuzhiyun 		return -ENOENT;
2562*4882a593Smuzhiyun 	table = &state->table[table_id];
2563*4882a593Smuzhiyun 
2564*4882a593Smuzhiyun 	filter_idx = ef4_farch_filter_id_index(filter_id);
2565*4882a593Smuzhiyun 	if (filter_idx >= table->size)
2566*4882a593Smuzhiyun 		return -ENOENT;
2567*4882a593Smuzhiyun 	spec = &table->spec[filter_idx];
2568*4882a593Smuzhiyun 
2569*4882a593Smuzhiyun 	spin_lock_bh(&efx->filter_lock);
2570*4882a593Smuzhiyun 
2571*4882a593Smuzhiyun 	if (test_bit(filter_idx, table->used_bitmap) &&
2572*4882a593Smuzhiyun 	    spec->priority == priority) {
2573*4882a593Smuzhiyun 		ef4_farch_filter_to_gen_spec(spec_buf, spec);
2574*4882a593Smuzhiyun 		rc = 0;
2575*4882a593Smuzhiyun 	} else {
2576*4882a593Smuzhiyun 		rc = -ENOENT;
2577*4882a593Smuzhiyun 	}
2578*4882a593Smuzhiyun 
2579*4882a593Smuzhiyun 	spin_unlock_bh(&efx->filter_lock);
2580*4882a593Smuzhiyun 
2581*4882a593Smuzhiyun 	return rc;
2582*4882a593Smuzhiyun }
2583*4882a593Smuzhiyun 
2584*4882a593Smuzhiyun static void
2585*4882a593Smuzhiyun ef4_farch_filter_table_clear(struct ef4_nic *efx,
2586*4882a593Smuzhiyun 			     enum ef4_farch_filter_table_id table_id,
2587*4882a593Smuzhiyun 			     enum ef4_filter_priority priority)
2588*4882a593Smuzhiyun {
2589*4882a593Smuzhiyun 	struct ef4_farch_filter_state *state = efx->filter_state;
2590*4882a593Smuzhiyun 	struct ef4_farch_filter_table *table = &state->table[table_id];
2591*4882a593Smuzhiyun 	unsigned int filter_idx;
2592*4882a593Smuzhiyun 
2593*4882a593Smuzhiyun 	spin_lock_bh(&efx->filter_lock);
2594*4882a593Smuzhiyun 	for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
2595*4882a593Smuzhiyun 		if (table->spec[filter_idx].priority != EF4_FILTER_PRI_AUTO)
2596*4882a593Smuzhiyun 			ef4_farch_filter_remove(efx, table,
2597*4882a593Smuzhiyun 						filter_idx, priority);
2598*4882a593Smuzhiyun 	}
2599*4882a593Smuzhiyun 	spin_unlock_bh(&efx->filter_lock);
2600*4882a593Smuzhiyun }
2601*4882a593Smuzhiyun 
2602*4882a593Smuzhiyun int ef4_farch_filter_clear_rx(struct ef4_nic *efx,
2603*4882a593Smuzhiyun 			       enum ef4_filter_priority priority)
2604*4882a593Smuzhiyun {
2605*4882a593Smuzhiyun 	ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_IP,
2606*4882a593Smuzhiyun 				     priority);
2607*4882a593Smuzhiyun 	ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_MAC,
2608*4882a593Smuzhiyun 				     priority);
2609*4882a593Smuzhiyun 	ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_DEF,
2610*4882a593Smuzhiyun 				     priority);
2611*4882a593Smuzhiyun 	return 0;
2612*4882a593Smuzhiyun }
2613*4882a593Smuzhiyun 
2614*4882a593Smuzhiyun u32 ef4_farch_filter_count_rx_used(struct ef4_nic *efx,
2615*4882a593Smuzhiyun 				   enum ef4_filter_priority priority)
2616*4882a593Smuzhiyun {
2617*4882a593Smuzhiyun 	struct ef4_farch_filter_state *state = efx->filter_state;
2618*4882a593Smuzhiyun 	enum ef4_farch_filter_table_id table_id;
2619*4882a593Smuzhiyun 	struct ef4_farch_filter_table *table;
2620*4882a593Smuzhiyun 	unsigned int filter_idx;
2621*4882a593Smuzhiyun 	u32 count = 0;
2622*4882a593Smuzhiyun 
2623*4882a593Smuzhiyun 	spin_lock_bh(&efx->filter_lock);
2624*4882a593Smuzhiyun 
2625*4882a593Smuzhiyun 	for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP;
2626*4882a593Smuzhiyun 	     table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF;
2627*4882a593Smuzhiyun 	     table_id++) {
2628*4882a593Smuzhiyun 		table = &state->table[table_id];
2629*4882a593Smuzhiyun 		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2630*4882a593Smuzhiyun 			if (test_bit(filter_idx, table->used_bitmap) &&
2631*4882a593Smuzhiyun 			    table->spec[filter_idx].priority == priority)
2632*4882a593Smuzhiyun 				++count;
2633*4882a593Smuzhiyun 		}
2634*4882a593Smuzhiyun 	}
2635*4882a593Smuzhiyun 
2636*4882a593Smuzhiyun 	spin_unlock_bh(&efx->filter_lock);
2637*4882a593Smuzhiyun 
2638*4882a593Smuzhiyun 	return count;
2639*4882a593Smuzhiyun }
2640*4882a593Smuzhiyun 
2641*4882a593Smuzhiyun s32 ef4_farch_filter_get_rx_ids(struct ef4_nic *efx,
2642*4882a593Smuzhiyun 				enum ef4_filter_priority priority,
2643*4882a593Smuzhiyun 				u32 *buf, u32 size)
2644*4882a593Smuzhiyun {
2645*4882a593Smuzhiyun 	struct ef4_farch_filter_state *state = efx->filter_state;
2646*4882a593Smuzhiyun 	enum ef4_farch_filter_table_id table_id;
2647*4882a593Smuzhiyun 	struct ef4_farch_filter_table *table;
2648*4882a593Smuzhiyun 	unsigned int filter_idx;
2649*4882a593Smuzhiyun 	s32 count = 0;
2650*4882a593Smuzhiyun 
2651*4882a593Smuzhiyun 	spin_lock_bh(&efx->filter_lock);
2652*4882a593Smuzhiyun 
2653*4882a593Smuzhiyun 	for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP;
2654*4882a593Smuzhiyun 	     table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF;
2655*4882a593Smuzhiyun 	     table_id++) {
2656*4882a593Smuzhiyun 		table = &state->table[table_id];
2657*4882a593Smuzhiyun 		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2658*4882a593Smuzhiyun 			if (test_bit(filter_idx, table->used_bitmap) &&
2659*4882a593Smuzhiyun 			    table->spec[filter_idx].priority == priority) {
2660*4882a593Smuzhiyun 				if (count == size) {
2661*4882a593Smuzhiyun 					count = -EMSGSIZE;
2662*4882a593Smuzhiyun 					goto out;
2663*4882a593Smuzhiyun 				}
2664*4882a593Smuzhiyun 				buf[count++] = ef4_farch_filter_make_id(
2665*4882a593Smuzhiyun 					&table->spec[filter_idx], filter_idx);
2666*4882a593Smuzhiyun 			}
2667*4882a593Smuzhiyun 		}
2668*4882a593Smuzhiyun 	}
2669*4882a593Smuzhiyun out:
2670*4882a593Smuzhiyun 	spin_unlock_bh(&efx->filter_lock);
2671*4882a593Smuzhiyun 
2672*4882a593Smuzhiyun 	return count;
2673*4882a593Smuzhiyun }
2674*4882a593Smuzhiyun 
2675*4882a593Smuzhiyun /* Restore filter state after reset */
2676*4882a593Smuzhiyun void ef4_farch_filter_table_restore(struct ef4_nic *efx)
2677*4882a593Smuzhiyun {
2678*4882a593Smuzhiyun 	struct ef4_farch_filter_state *state = efx->filter_state;
2679*4882a593Smuzhiyun 	enum ef4_farch_filter_table_id table_id;
2680*4882a593Smuzhiyun 	struct ef4_farch_filter_table *table;
2681*4882a593Smuzhiyun 	ef4_oword_t filter;
2682*4882a593Smuzhiyun 	unsigned int filter_idx;
2683*4882a593Smuzhiyun 
2684*4882a593Smuzhiyun 	spin_lock_bh(&efx->filter_lock);
2685*4882a593Smuzhiyun 
2686*4882a593Smuzhiyun 	for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
2687*4882a593Smuzhiyun 		table = &state->table[table_id];
2688*4882a593Smuzhiyun 
2689*4882a593Smuzhiyun 		/* Check whether this is a regular register table */
2690*4882a593Smuzhiyun 		if (table->step == 0)
2691*4882a593Smuzhiyun 			continue;
2692*4882a593Smuzhiyun 
2693*4882a593Smuzhiyun 		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2694*4882a593Smuzhiyun 			if (!test_bit(filter_idx, table->used_bitmap))
2695*4882a593Smuzhiyun 				continue;
2696*4882a593Smuzhiyun 			ef4_farch_filter_build(&filter, &table->spec[filter_idx]);
2697*4882a593Smuzhiyun 			ef4_writeo(efx, &filter,
2698*4882a593Smuzhiyun 				   table->offset + table->step * filter_idx);
2699*4882a593Smuzhiyun 		}
2700*4882a593Smuzhiyun 	}
2701*4882a593Smuzhiyun 
2702*4882a593Smuzhiyun 	ef4_farch_filter_push_rx_config(efx);
2703*4882a593Smuzhiyun 	ef4_farch_filter_push_tx_limits(efx);
2704*4882a593Smuzhiyun 
2705*4882a593Smuzhiyun 	spin_unlock_bh(&efx->filter_lock);
2706*4882a593Smuzhiyun }
2707*4882a593Smuzhiyun 
2708*4882a593Smuzhiyun void ef4_farch_filter_table_remove(struct ef4_nic *efx)
2709*4882a593Smuzhiyun {
2710*4882a593Smuzhiyun 	struct ef4_farch_filter_state *state = efx->filter_state;
2711*4882a593Smuzhiyun 	enum ef4_farch_filter_table_id table_id;
2712*4882a593Smuzhiyun 
2713*4882a593Smuzhiyun 	for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
2714*4882a593Smuzhiyun 		kfree(state->table[table_id].used_bitmap);
2715*4882a593Smuzhiyun 		vfree(state->table[table_id].spec);
2716*4882a593Smuzhiyun 	}
2717*4882a593Smuzhiyun 	kfree(state);
2718*4882a593Smuzhiyun }
2719*4882a593Smuzhiyun 
2720*4882a593Smuzhiyun int ef4_farch_filter_table_probe(struct ef4_nic *efx)
2721*4882a593Smuzhiyun {
2722*4882a593Smuzhiyun 	struct ef4_farch_filter_state *state;
2723*4882a593Smuzhiyun 	struct ef4_farch_filter_table *table;
2724*4882a593Smuzhiyun 	unsigned table_id;
2725*4882a593Smuzhiyun 
2726*4882a593Smuzhiyun 	state = kzalloc(sizeof(struct ef4_farch_filter_state), GFP_KERNEL);
2727*4882a593Smuzhiyun 	if (!state)
2728*4882a593Smuzhiyun 		return -ENOMEM;
2729*4882a593Smuzhiyun 	efx->filter_state = state;
2730*4882a593Smuzhiyun 
2731*4882a593Smuzhiyun 	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
2732*4882a593Smuzhiyun 		table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
2733*4882a593Smuzhiyun 		table->id = EF4_FARCH_FILTER_TABLE_RX_IP;
2734*4882a593Smuzhiyun 		table->offset = FR_BZ_RX_FILTER_TBL0;
2735*4882a593Smuzhiyun 		table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
2736*4882a593Smuzhiyun 		table->step = FR_BZ_RX_FILTER_TBL0_STEP;
2737*4882a593Smuzhiyun 	}
2738*4882a593Smuzhiyun 
2739*4882a593Smuzhiyun 	for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
2740*4882a593Smuzhiyun 		table = &state->table[table_id];
2741*4882a593Smuzhiyun 		if (table->size == 0)
2742*4882a593Smuzhiyun 			continue;
2743*4882a593Smuzhiyun 		table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
2744*4882a593Smuzhiyun 					     sizeof(unsigned long),
2745*4882a593Smuzhiyun 					     GFP_KERNEL);
2746*4882a593Smuzhiyun 		if (!table->used_bitmap)
2747*4882a593Smuzhiyun 			goto fail;
2748*4882a593Smuzhiyun 		table->spec = vzalloc(array_size(sizeof(*table->spec),
2749*4882a593Smuzhiyun 						 table->size));
2750*4882a593Smuzhiyun 		if (!table->spec)
2751*4882a593Smuzhiyun 			goto fail;
2752*4882a593Smuzhiyun 	}
2753*4882a593Smuzhiyun 
2754*4882a593Smuzhiyun 	table = &state->table[EF4_FARCH_FILTER_TABLE_RX_DEF];
2755*4882a593Smuzhiyun 	if (table->size) {
2756*4882a593Smuzhiyun 		/* RX default filters must always exist */
2757*4882a593Smuzhiyun 		struct ef4_farch_filter_spec *spec;
2758*4882a593Smuzhiyun 		unsigned i;
2759*4882a593Smuzhiyun 
2760*4882a593Smuzhiyun 		for (i = 0; i < EF4_FARCH_FILTER_SIZE_RX_DEF; i++) {
2761*4882a593Smuzhiyun 			spec = &table->spec[i];
2762*4882a593Smuzhiyun 			spec->type = EF4_FARCH_FILTER_UC_DEF + i;
2763*4882a593Smuzhiyun 			ef4_farch_filter_init_rx_auto(efx, spec);
2764*4882a593Smuzhiyun 			__set_bit(i, table->used_bitmap);
2765*4882a593Smuzhiyun 		}
2766*4882a593Smuzhiyun 	}
2767*4882a593Smuzhiyun 
2768*4882a593Smuzhiyun 	ef4_farch_filter_push_rx_config(efx);
2769*4882a593Smuzhiyun 
2770*4882a593Smuzhiyun 	return 0;
2771*4882a593Smuzhiyun 
2772*4882a593Smuzhiyun fail:
2773*4882a593Smuzhiyun 	ef4_farch_filter_table_remove(efx);
2774*4882a593Smuzhiyun 	return -ENOMEM;
2775*4882a593Smuzhiyun }
2776*4882a593Smuzhiyun 
2777*4882a593Smuzhiyun /* Update scatter enable flags for filters pointing to our own RX queues */
2778*4882a593Smuzhiyun void ef4_farch_filter_update_rx_scatter(struct ef4_nic *efx)
2779*4882a593Smuzhiyun {
2780*4882a593Smuzhiyun 	struct ef4_farch_filter_state *state = efx->filter_state;
2781*4882a593Smuzhiyun 	enum ef4_farch_filter_table_id table_id;
2782*4882a593Smuzhiyun 	struct ef4_farch_filter_table *table;
2783*4882a593Smuzhiyun 	ef4_oword_t filter;
2784*4882a593Smuzhiyun 	unsigned int filter_idx;
2785*4882a593Smuzhiyun 
2786*4882a593Smuzhiyun 	spin_lock_bh(&efx->filter_lock);
2787*4882a593Smuzhiyun 
2788*4882a593Smuzhiyun 	for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP;
2789*4882a593Smuzhiyun 	     table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF;
2790*4882a593Smuzhiyun 	     table_id++) {
2791*4882a593Smuzhiyun 		table = &state->table[table_id];
2792*4882a593Smuzhiyun 
2793*4882a593Smuzhiyun 		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2794*4882a593Smuzhiyun 			if (!test_bit(filter_idx, table->used_bitmap) ||
2795*4882a593Smuzhiyun 			    table->spec[filter_idx].dmaq_id >=
2796*4882a593Smuzhiyun 			    efx->n_rx_channels)
2797*4882a593Smuzhiyun 				continue;
2798*4882a593Smuzhiyun 
2799*4882a593Smuzhiyun 			if (efx->rx_scatter)
2800*4882a593Smuzhiyun 				table->spec[filter_idx].flags |=
2801*4882a593Smuzhiyun 					EF4_FILTER_FLAG_RX_SCATTER;
2802*4882a593Smuzhiyun 			else
2803*4882a593Smuzhiyun 				table->spec[filter_idx].flags &=
2804*4882a593Smuzhiyun 					~EF4_FILTER_FLAG_RX_SCATTER;
2805*4882a593Smuzhiyun 
2806*4882a593Smuzhiyun 			if (table_id == EF4_FARCH_FILTER_TABLE_RX_DEF)
2807*4882a593Smuzhiyun 				/* Pushed by ef4_farch_filter_push_rx_config() */
2808*4882a593Smuzhiyun 				continue;
2809*4882a593Smuzhiyun 
2810*4882a593Smuzhiyun 			ef4_farch_filter_build(&filter, &table->spec[filter_idx]);
2811*4882a593Smuzhiyun 			ef4_writeo(efx, &filter,
2812*4882a593Smuzhiyun 				   table->offset + table->step * filter_idx);
2813*4882a593Smuzhiyun 		}
2814*4882a593Smuzhiyun 	}
2815*4882a593Smuzhiyun 
2816*4882a593Smuzhiyun 	ef4_farch_filter_push_rx_config(efx);
2817*4882a593Smuzhiyun 
2818*4882a593Smuzhiyun 	spin_unlock_bh(&efx->filter_lock);
2819*4882a593Smuzhiyun }
2820*4882a593Smuzhiyun 
2821*4882a593Smuzhiyun #ifdef CONFIG_RFS_ACCEL
2822*4882a593Smuzhiyun 
2823*4882a593Smuzhiyun s32 ef4_farch_filter_rfs_insert(struct ef4_nic *efx,
2824*4882a593Smuzhiyun 				struct ef4_filter_spec *gen_spec)
2825*4882a593Smuzhiyun {
2826*4882a593Smuzhiyun 	return ef4_farch_filter_insert(efx, gen_spec, true);
2827*4882a593Smuzhiyun }
2828*4882a593Smuzhiyun 
2829*4882a593Smuzhiyun bool ef4_farch_filter_rfs_expire_one(struct ef4_nic *efx, u32 flow_id,
2830*4882a593Smuzhiyun 				     unsigned int index)
2831*4882a593Smuzhiyun {
2832*4882a593Smuzhiyun 	struct ef4_farch_filter_state *state = efx->filter_state;
2833*4882a593Smuzhiyun 	struct ef4_farch_filter_table *table =
2834*4882a593Smuzhiyun 		&state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
2835*4882a593Smuzhiyun 
2836*4882a593Smuzhiyun 	if (test_bit(index, table->used_bitmap) &&
2837*4882a593Smuzhiyun 	    table->spec[index].priority == EF4_FILTER_PRI_HINT &&
2838*4882a593Smuzhiyun 	    rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
2839*4882a593Smuzhiyun 				flow_id, index)) {
2840*4882a593Smuzhiyun 		ef4_farch_filter_table_clear_entry(efx, table, index);
2841*4882a593Smuzhiyun 		return true;
2842*4882a593Smuzhiyun 	}
2843*4882a593Smuzhiyun 
2844*4882a593Smuzhiyun 	return false;
2845*4882a593Smuzhiyun }
2846*4882a593Smuzhiyun 
2847*4882a593Smuzhiyun #endif /* CONFIG_RFS_ACCEL */
2848*4882a593Smuzhiyun 
2849*4882a593Smuzhiyun void ef4_farch_filter_sync_rx_mode(struct ef4_nic *efx)
2850*4882a593Smuzhiyun {
2851*4882a593Smuzhiyun 	struct net_device *net_dev = efx->net_dev;
2852*4882a593Smuzhiyun 	struct netdev_hw_addr *ha;
2853*4882a593Smuzhiyun 	union ef4_multicast_hash *mc_hash = &efx->multicast_hash;
2854*4882a593Smuzhiyun 	u32 crc;
2855*4882a593Smuzhiyun 	int bit;
2856*4882a593Smuzhiyun 
2857*4882a593Smuzhiyun 	if (!ef4_dev_registered(efx))
2858*4882a593Smuzhiyun 		return;
2859*4882a593Smuzhiyun 
2860*4882a593Smuzhiyun 	netif_addr_lock_bh(net_dev);
2861*4882a593Smuzhiyun 
2862*4882a593Smuzhiyun 	efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
2863*4882a593Smuzhiyun 
2864*4882a593Smuzhiyun 	/* Build multicast hash table */
2865*4882a593Smuzhiyun 	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2866*4882a593Smuzhiyun 		memset(mc_hash, 0xff, sizeof(*mc_hash));
2867*4882a593Smuzhiyun 	} else {
2868*4882a593Smuzhiyun 		memset(mc_hash, 0x00, sizeof(*mc_hash));
2869*4882a593Smuzhiyun 		netdev_for_each_mc_addr(ha, net_dev) {
2870*4882a593Smuzhiyun 			crc = ether_crc_le(ETH_ALEN, ha->addr);
2871*4882a593Smuzhiyun 			bit = crc & (EF4_MCAST_HASH_ENTRIES - 1);
2872*4882a593Smuzhiyun 			__set_bit_le(bit, mc_hash);
2873*4882a593Smuzhiyun 		}
2874*4882a593Smuzhiyun 
2875*4882a593Smuzhiyun 		/* Broadcast packets go through the multicast hash filter.
2876*4882a593Smuzhiyun 		 * ether_crc_le() of the broadcast address is 0xbe2612ff
2877*4882a593Smuzhiyun 		 * so we always add bit 0xff to the mask.
2878*4882a593Smuzhiyun 		 */
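		/* (0xbe2612ff & (EF4_MCAST_HASH_ENTRIES - 1) == 0xff,
		 * assuming the usual 256-entry hash.)
		 */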
2879*4882a593Smuzhiyun 		__set_bit_le(0xff, mc_hash);
2880*4882a593Smuzhiyun 	}
2881*4882a593Smuzhiyun 
2882*4882a593Smuzhiyun 	netif_addr_unlock_bh(net_dev);
2883*4882a593Smuzhiyun }