xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/sfc/falcon/falcon.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /****************************************************************************
3*4882a593Smuzhiyun  * Driver for Solarflare network controllers and boards
4*4882a593Smuzhiyun  * Copyright 2005-2006 Fen Systems Ltd.
5*4882a593Smuzhiyun  * Copyright 2006-2013 Solarflare Communications Inc.
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include <linux/bitops.h>
9*4882a593Smuzhiyun #include <linux/delay.h>
10*4882a593Smuzhiyun #include <linux/pci.h>
11*4882a593Smuzhiyun #include <linux/module.h>
12*4882a593Smuzhiyun #include <linux/seq_file.h>
13*4882a593Smuzhiyun #include <linux/i2c.h>
14*4882a593Smuzhiyun #include <linux/mii.h>
15*4882a593Smuzhiyun #include <linux/slab.h>
16*4882a593Smuzhiyun #include <linux/sched/signal.h>
17*4882a593Smuzhiyun 
18*4882a593Smuzhiyun #include "net_driver.h"
19*4882a593Smuzhiyun #include "bitfield.h"
20*4882a593Smuzhiyun #include "efx.h"
21*4882a593Smuzhiyun #include "nic.h"
22*4882a593Smuzhiyun #include "farch_regs.h"
23*4882a593Smuzhiyun #include "io.h"
24*4882a593Smuzhiyun #include "phy.h"
25*4882a593Smuzhiyun #include "workarounds.h"
26*4882a593Smuzhiyun #include "selftest.h"
27*4882a593Smuzhiyun #include "mdio_10g.h"
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun /* Hardware control for SFC4000 (aka Falcon). */
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun /**************************************************************************
32*4882a593Smuzhiyun  *
33*4882a593Smuzhiyun  * NIC stats
34*4882a593Smuzhiyun  *
35*4882a593Smuzhiyun  **************************************************************************
36*4882a593Smuzhiyun  */
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun #define FALCON_MAC_STATS_SIZE 0x100
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun #define XgRxOctets_offset 0x0
41*4882a593Smuzhiyun #define XgRxOctets_WIDTH 48
42*4882a593Smuzhiyun #define XgRxOctetsOK_offset 0x8
43*4882a593Smuzhiyun #define XgRxOctetsOK_WIDTH 48
44*4882a593Smuzhiyun #define XgRxPkts_offset 0x10
45*4882a593Smuzhiyun #define XgRxPkts_WIDTH 32
46*4882a593Smuzhiyun #define XgRxPktsOK_offset 0x14
47*4882a593Smuzhiyun #define XgRxPktsOK_WIDTH 32
48*4882a593Smuzhiyun #define XgRxBroadcastPkts_offset 0x18
49*4882a593Smuzhiyun #define XgRxBroadcastPkts_WIDTH 32
50*4882a593Smuzhiyun #define XgRxMulticastPkts_offset 0x1C
51*4882a593Smuzhiyun #define XgRxMulticastPkts_WIDTH 32
52*4882a593Smuzhiyun #define XgRxUnicastPkts_offset 0x20
53*4882a593Smuzhiyun #define XgRxUnicastPkts_WIDTH 32
54*4882a593Smuzhiyun #define XgRxUndersizePkts_offset 0x24
55*4882a593Smuzhiyun #define XgRxUndersizePkts_WIDTH 32
56*4882a593Smuzhiyun #define XgRxOversizePkts_offset 0x28
57*4882a593Smuzhiyun #define XgRxOversizePkts_WIDTH 32
58*4882a593Smuzhiyun #define XgRxJabberPkts_offset 0x2C
59*4882a593Smuzhiyun #define XgRxJabberPkts_WIDTH 32
60*4882a593Smuzhiyun #define XgRxUndersizeFCSerrorPkts_offset 0x30
61*4882a593Smuzhiyun #define XgRxUndersizeFCSerrorPkts_WIDTH 32
62*4882a593Smuzhiyun #define XgRxDropEvents_offset 0x34
63*4882a593Smuzhiyun #define XgRxDropEvents_WIDTH 32
64*4882a593Smuzhiyun #define XgRxFCSerrorPkts_offset 0x38
65*4882a593Smuzhiyun #define XgRxFCSerrorPkts_WIDTH 32
66*4882a593Smuzhiyun #define XgRxAlignError_offset 0x3C
67*4882a593Smuzhiyun #define XgRxAlignError_WIDTH 32
68*4882a593Smuzhiyun #define XgRxSymbolError_offset 0x40
69*4882a593Smuzhiyun #define XgRxSymbolError_WIDTH 32
70*4882a593Smuzhiyun #define XgRxInternalMACError_offset 0x44
71*4882a593Smuzhiyun #define XgRxInternalMACError_WIDTH 32
72*4882a593Smuzhiyun #define XgRxControlPkts_offset 0x48
73*4882a593Smuzhiyun #define XgRxControlPkts_WIDTH 32
74*4882a593Smuzhiyun #define XgRxPausePkts_offset 0x4C
75*4882a593Smuzhiyun #define XgRxPausePkts_WIDTH 32
76*4882a593Smuzhiyun #define XgRxPkts64Octets_offset 0x50
77*4882a593Smuzhiyun #define XgRxPkts64Octets_WIDTH 32
78*4882a593Smuzhiyun #define XgRxPkts65to127Octets_offset 0x54
79*4882a593Smuzhiyun #define XgRxPkts65to127Octets_WIDTH 32
80*4882a593Smuzhiyun #define XgRxPkts128to255Octets_offset 0x58
81*4882a593Smuzhiyun #define XgRxPkts128to255Octets_WIDTH 32
82*4882a593Smuzhiyun #define XgRxPkts256to511Octets_offset 0x5C
83*4882a593Smuzhiyun #define XgRxPkts256to511Octets_WIDTH 32
84*4882a593Smuzhiyun #define XgRxPkts512to1023Octets_offset 0x60
85*4882a593Smuzhiyun #define XgRxPkts512to1023Octets_WIDTH 32
86*4882a593Smuzhiyun #define XgRxPkts1024to15xxOctets_offset 0x64
87*4882a593Smuzhiyun #define XgRxPkts1024to15xxOctets_WIDTH 32
88*4882a593Smuzhiyun #define XgRxPkts15xxtoMaxOctets_offset 0x68
89*4882a593Smuzhiyun #define XgRxPkts15xxtoMaxOctets_WIDTH 32
90*4882a593Smuzhiyun #define XgRxLengthError_offset 0x6C
91*4882a593Smuzhiyun #define XgRxLengthError_WIDTH 32
92*4882a593Smuzhiyun #define XgTxPkts_offset 0x80
93*4882a593Smuzhiyun #define XgTxPkts_WIDTH 32
94*4882a593Smuzhiyun #define XgTxOctets_offset 0x88
95*4882a593Smuzhiyun #define XgTxOctets_WIDTH 48
96*4882a593Smuzhiyun #define XgTxMulticastPkts_offset 0x90
97*4882a593Smuzhiyun #define XgTxMulticastPkts_WIDTH 32
98*4882a593Smuzhiyun #define XgTxBroadcastPkts_offset 0x94
99*4882a593Smuzhiyun #define XgTxBroadcastPkts_WIDTH 32
100*4882a593Smuzhiyun #define XgTxUnicastPkts_offset 0x98
101*4882a593Smuzhiyun #define XgTxUnicastPkts_WIDTH 32
102*4882a593Smuzhiyun #define XgTxControlPkts_offset 0x9C
103*4882a593Smuzhiyun #define XgTxControlPkts_WIDTH 32
104*4882a593Smuzhiyun #define XgTxPausePkts_offset 0xA0
105*4882a593Smuzhiyun #define XgTxPausePkts_WIDTH 32
106*4882a593Smuzhiyun #define XgTxPkts64Octets_offset 0xA4
107*4882a593Smuzhiyun #define XgTxPkts64Octets_WIDTH 32
108*4882a593Smuzhiyun #define XgTxPkts65to127Octets_offset 0xA8
109*4882a593Smuzhiyun #define XgTxPkts65to127Octets_WIDTH 32
110*4882a593Smuzhiyun #define XgTxPkts128to255Octets_offset 0xAC
111*4882a593Smuzhiyun #define XgTxPkts128to255Octets_WIDTH 32
112*4882a593Smuzhiyun #define XgTxPkts256to511Octets_offset 0xB0
113*4882a593Smuzhiyun #define XgTxPkts256to511Octets_WIDTH 32
114*4882a593Smuzhiyun #define XgTxPkts512to1023Octets_offset 0xB4
115*4882a593Smuzhiyun #define XgTxPkts512to1023Octets_WIDTH 32
116*4882a593Smuzhiyun #define XgTxPkts1024to15xxOctets_offset 0xB8
117*4882a593Smuzhiyun #define XgTxPkts1024to15xxOctets_WIDTH 32
118*4882a593Smuzhiyun #define XgTxPkts1519toMaxOctets_offset 0xBC
119*4882a593Smuzhiyun #define XgTxPkts1519toMaxOctets_WIDTH 32
120*4882a593Smuzhiyun #define XgTxUndersizePkts_offset 0xC0
121*4882a593Smuzhiyun #define XgTxUndersizePkts_WIDTH 32
122*4882a593Smuzhiyun #define XgTxOversizePkts_offset 0xC4
123*4882a593Smuzhiyun #define XgTxOversizePkts_WIDTH 32
124*4882a593Smuzhiyun #define XgTxNonTcpUdpPkt_offset 0xC8
125*4882a593Smuzhiyun #define XgTxNonTcpUdpPkt_WIDTH 16
126*4882a593Smuzhiyun #define XgTxMacSrcErrPkt_offset 0xCC
127*4882a593Smuzhiyun #define XgTxMacSrcErrPkt_WIDTH 16
128*4882a593Smuzhiyun #define XgTxIpSrcErrPkt_offset 0xD0
129*4882a593Smuzhiyun #define XgTxIpSrcErrPkt_WIDTH 16
130*4882a593Smuzhiyun #define XgDmaDone_offset 0xD4
131*4882a593Smuzhiyun #define XgDmaDone_WIDTH 32
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun #define FALCON_XMAC_STATS_DMA_FLAG(efx)				\
134*4882a593Smuzhiyun 	(*(u32 *)((efx)->stats_buffer.addr + XgDmaDone_offset))
135*4882a593Smuzhiyun 
136*4882a593Smuzhiyun #define FALCON_DMA_STAT(ext_name, hw_name)				\
137*4882a593Smuzhiyun 	[FALCON_STAT_ ## ext_name] =					\
138*4882a593Smuzhiyun 	{ #ext_name,							\
139*4882a593Smuzhiyun 	  /* 48-bit stats are zero-padded to 64 on DMA */		\
140*4882a593Smuzhiyun 	  hw_name ## _ ## WIDTH == 48 ? 64 : hw_name ## _ ## WIDTH,	\
141*4882a593Smuzhiyun 	  hw_name ## _ ## offset }
142*4882a593Smuzhiyun #define FALCON_OTHER_STAT(ext_name)					\
143*4882a593Smuzhiyun 	[FALCON_STAT_ ## ext_name] = { #ext_name, 0, 0 }
144*4882a593Smuzhiyun #define GENERIC_SW_STAT(ext_name)				\
145*4882a593Smuzhiyun 	[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
146*4882a593Smuzhiyun 
/* Descriptor for each statistic the SFC4000 XMAC reports.
 * FALCON_DMA_STAT entries are DMAed by the MAC into stats_buffer at the
 * XMAC offsets defined above (48-bit counters are zero-padded to 64 bits
 * on DMA); FALCON_OTHER_STAT and GENERIC_SW_STAT entries have no DMA
 * source (width/offset 0) and are maintained in software.
 */
static const struct ef4_hw_stat_desc falcon_stat_desc[FALCON_STAT_COUNT] = {
	FALCON_DMA_STAT(tx_bytes, XgTxOctets),
	FALCON_DMA_STAT(tx_packets, XgTxPkts),
	FALCON_DMA_STAT(tx_pause, XgTxPausePkts),
	FALCON_DMA_STAT(tx_control, XgTxControlPkts),
	FALCON_DMA_STAT(tx_unicast, XgTxUnicastPkts),
	FALCON_DMA_STAT(tx_multicast, XgTxMulticastPkts),
	FALCON_DMA_STAT(tx_broadcast, XgTxBroadcastPkts),
	FALCON_DMA_STAT(tx_lt64, XgTxUndersizePkts),
	FALCON_DMA_STAT(tx_64, XgTxPkts64Octets),
	FALCON_DMA_STAT(tx_65_to_127, XgTxPkts65to127Octets),
	FALCON_DMA_STAT(tx_128_to_255, XgTxPkts128to255Octets),
	FALCON_DMA_STAT(tx_256_to_511, XgTxPkts256to511Octets),
	FALCON_DMA_STAT(tx_512_to_1023, XgTxPkts512to1023Octets),
	FALCON_DMA_STAT(tx_1024_to_15xx, XgTxPkts1024to15xxOctets),
	FALCON_DMA_STAT(tx_15xx_to_jumbo, XgTxPkts1519toMaxOctets),
	FALCON_DMA_STAT(tx_gtjumbo, XgTxOversizePkts),
	FALCON_DMA_STAT(tx_non_tcpudp, XgTxNonTcpUdpPkt),
	FALCON_DMA_STAT(tx_mac_src_error, XgTxMacSrcErrPkt),
	FALCON_DMA_STAT(tx_ip_src_error, XgTxIpSrcErrPkt),
	FALCON_DMA_STAT(rx_bytes, XgRxOctets),
	FALCON_DMA_STAT(rx_good_bytes, XgRxOctetsOK),
	/* Derived in software (no XMAC counter for bad bytes) */
	FALCON_OTHER_STAT(rx_bad_bytes),
	FALCON_DMA_STAT(rx_packets, XgRxPkts),
	FALCON_DMA_STAT(rx_good, XgRxPktsOK),
	FALCON_DMA_STAT(rx_bad, XgRxFCSerrorPkts),
	FALCON_DMA_STAT(rx_pause, XgRxPausePkts),
	FALCON_DMA_STAT(rx_control, XgRxControlPkts),
	FALCON_DMA_STAT(rx_unicast, XgRxUnicastPkts),
	FALCON_DMA_STAT(rx_multicast, XgRxMulticastPkts),
	FALCON_DMA_STAT(rx_broadcast, XgRxBroadcastPkts),
	FALCON_DMA_STAT(rx_lt64, XgRxUndersizePkts),
	FALCON_DMA_STAT(rx_64, XgRxPkts64Octets),
	FALCON_DMA_STAT(rx_65_to_127, XgRxPkts65to127Octets),
	FALCON_DMA_STAT(rx_128_to_255, XgRxPkts128to255Octets),
	FALCON_DMA_STAT(rx_256_to_511, XgRxPkts256to511Octets),
	FALCON_DMA_STAT(rx_512_to_1023, XgRxPkts512to1023Octets),
	FALCON_DMA_STAT(rx_1024_to_15xx, XgRxPkts1024to15xxOctets),
	FALCON_DMA_STAT(rx_15xx_to_jumbo, XgRxPkts15xxtoMaxOctets),
	FALCON_DMA_STAT(rx_gtjumbo, XgRxOversizePkts),
	FALCON_DMA_STAT(rx_bad_lt64, XgRxUndersizeFCSerrorPkts),
	FALCON_DMA_STAT(rx_bad_gtjumbo, XgRxJabberPkts),
	FALCON_DMA_STAT(rx_overflow, XgRxDropEvents),
	FALCON_DMA_STAT(rx_symbol_error, XgRxSymbolError),
	FALCON_DMA_STAT(rx_align_error, XgRxAlignError),
	FALCON_DMA_STAT(rx_length_error, XgRxLengthError),
	FALCON_DMA_STAT(rx_internal_error, XgRxInternalMACError),
	FALCON_OTHER_STAT(rx_nodesc_drop_cnt),
	GENERIC_SW_STAT(rx_nodesc_trunc),
	GENERIC_SW_STAT(rx_noskb_drops),
};
/* All statistics in falcon_stat_desc are always available on Falcon,
 * so the mask is simply every bit set.
 */
static const unsigned long falcon_stat_mask[] = {
	[0 ... BITS_TO_LONGS(FALCON_STAT_COUNT) - 1] = ~0UL,
};
201*4882a593Smuzhiyun 
202*4882a593Smuzhiyun /**************************************************************************
203*4882a593Smuzhiyun  *
204*4882a593Smuzhiyun  * Basic SPI command set and bit definitions
205*4882a593Smuzhiyun  *
206*4882a593Smuzhiyun  *************************************************************************/
207*4882a593Smuzhiyun 
208*4882a593Smuzhiyun #define SPI_WRSR 0x01		/* Write status register */
209*4882a593Smuzhiyun #define SPI_WRITE 0x02		/* Write data to memory array */
210*4882a593Smuzhiyun #define SPI_READ 0x03		/* Read data from memory array */
211*4882a593Smuzhiyun #define SPI_WRDI 0x04		/* Reset write enable latch */
212*4882a593Smuzhiyun #define SPI_RDSR 0x05		/* Read status register */
213*4882a593Smuzhiyun #define SPI_WREN 0x06		/* Set write enable latch */
214*4882a593Smuzhiyun #define SPI_SST_EWSR 0x50	/* SST: Enable write to status register */
215*4882a593Smuzhiyun 
216*4882a593Smuzhiyun #define SPI_STATUS_WPEN 0x80	/* Write-protect pin enabled */
217*4882a593Smuzhiyun #define SPI_STATUS_BP2 0x10	/* Block protection bit 2 */
218*4882a593Smuzhiyun #define SPI_STATUS_BP1 0x08	/* Block protection bit 1 */
219*4882a593Smuzhiyun #define SPI_STATUS_BP0 0x04	/* Block protection bit 0 */
220*4882a593Smuzhiyun #define SPI_STATUS_WEN 0x02	/* State of the write enable latch */
221*4882a593Smuzhiyun #define SPI_STATUS_NRDY 0x01	/* Device busy flag */
222*4882a593Smuzhiyun 
223*4882a593Smuzhiyun /**************************************************************************
224*4882a593Smuzhiyun  *
225*4882a593Smuzhiyun  * Non-volatile memory layout
226*4882a593Smuzhiyun  *
227*4882a593Smuzhiyun  **************************************************************************
228*4882a593Smuzhiyun  */
229*4882a593Smuzhiyun 
230*4882a593Smuzhiyun /* SFC4000 flash is partitioned into:
231*4882a593Smuzhiyun  *     0-0x400       chip and board config (see struct falcon_nvconfig)
232*4882a593Smuzhiyun  *     0x400-0x8000  unused (or may contain VPD if EEPROM not present)
233*4882a593Smuzhiyun  *     0x8000-end    boot code (mapped to PCI expansion ROM)
234*4882a593Smuzhiyun  * SFC4000 small EEPROM (size < 0x400) is used for VPD only.
235*4882a593Smuzhiyun  * SFC4000 large EEPROM (size >= 0x400) is partitioned into:
236*4882a593Smuzhiyun  *     0-0x400       chip and board config
237*4882a593Smuzhiyun  *     configurable  VPD
238*4882a593Smuzhiyun  *     0x800-0x1800  boot config
239*4882a593Smuzhiyun  * Aside from the chip and board config, all of these are optional and may
240*4882a593Smuzhiyun  * be absent or truncated depending on the devices used.
241*4882a593Smuzhiyun  */
242*4882a593Smuzhiyun #define FALCON_NVCONFIG_END 0x400U
243*4882a593Smuzhiyun #define FALCON_FLASH_BOOTCODE_START 0x8000U
244*4882a593Smuzhiyun #define FALCON_EEPROM_BOOTCONFIG_START 0x800U
245*4882a593Smuzhiyun #define FALCON_EEPROM_BOOTCONFIG_END 0x1800U
246*4882a593Smuzhiyun 
247*4882a593Smuzhiyun /* Board configuration v2 (v1 is obsolete; later versions are compatible) */
/* On-NVM board configuration, version 2 layout.  All multi-byte fields
 * are little-endian; the struct is packed because it mirrors the exact
 * byte layout stored in flash/EEPROM.
 */
struct falcon_nvconfig_board_v2 {
	__le16 nports;			/* number of network ports */
	u8 port0_phy_addr;		/* MDIO address of port 0 PHY */
	u8 port0_phy_type;
	u8 port1_phy_addr;		/* MDIO address of port 1 PHY */
	u8 port1_phy_type;
	__le16 asic_sub_revision;
	__le16 board_revision;
} __packed;
257*4882a593Smuzhiyun 
258*4882a593Smuzhiyun /* Board configuration v3 extra information */
/* Board configuration v3 extra information: one encoded SPI device
 * descriptor per device (flash, EEPROM) — see the SPI_DEV_TYPE_* field
 * definitions below for the bit layout.
 */
struct falcon_nvconfig_board_v3 {
	__le32 spi_device_type[2];
} __packed;
262*4882a593Smuzhiyun 
263*4882a593Smuzhiyun /* Bit numbers for spi_device_type */
264*4882a593Smuzhiyun #define SPI_DEV_TYPE_SIZE_LBN 0
265*4882a593Smuzhiyun #define SPI_DEV_TYPE_SIZE_WIDTH 5
266*4882a593Smuzhiyun #define SPI_DEV_TYPE_ADDR_LEN_LBN 6
267*4882a593Smuzhiyun #define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
268*4882a593Smuzhiyun #define SPI_DEV_TYPE_ERASE_CMD_LBN 8
269*4882a593Smuzhiyun #define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
270*4882a593Smuzhiyun #define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
271*4882a593Smuzhiyun #define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
272*4882a593Smuzhiyun #define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
273*4882a593Smuzhiyun #define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
274*4882a593Smuzhiyun #define SPI_DEV_TYPE_FIELD(type, field)					\
275*4882a593Smuzhiyun 	(((type) >> EF4_LOW_BIT(field)) & EF4_MASK32(EF4_WIDTH(field)))
276*4882a593Smuzhiyun 
277*4882a593Smuzhiyun #define FALCON_NVCONFIG_OFFSET 0x300
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun #define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
/* Layout of the chip/board configuration block stored in NVM at
 * FALCON_NVCONFIG_OFFSET (0x300).  Packed: the struct mirrors the exact
 * byte layout in flash/EEPROM; the trailing comments give each field's
 * NVM offset.
 */
struct falcon_nvconfig {
	ef4_oword_t ee_vpd_cfg_reg;			/* 0x300 */
	u8 mac_address[2][8];			/* 0x310 */
	ef4_oword_t pcie_sd_ctl0123_reg;		/* 0x320 */
	ef4_oword_t pcie_sd_ctl45_reg;			/* 0x330 */
	ef4_oword_t pcie_pcs_ctl_stat_reg;		/* 0x340 */
	ef4_oword_t hw_init_reg;			/* 0x350 */
	ef4_oword_t nic_stat_reg;			/* 0x360 */
	ef4_oword_t glb_ctl_reg;			/* 0x370 */
	ef4_oword_t srm_cfg_reg;			/* 0x380 */
	ef4_oword_t spare_reg;				/* 0x390 */
	/* Must equal FALCON_NVCONFIG_BOARD_MAGIC_NUM for a valid config */
	__le16 board_magic_num;			/* 0x3A0 */
	__le16 board_struct_ver;
	__le16 board_checksum;
	struct falcon_nvconfig_board_v2 board_v2;
	ef4_oword_t ee_base_page_reg;			/* 0x3B0 */
	struct falcon_nvconfig_board_v3 board_v3;	/* 0x3C0 */
} __packed;
298*4882a593Smuzhiyun 
299*4882a593Smuzhiyun /*************************************************************************/
300*4882a593Smuzhiyun 
301*4882a593Smuzhiyun static int falcon_reset_hw(struct ef4_nic *efx, enum reset_type method);
302*4882a593Smuzhiyun static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx);
303*4882a593Smuzhiyun 
/* Encoded SPI device descriptors (see SPI_DEV_TYPE_* fields above).
 * Sizes are stored as log2: e.g. 13 => 8 KB, 17 => 128 KB.
 */
static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
 * 8 KB, 16-bit address, 32 B write block */
large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
		     | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		     | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
/* Default flash device: Atmel AT25F1024
 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
		      | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
		      | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
		      | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
		      | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));
317*4882a593Smuzhiyun 
318*4882a593Smuzhiyun /**************************************************************************
319*4882a593Smuzhiyun  *
320*4882a593Smuzhiyun  * I2C bus - this is a bit-bashing interface using GPIO pins
321*4882a593Smuzhiyun  * Note that it uses the output enables to tristate the outputs
322*4882a593Smuzhiyun  * SDA is the data pin and SCL is the clock
323*4882a593Smuzhiyun  *
324*4882a593Smuzhiyun  **************************************************************************
325*4882a593Smuzhiyun  */
falcon_setsda(void * data,int state)326*4882a593Smuzhiyun static void falcon_setsda(void *data, int state)
327*4882a593Smuzhiyun {
328*4882a593Smuzhiyun 	struct ef4_nic *efx = (struct ef4_nic *)data;
329*4882a593Smuzhiyun 	ef4_oword_t reg;
330*4882a593Smuzhiyun 
331*4882a593Smuzhiyun 	ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
332*4882a593Smuzhiyun 	EF4_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
333*4882a593Smuzhiyun 	ef4_writeo(efx, &reg, FR_AB_GPIO_CTL);
334*4882a593Smuzhiyun }
335*4882a593Smuzhiyun 
falcon_setscl(void * data,int state)336*4882a593Smuzhiyun static void falcon_setscl(void *data, int state)
337*4882a593Smuzhiyun {
338*4882a593Smuzhiyun 	struct ef4_nic *efx = (struct ef4_nic *)data;
339*4882a593Smuzhiyun 	ef4_oword_t reg;
340*4882a593Smuzhiyun 
341*4882a593Smuzhiyun 	ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
342*4882a593Smuzhiyun 	EF4_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
343*4882a593Smuzhiyun 	ef4_writeo(efx, &reg, FR_AB_GPIO_CTL);
344*4882a593Smuzhiyun }
345*4882a593Smuzhiyun 
falcon_getsda(void * data)346*4882a593Smuzhiyun static int falcon_getsda(void *data)
347*4882a593Smuzhiyun {
348*4882a593Smuzhiyun 	struct ef4_nic *efx = (struct ef4_nic *)data;
349*4882a593Smuzhiyun 	ef4_oword_t reg;
350*4882a593Smuzhiyun 
351*4882a593Smuzhiyun 	ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
352*4882a593Smuzhiyun 	return EF4_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
353*4882a593Smuzhiyun }
354*4882a593Smuzhiyun 
falcon_getscl(void * data)355*4882a593Smuzhiyun static int falcon_getscl(void *data)
356*4882a593Smuzhiyun {
357*4882a593Smuzhiyun 	struct ef4_nic *efx = (struct ef4_nic *)data;
358*4882a593Smuzhiyun 	ef4_oword_t reg;
359*4882a593Smuzhiyun 
360*4882a593Smuzhiyun 	ef4_reado(efx, &reg, FR_AB_GPIO_CTL);
361*4882a593Smuzhiyun 	return EF4_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
362*4882a593Smuzhiyun }
363*4882a593Smuzhiyun 
/* Bit-banging I2C algorithm bound to the GPIO accessors above.
 * The 5 us delay sets the pacing between line transitions.
 */
static const struct i2c_algo_bit_data falcon_i2c_bit_operations = {
	.setsda		= falcon_setsda,
	.setscl		= falcon_setscl,
	.getsda		= falcon_getsda,
	.getscl		= falcon_getscl,
	.udelay		= 5,
	/* Wait up to 50 ms for slave to let us pull SCL high */
	.timeout	= DIV_ROUND_UP(HZ, 20),
};
373*4882a593Smuzhiyun 
falcon_push_irq_moderation(struct ef4_channel * channel)374*4882a593Smuzhiyun static void falcon_push_irq_moderation(struct ef4_channel *channel)
375*4882a593Smuzhiyun {
376*4882a593Smuzhiyun 	ef4_dword_t timer_cmd;
377*4882a593Smuzhiyun 	struct ef4_nic *efx = channel->efx;
378*4882a593Smuzhiyun 
379*4882a593Smuzhiyun 	/* Set timer register */
380*4882a593Smuzhiyun 	if (channel->irq_moderation_us) {
381*4882a593Smuzhiyun 		unsigned int ticks;
382*4882a593Smuzhiyun 
383*4882a593Smuzhiyun 		ticks = ef4_usecs_to_ticks(efx, channel->irq_moderation_us);
384*4882a593Smuzhiyun 		EF4_POPULATE_DWORD_2(timer_cmd,
385*4882a593Smuzhiyun 				     FRF_AB_TC_TIMER_MODE,
386*4882a593Smuzhiyun 				     FFE_BB_TIMER_MODE_INT_HLDOFF,
387*4882a593Smuzhiyun 				     FRF_AB_TC_TIMER_VAL,
388*4882a593Smuzhiyun 				     ticks - 1);
389*4882a593Smuzhiyun 	} else {
390*4882a593Smuzhiyun 		EF4_POPULATE_DWORD_2(timer_cmd,
391*4882a593Smuzhiyun 				     FRF_AB_TC_TIMER_MODE,
392*4882a593Smuzhiyun 				     FFE_BB_TIMER_MODE_DIS,
393*4882a593Smuzhiyun 				     FRF_AB_TC_TIMER_VAL, 0);
394*4882a593Smuzhiyun 	}
395*4882a593Smuzhiyun 	BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
396*4882a593Smuzhiyun 	ef4_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
397*4882a593Smuzhiyun 			       channel->channel);
398*4882a593Smuzhiyun }
399*4882a593Smuzhiyun 
400*4882a593Smuzhiyun static void falcon_deconfigure_mac_wrapper(struct ef4_nic *efx);
401*4882a593Smuzhiyun 
/* Prepare the datapath for a queue flush: stop the MAC wrapper feeding
 * the FIFOs, then give them time to drain.
 */
static void falcon_prepare_flush(struct ef4_nic *efx)
{
	falcon_deconfigure_mac_wrapper(efx);

	/* Allow the TX and RX FIFOs to reach the next packet boundary
	 * (~1 ms without back-pressure) and then drain the remainder at
	 * datapath speed (negligible), with a healthy safety margin.
	 */
	msleep(10);
}
411*4882a593Smuzhiyun 
412*4882a593Smuzhiyun /* Acknowledge a legacy interrupt from Falcon
413*4882a593Smuzhiyun  *
414*4882a593Smuzhiyun  * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
415*4882a593Smuzhiyun  *
416*4882a593Smuzhiyun  * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
417*4882a593Smuzhiyun  * BIU. Interrupt acknowledge is read sensitive so must write instead
418*4882a593Smuzhiyun  * (then read to ensure the BIU collector is flushed)
419*4882a593Smuzhiyun  *
420*4882a593Smuzhiyun  * NB most hardware supports MSI interrupts
421*4882a593Smuzhiyun  */
static inline void falcon_irq_ack_a1(struct ef4_nic *efx)
{
	ef4_dword_t reg;

	/* Write (not read) to acknowledge: reads of this register can be
	 * duplicated by the BIU on silicon rev <= A1 (SFC bug 3706), and
	 * acknowledge-on-read would then ack spuriously.  The value is an
	 * arbitrary recognisable token.
	 */
	EF4_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
	ef4_writed(efx, &reg, FR_AA_INT_ACK_KER);
	/* Read back from a scratch address to flush the BIU write collector */
	ef4_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
}
430*4882a593Smuzhiyun 
/* Legacy (INTx) interrupt handler for silicon rev A1.
 *
 * The hardware DMAs an interrupt vector into irq_status; a zero vector
 * means the interrupt was not ours.  The vector must be cleared before
 * the device interrupt is acknowledged (hence the wmb()), otherwise a
 * new event could be lost.  Only channels 0 and 1 exist on A1.
 */
static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
	struct ef4_nic *efx = dev_id;
	ef4_oword_t *int_ker = efx->irq_status.addr;
	int syserr;
	int queues;

	/* Check to see if this is our interrupt.  If it isn't, we
	 * exit without having touched the hardware.
	 */
	if (unlikely(EF4_OWORD_IS_ZERO(*int_ker))) {
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d not for me\n", irq,
			   raw_smp_processor_id());
		return IRQ_NONE;
	}
	efx->last_irq_cpu = raw_smp_processor_id();
	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));

	/* Interrupts may fire before the driver is ready for them */
	if (!likely(READ_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Check to see if we have a serious error condition */
	syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return ef4_farch_fatal_interrupt(efx);

	/* Determine interrupting queues, clear interrupt status
	 * register and acknowledge the device interrupt.
	 */
	BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EF4_MAX_CHANNELS);
	queues = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
	EF4_ZERO_OWORD(*int_ker);
	wmb(); /* Ensure the vector is cleared before interrupt ack */
	falcon_irq_ack_a1(efx);

	/* Schedule NAPI processing for each interrupting queue */
	if (queues & 1)
		ef4_schedule_channel_irq(ef4_get_channel(efx, 0));
	if (queues & 2)
		ef4_schedule_channel_irq(ef4_get_channel(efx, 1));
	return IRQ_HANDLED;
}
475*4882a593Smuzhiyun 
476*4882a593Smuzhiyun /**************************************************************************
477*4882a593Smuzhiyun  *
478*4882a593Smuzhiyun  * RSS
479*4882a593Smuzhiyun  *
480*4882a593Smuzhiyun  **************************************************************************
481*4882a593Smuzhiyun  */
/* RSS stub for hardware without RSS support: always reports failure. */
static int dummy_rx_push_rss_config(struct ef4_nic *efx, bool user,
				    const u32 *rx_indir_table)
{
	/* All parameters are deliberately unused */
	(void)efx;
	(void)user;
	(void)rx_indir_table;

	return -ENOSYS;
}
490*4882a593Smuzhiyun 
/* Push the RSS hash key and indirection table to Falcon B0 hardware.
 * The @user flag is ignored: B0 has no separate user context.
 */
static int falcon_b0_rx_push_rss_config(struct ef4_nic *efx, bool user,
					const u32 *rx_indir_table)
{
	ef4_oword_t hash_key;

	(void)user;

	/* Program the IPv4 hash key register */
	memcpy(&hash_key, efx->rx_hash_key, sizeof(hash_key));
	ef4_writeo(efx, &hash_key, FR_BZ_RX_RSS_TKEY);

	/* Cache the indirection table, then write it to the hardware */
	memcpy(efx->rx_indir_table, rx_indir_table,
	       sizeof(efx->rx_indir_table));
	ef4_farch_rx_push_indir_table(efx);

	return 0;
}
506*4882a593Smuzhiyun 
507*4882a593Smuzhiyun /**************************************************************************
508*4882a593Smuzhiyun  *
509*4882a593Smuzhiyun  * EEPROM/flash
510*4882a593Smuzhiyun  *
511*4882a593Smuzhiyun  **************************************************************************
512*4882a593Smuzhiyun  */
513*4882a593Smuzhiyun 
514*4882a593Smuzhiyun #define FALCON_SPI_MAX_LEN sizeof(ef4_oword_t)
515*4882a593Smuzhiyun 
falcon_spi_poll(struct ef4_nic * efx)516*4882a593Smuzhiyun static int falcon_spi_poll(struct ef4_nic *efx)
517*4882a593Smuzhiyun {
518*4882a593Smuzhiyun 	ef4_oword_t reg;
519*4882a593Smuzhiyun 	ef4_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
520*4882a593Smuzhiyun 	return EF4_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
521*4882a593Smuzhiyun }
522*4882a593Smuzhiyun 
523*4882a593Smuzhiyun /* Wait for SPI command completion */
falcon_spi_wait(struct ef4_nic * efx)524*4882a593Smuzhiyun static int falcon_spi_wait(struct ef4_nic *efx)
525*4882a593Smuzhiyun {
526*4882a593Smuzhiyun 	/* Most commands will finish quickly, so we start polling at
527*4882a593Smuzhiyun 	 * very short intervals.  Sometimes the command may have to
528*4882a593Smuzhiyun 	 * wait for VPD or expansion ROM access outside of our
529*4882a593Smuzhiyun 	 * control, so we allow up to 100 ms. */
530*4882a593Smuzhiyun 	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
531*4882a593Smuzhiyun 	int i;
532*4882a593Smuzhiyun 
533*4882a593Smuzhiyun 	for (i = 0; i < 10; i++) {
534*4882a593Smuzhiyun 		if (!falcon_spi_poll(efx))
535*4882a593Smuzhiyun 			return 0;
536*4882a593Smuzhiyun 		udelay(10);
537*4882a593Smuzhiyun 	}
538*4882a593Smuzhiyun 
539*4882a593Smuzhiyun 	for (;;) {
540*4882a593Smuzhiyun 		if (!falcon_spi_poll(efx))
541*4882a593Smuzhiyun 			return 0;
542*4882a593Smuzhiyun 		if (time_after_eq(jiffies, timeout)) {
543*4882a593Smuzhiyun 			netif_err(efx, hw, efx->net_dev,
544*4882a593Smuzhiyun 				  "timed out waiting for SPI\n");
545*4882a593Smuzhiyun 			return -ETIMEDOUT;
546*4882a593Smuzhiyun 		}
547*4882a593Smuzhiyun 		schedule_timeout_uninterruptible(1);
548*4882a593Smuzhiyun 	}
549*4882a593Smuzhiyun }
550*4882a593Smuzhiyun 
551*4882a593Smuzhiyun static int
falcon_spi_cmd(struct ef4_nic * efx,const struct falcon_spi_device * spi,unsigned int command,int address,const void * in,void * out,size_t len)552*4882a593Smuzhiyun falcon_spi_cmd(struct ef4_nic *efx, const struct falcon_spi_device *spi,
553*4882a593Smuzhiyun 	       unsigned int command, int address,
554*4882a593Smuzhiyun 	       const void *in, void *out, size_t len)
555*4882a593Smuzhiyun {
556*4882a593Smuzhiyun 	bool addressed = (address >= 0);
557*4882a593Smuzhiyun 	bool reading = (out != NULL);
558*4882a593Smuzhiyun 	ef4_oword_t reg;
559*4882a593Smuzhiyun 	int rc;
560*4882a593Smuzhiyun 
561*4882a593Smuzhiyun 	/* Input validation */
562*4882a593Smuzhiyun 	if (len > FALCON_SPI_MAX_LEN)
563*4882a593Smuzhiyun 		return -EINVAL;
564*4882a593Smuzhiyun 
565*4882a593Smuzhiyun 	/* Check that previous command is not still running */
566*4882a593Smuzhiyun 	rc = falcon_spi_poll(efx);
567*4882a593Smuzhiyun 	if (rc)
568*4882a593Smuzhiyun 		return rc;
569*4882a593Smuzhiyun 
570*4882a593Smuzhiyun 	/* Program address register, if we have an address */
571*4882a593Smuzhiyun 	if (addressed) {
572*4882a593Smuzhiyun 		EF4_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
573*4882a593Smuzhiyun 		ef4_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
574*4882a593Smuzhiyun 	}
575*4882a593Smuzhiyun 
576*4882a593Smuzhiyun 	/* Program data register, if we have data */
577*4882a593Smuzhiyun 	if (in != NULL) {
578*4882a593Smuzhiyun 		memcpy(&reg, in, len);
579*4882a593Smuzhiyun 		ef4_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
580*4882a593Smuzhiyun 	}
581*4882a593Smuzhiyun 
582*4882a593Smuzhiyun 	/* Issue read/write command */
583*4882a593Smuzhiyun 	EF4_POPULATE_OWORD_7(reg,
584*4882a593Smuzhiyun 			     FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
585*4882a593Smuzhiyun 			     FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
586*4882a593Smuzhiyun 			     FRF_AB_EE_SPI_HCMD_DABCNT, len,
587*4882a593Smuzhiyun 			     FRF_AB_EE_SPI_HCMD_READ, reading,
588*4882a593Smuzhiyun 			     FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
589*4882a593Smuzhiyun 			     FRF_AB_EE_SPI_HCMD_ADBCNT,
590*4882a593Smuzhiyun 			     (addressed ? spi->addr_len : 0),
591*4882a593Smuzhiyun 			     FRF_AB_EE_SPI_HCMD_ENC, command);
592*4882a593Smuzhiyun 	ef4_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);
593*4882a593Smuzhiyun 
594*4882a593Smuzhiyun 	/* Wait for read/write to complete */
595*4882a593Smuzhiyun 	rc = falcon_spi_wait(efx);
596*4882a593Smuzhiyun 	if (rc)
597*4882a593Smuzhiyun 		return rc;
598*4882a593Smuzhiyun 
599*4882a593Smuzhiyun 	/* Read data */
600*4882a593Smuzhiyun 	if (out != NULL) {
601*4882a593Smuzhiyun 		ef4_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
602*4882a593Smuzhiyun 		memcpy(out, &reg, len);
603*4882a593Smuzhiyun 	}
604*4882a593Smuzhiyun 
605*4882a593Smuzhiyun 	return 0;
606*4882a593Smuzhiyun }
607*4882a593Smuzhiyun 
608*4882a593Smuzhiyun static inline u8
falcon_spi_munge_command(const struct falcon_spi_device * spi,const u8 command,const unsigned int address)609*4882a593Smuzhiyun falcon_spi_munge_command(const struct falcon_spi_device *spi,
610*4882a593Smuzhiyun 			 const u8 command, const unsigned int address)
611*4882a593Smuzhiyun {
612*4882a593Smuzhiyun 	return command | (((address >> 8) & spi->munge_address) << 3);
613*4882a593Smuzhiyun }
614*4882a593Smuzhiyun 
615*4882a593Smuzhiyun static int
falcon_spi_read(struct ef4_nic * efx,const struct falcon_spi_device * spi,loff_t start,size_t len,size_t * retlen,u8 * buffer)616*4882a593Smuzhiyun falcon_spi_read(struct ef4_nic *efx, const struct falcon_spi_device *spi,
617*4882a593Smuzhiyun 		loff_t start, size_t len, size_t *retlen, u8 *buffer)
618*4882a593Smuzhiyun {
619*4882a593Smuzhiyun 	size_t block_len, pos = 0;
620*4882a593Smuzhiyun 	unsigned int command;
621*4882a593Smuzhiyun 	int rc = 0;
622*4882a593Smuzhiyun 
623*4882a593Smuzhiyun 	while (pos < len) {
624*4882a593Smuzhiyun 		block_len = min(len - pos, FALCON_SPI_MAX_LEN);
625*4882a593Smuzhiyun 
626*4882a593Smuzhiyun 		command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
627*4882a593Smuzhiyun 		rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
628*4882a593Smuzhiyun 				    buffer + pos, block_len);
629*4882a593Smuzhiyun 		if (rc)
630*4882a593Smuzhiyun 			break;
631*4882a593Smuzhiyun 		pos += block_len;
632*4882a593Smuzhiyun 
633*4882a593Smuzhiyun 		/* Avoid locking up the system */
634*4882a593Smuzhiyun 		cond_resched();
635*4882a593Smuzhiyun 		if (signal_pending(current)) {
636*4882a593Smuzhiyun 			rc = -EINTR;
637*4882a593Smuzhiyun 			break;
638*4882a593Smuzhiyun 		}
639*4882a593Smuzhiyun 	}
640*4882a593Smuzhiyun 
641*4882a593Smuzhiyun 	if (retlen)
642*4882a593Smuzhiyun 		*retlen = pos;
643*4882a593Smuzhiyun 	return rc;
644*4882a593Smuzhiyun }
645*4882a593Smuzhiyun 
646*4882a593Smuzhiyun #ifdef CONFIG_SFC_FALCON_MTD
647*4882a593Smuzhiyun 
/* One exported MTD partition backed by a region of a Falcon SPI device */
struct falcon_mtd_partition {
	struct ef4_mtd_partition common;
	const struct falcon_spi_device *spi;	/* backing flash/EEPROM */
	size_t offset;				/* partition start within the device */
};

/* Recover the falcon partition from the embedded struct mtd_info */
#define to_falcon_mtd_partition(mtd)				\
	container_of(mtd, struct falcon_mtd_partition, common.mtd)
656*4882a593Smuzhiyun 
657*4882a593Smuzhiyun static size_t
falcon_spi_write_limit(const struct falcon_spi_device * spi,size_t start)658*4882a593Smuzhiyun falcon_spi_write_limit(const struct falcon_spi_device *spi, size_t start)
659*4882a593Smuzhiyun {
660*4882a593Smuzhiyun 	return min(FALCON_SPI_MAX_LEN,
661*4882a593Smuzhiyun 		   (spi->block_size - (start & (spi->block_size - 1))));
662*4882a593Smuzhiyun }
663*4882a593Smuzhiyun 
664*4882a593Smuzhiyun /* Wait up to 10 ms for buffered write completion */
665*4882a593Smuzhiyun static int
falcon_spi_wait_write(struct ef4_nic * efx,const struct falcon_spi_device * spi)666*4882a593Smuzhiyun falcon_spi_wait_write(struct ef4_nic *efx, const struct falcon_spi_device *spi)
667*4882a593Smuzhiyun {
668*4882a593Smuzhiyun 	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
669*4882a593Smuzhiyun 	u8 status;
670*4882a593Smuzhiyun 	int rc;
671*4882a593Smuzhiyun 
672*4882a593Smuzhiyun 	for (;;) {
673*4882a593Smuzhiyun 		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
674*4882a593Smuzhiyun 				    &status, sizeof(status));
675*4882a593Smuzhiyun 		if (rc)
676*4882a593Smuzhiyun 			return rc;
677*4882a593Smuzhiyun 		if (!(status & SPI_STATUS_NRDY))
678*4882a593Smuzhiyun 			return 0;
679*4882a593Smuzhiyun 		if (time_after_eq(jiffies, timeout)) {
680*4882a593Smuzhiyun 			netif_err(efx, hw, efx->net_dev,
681*4882a593Smuzhiyun 				  "SPI write timeout on device %d"
682*4882a593Smuzhiyun 				  " last status=0x%02x\n",
683*4882a593Smuzhiyun 				  spi->device_id, status);
684*4882a593Smuzhiyun 			return -ETIMEDOUT;
685*4882a593Smuzhiyun 		}
686*4882a593Smuzhiyun 		schedule_timeout_uninterruptible(1);
687*4882a593Smuzhiyun 	}
688*4882a593Smuzhiyun }
689*4882a593Smuzhiyun 
690*4882a593Smuzhiyun static int
falcon_spi_write(struct ef4_nic * efx,const struct falcon_spi_device * spi,loff_t start,size_t len,size_t * retlen,const u8 * buffer)691*4882a593Smuzhiyun falcon_spi_write(struct ef4_nic *efx, const struct falcon_spi_device *spi,
692*4882a593Smuzhiyun 		 loff_t start, size_t len, size_t *retlen, const u8 *buffer)
693*4882a593Smuzhiyun {
694*4882a593Smuzhiyun 	u8 verify_buffer[FALCON_SPI_MAX_LEN];
695*4882a593Smuzhiyun 	size_t block_len, pos = 0;
696*4882a593Smuzhiyun 	unsigned int command;
697*4882a593Smuzhiyun 	int rc = 0;
698*4882a593Smuzhiyun 
699*4882a593Smuzhiyun 	while (pos < len) {
700*4882a593Smuzhiyun 		rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
701*4882a593Smuzhiyun 		if (rc)
702*4882a593Smuzhiyun 			break;
703*4882a593Smuzhiyun 
704*4882a593Smuzhiyun 		block_len = min(len - pos,
705*4882a593Smuzhiyun 				falcon_spi_write_limit(spi, start + pos));
706*4882a593Smuzhiyun 		command = falcon_spi_munge_command(spi, SPI_WRITE, start + pos);
707*4882a593Smuzhiyun 		rc = falcon_spi_cmd(efx, spi, command, start + pos,
708*4882a593Smuzhiyun 				    buffer + pos, NULL, block_len);
709*4882a593Smuzhiyun 		if (rc)
710*4882a593Smuzhiyun 			break;
711*4882a593Smuzhiyun 
712*4882a593Smuzhiyun 		rc = falcon_spi_wait_write(efx, spi);
713*4882a593Smuzhiyun 		if (rc)
714*4882a593Smuzhiyun 			break;
715*4882a593Smuzhiyun 
716*4882a593Smuzhiyun 		command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
717*4882a593Smuzhiyun 		rc = falcon_spi_cmd(efx, spi, command, start + pos,
718*4882a593Smuzhiyun 				    NULL, verify_buffer, block_len);
719*4882a593Smuzhiyun 		if (memcmp(verify_buffer, buffer + pos, block_len)) {
720*4882a593Smuzhiyun 			rc = -EIO;
721*4882a593Smuzhiyun 			break;
722*4882a593Smuzhiyun 		}
723*4882a593Smuzhiyun 
724*4882a593Smuzhiyun 		pos += block_len;
725*4882a593Smuzhiyun 
726*4882a593Smuzhiyun 		/* Avoid locking up the system */
727*4882a593Smuzhiyun 		cond_resched();
728*4882a593Smuzhiyun 		if (signal_pending(current)) {
729*4882a593Smuzhiyun 			rc = -EINTR;
730*4882a593Smuzhiyun 			break;
731*4882a593Smuzhiyun 		}
732*4882a593Smuzhiyun 	}
733*4882a593Smuzhiyun 
734*4882a593Smuzhiyun 	if (retlen)
735*4882a593Smuzhiyun 		*retlen = pos;
736*4882a593Smuzhiyun 	return rc;
737*4882a593Smuzhiyun }
738*4882a593Smuzhiyun 
/* Wait for a slow SPI operation (e.g. sector erase) to complete,
 * polling the status register every 100 ms for up to 4 seconds.
 * @uninterruptible selects the sleep state; even in the interruptible
 * case a pending signal aborts the wait with -EINTR.
 * Returns 0 when the device reports ready, a SPI error, -EINTR, or
 * -ETIMEDOUT.
 */
static int
falcon_spi_slow_wait(struct falcon_mtd_partition *part, bool uninterruptible)
{
	const struct falcon_spi_device *spi = part->spi;
	struct ef4_nic *efx = part->common.mtd.priv;
	u8 status;
	int rc, i;

	/* Wait up to 4s for flash/EEPROM to finish a slow operation. */
	for (i = 0; i < 40; i++) {
		/* Sleep ~100 ms before each status poll */
		__set_current_state(uninterruptible ?
				    TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);
		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (signal_pending(current))
			return -EINTR;
	}
	pr_err("%s: timed out waiting for %s\n",
	       part->common.name, part->common.dev_type_name);
	return -ETIMEDOUT;
}
765*4882a593Smuzhiyun 
/* Clear the block-protection bits (BP0-BP2) in the SPI device's status
 * register so that erase/write operations are permitted.  The command
 * order matters: write-enable, then enable-write-status-register
 * (SST-style parts), then the status write itself.
 * Returns 0 on success or a SPI error code.
 */
static int
falcon_spi_unlock(struct ef4_nic *efx, const struct falcon_spi_device *spi)
{
	const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
				SPI_STATUS_BP0);
	u8 status;
	int rc;

	rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
			    &status, sizeof(status));
	if (rc)
		return rc;

	if (!(status & unlock_mask))
		return 0; /* already unlocked */

	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
	if (rc)
		return rc;

	/* Rewrite the status register with the protection bits cleared */
	status &= ~unlock_mask;
	rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
			    NULL, sizeof(status));
	if (rc)
		return rc;
	rc = falcon_spi_wait_write(efx, spi);
	if (rc)
		return rc;

	return 0;
}
800*4882a593Smuzhiyun 
801*4882a593Smuzhiyun #define FALCON_SPI_VERIFY_BUF_LEN 16
802*4882a593Smuzhiyun 
/* Erase one whole erase block at @start (@len must equal the device's
 * erase size), then read the region back and verify every byte is 0xff.
 * Returns 0 on success; -EINVAL/-EOPNOTSUPP for bad parameters or a
 * device without an erase command; -EIO if verification fails; or an
 * error from the SPI layer.
 */
static int
falcon_spi_erase(struct falcon_mtd_partition *part, loff_t start, size_t len)
{
	const struct falcon_spi_device *spi = part->spi;
	struct ef4_nic *efx = part->common.mtd.priv;
	unsigned pos, block_len;
	u8 empty[FALCON_SPI_VERIFY_BUF_LEN];
	u8 buffer[FALCON_SPI_VERIFY_BUF_LEN];
	int rc;

	if (len != spi->erase_size)
		return -EINVAL;

	if (spi->erase_command == 0)
		return -EOPNOTSUPP;

	rc = falcon_spi_unlock(efx, spi);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
			    NULL, 0);
	if (rc)
		return rc;
	/* NOTE: rc is deliberately not checked here; the verify loop below
	 * still runs, and if it completes without error the slow-wait
	 * result (possibly -EINTR/-ETIMEDOUT) is returned at the end. */
	rc = falcon_spi_slow_wait(part, false);

	/* Verify the entire region has been wiped */
	memset(empty, 0xff, sizeof(empty));
	for (pos = 0; pos < len; pos += block_len) {
		block_len = min(len - pos, sizeof(buffer));
		rc = falcon_spi_read(efx, spi, start + pos, block_len,
				     NULL, buffer);
		if (rc)
			return rc;
		if (memcmp(empty, buffer, block_len))
			return -EIO;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}

	return rc;
}
850*4882a593Smuzhiyun 
falcon_mtd_rename(struct ef4_mtd_partition * part)851*4882a593Smuzhiyun static void falcon_mtd_rename(struct ef4_mtd_partition *part)
852*4882a593Smuzhiyun {
853*4882a593Smuzhiyun 	struct ef4_nic *efx = part->mtd.priv;
854*4882a593Smuzhiyun 
855*4882a593Smuzhiyun 	snprintf(part->name, sizeof(part->name), "%s %s",
856*4882a593Smuzhiyun 		 efx->name, part->type_name);
857*4882a593Smuzhiyun }
858*4882a593Smuzhiyun 
/* MTD read op: translate to a SPI read at the partition's offset,
 * serialised against other SPI users via nic_data->spi_lock.
 */
static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
			   size_t len, size_t *retlen, u8 *buffer)
{
	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
	struct ef4_nic *efx = mtd->priv;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int err;

	err = mutex_lock_interruptible(&nic_data->spi_lock);
	if (err)
		return err;

	err = falcon_spi_read(efx, part->spi, part->offset + start,
			      len, retlen, buffer);

	mutex_unlock(&nic_data->spi_lock);
	return err;
}
875*4882a593Smuzhiyun 
/* MTD erase op: translate to a SPI erase at the partition's offset,
 * serialised against other SPI users via nic_data->spi_lock.
 */
static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
	struct ef4_nic *efx = mtd->priv;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int err;

	err = mutex_lock_interruptible(&nic_data->spi_lock);
	if (err)
		return err;

	err = falcon_spi_erase(part, part->offset + start, len);

	mutex_unlock(&nic_data->spi_lock);
	return err;
}
890*4882a593Smuzhiyun 
/* MTD write op: translate to a SPI write at the partition's offset,
 * serialised against other SPI users via nic_data->spi_lock.
 */
static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
			    size_t len, size_t *retlen, const u8 *buffer)
{
	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
	struct ef4_nic *efx = mtd->priv;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int err;

	err = mutex_lock_interruptible(&nic_data->spi_lock);
	if (err)
		return err;

	err = falcon_spi_write(efx, part->spi, part->offset + start,
			       len, retlen, buffer);

	mutex_unlock(&nic_data->spi_lock);
	return err;
}
907*4882a593Smuzhiyun 
falcon_mtd_sync(struct mtd_info * mtd)908*4882a593Smuzhiyun static int falcon_mtd_sync(struct mtd_info *mtd)
909*4882a593Smuzhiyun {
910*4882a593Smuzhiyun 	struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
911*4882a593Smuzhiyun 	struct ef4_nic *efx = mtd->priv;
912*4882a593Smuzhiyun 	struct falcon_nic_data *nic_data = efx->nic_data;
913*4882a593Smuzhiyun 	int rc;
914*4882a593Smuzhiyun 
915*4882a593Smuzhiyun 	mutex_lock(&nic_data->spi_lock);
916*4882a593Smuzhiyun 	rc = falcon_spi_slow_wait(part, true);
917*4882a593Smuzhiyun 	mutex_unlock(&nic_data->spi_lock);
918*4882a593Smuzhiyun 	return rc;
919*4882a593Smuzhiyun }
920*4882a593Smuzhiyun 
/* Register MTD partitions: the boot ROM region of the SPI flash and the
 * boot-config region of the SPI EEPROM, each only if the device is
 * present and large enough to contain its region.
 * Returns 0 on success or a negative error (ownership of the partition
 * array passes to ef4_mtd_add on success; it is freed here on failure).
 */
static int falcon_mtd_probe(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_mtd_partition *parts;
	struct falcon_spi_device *spi;
	size_t n_parts;
	int rc = -ENODEV;

	ASSERT_RTNL();

	/* Allocate space for maximum number of partitions */
	parts = kcalloc(2, sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;
	n_parts = 0;

	/* Flash partition: everything beyond the bootcode start offset */
	spi = &nic_data->spi_flash;
	if (falcon_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
		parts[n_parts].spi = spi;
		parts[n_parts].offset = FALCON_FLASH_BOOTCODE_START;
		parts[n_parts].common.dev_type_name = "flash";
		parts[n_parts].common.type_name = "sfc_flash_bootrom";
		parts[n_parts].common.mtd.type = MTD_NORFLASH;
		parts[n_parts].common.mtd.flags = MTD_CAP_NORFLASH;
		parts[n_parts].common.mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
		parts[n_parts].common.mtd.erasesize = spi->erase_size;
		n_parts++;
	}

	/* EEPROM partition: the boot-config window, clamped to device size */
	spi = &nic_data->spi_eeprom;
	if (falcon_spi_present(spi) && spi->size > FALCON_EEPROM_BOOTCONFIG_START) {
		parts[n_parts].spi = spi;
		parts[n_parts].offset = FALCON_EEPROM_BOOTCONFIG_START;
		parts[n_parts].common.dev_type_name = "EEPROM";
		parts[n_parts].common.type_name = "sfc_bootconfig";
		parts[n_parts].common.mtd.type = MTD_RAM;
		parts[n_parts].common.mtd.flags = MTD_CAP_RAM;
		parts[n_parts].common.mtd.size =
			min(spi->size, FALCON_EEPROM_BOOTCONFIG_END) -
			FALCON_EEPROM_BOOTCONFIG_START;
		parts[n_parts].common.mtd.erasesize = spi->erase_size;
		n_parts++;
	}

	rc = ef4_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
	if (rc)
		kfree(parts);
	return rc;
}
970*4882a593Smuzhiyun 
971*4882a593Smuzhiyun #endif /* CONFIG_SFC_FALCON_MTD */
972*4882a593Smuzhiyun 
973*4882a593Smuzhiyun /**************************************************************************
974*4882a593Smuzhiyun  *
975*4882a593Smuzhiyun  * XMAC operations
976*4882a593Smuzhiyun  *
977*4882a593Smuzhiyun  **************************************************************************
978*4882a593Smuzhiyun  */
979*4882a593Smuzhiyun 
980*4882a593Smuzhiyun /* Configure the XAUI driver that is an output from Falcon */
static void falcon_setup_xaui(struct ef4_nic *efx)
{
	ef4_oword_t sdctl, txdrv;

	/* Move the XAUI into low power, unless there is no PHY, in
	 * which case the XAUI will have to drive a cable. */
	if (efx->phy_type == PHY_TYPE_NONE)
		return;

	/* Set default serdes drive levels on all four lanes (A-D) */
	ef4_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
	EF4_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
	ef4_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);

	/* Set default TX driver de-emphasis and drive values per lane */
	EF4_POPULATE_OWORD_8(txdrv,
			     FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
			     FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
			     FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
	ef4_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
}
1012*4882a593Smuzhiyun 
/* Reset the XAUI/XGXS block and reinitialise the XAUI drivers.
 * Must be called with MAC statistics fetching disabled (the WARN_ON
 * asserts this).  Returns 0 on success or -ETIMEDOUT if the reset does
 * not complete within ~10 ms.
 */
int falcon_reset_xaui(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;
	int count;

	/* Don't fetch MAC statistics over an XMAC reset */
	WARN_ON(nic_data->stats_disable_count == 0);

	/* Start reset sequence */
	EF4_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
	ef4_writeo(efx, &reg, FR_AB_XX_PWR_RST);

	/* Wait up to 10 ms for completion, then reinitialise */
	for (count = 0; count < 1000; count++) {
		ef4_reado(efx, &reg, FR_AB_XX_PWR_RST);
		/* Reset is done when both the enable and serdes-reset-active
		 * bits have cleared */
		if (EF4_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
		    EF4_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
			falcon_setup_xaui(efx);
			return 0;
		}
		udelay(10);
	}
	netif_err(efx, hw, efx->net_dev,
		  "timed out waiting for XAUI/XGXS reset\n");
	return -ETIMEDOUT;
}
1040*4882a593Smuzhiyun 
/* Acknowledge an XM management status interrupt by reading the mask
 * register.  Only applicable on Falcon B0 when not in an internal
 * loopback and the wireside link is up.
 */
static void falcon_ack_status_intr(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;

	if ((ef4_nic_rev(efx) != EF4_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
		return;

	/* We expect xgmii faults if the wireside link is down */
	if (!efx->link_state.up)
		return;

	/* We can only use this interrupt to signal the negative edge of
	 * xaui_align [we have to poll the positive edge]. */
	if (nic_data->xmac_poll_required)
		return;

	/* Reading the mask register acknowledges the interrupt */
	ef4_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
}
1060*4882a593Smuzhiyun 
/* Return true if the MAC-side XGXS link is up: alignment complete and
 * all four lanes synchronised.  Note this also clears the latched
 * comma-detect / character-error / disparity-error status bits, so each
 * call reports events since the previous call.
 */
static bool falcon_xgxs_link_ok(struct ef4_nic *efx)
{
	ef4_oword_t reg;
	bool align_done, link_ok = false;
	int sync_status;

	/* Read link status */
	ef4_reado(efx, &reg, FR_AB_XX_CORE_STAT);

	align_done = EF4_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
	sync_status = EF4_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
	if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
		link_ok = true;

	/* Clear link status ready for next read */
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
	ef4_writeo(efx, &reg, FR_AB_XX_CORE_STAT);

	return link_ok;
}
1083*4882a593Smuzhiyun 
falcon_xmac_link_ok(struct ef4_nic * efx)1084*4882a593Smuzhiyun static bool falcon_xmac_link_ok(struct ef4_nic *efx)
1085*4882a593Smuzhiyun {
1086*4882a593Smuzhiyun 	/*
1087*4882a593Smuzhiyun 	 * Check MAC's XGXS link status except when using XGMII loopback
1088*4882a593Smuzhiyun 	 * which bypasses the XGXS block.
1089*4882a593Smuzhiyun 	 * If possible, check PHY's XGXS link status except when using
1090*4882a593Smuzhiyun 	 * MAC loopback.
1091*4882a593Smuzhiyun 	 */
1092*4882a593Smuzhiyun 	return (efx->loopback_mode == LOOPBACK_XGMII ||
1093*4882a593Smuzhiyun 		falcon_xgxs_link_ok(efx)) &&
1094*4882a593Smuzhiyun 		(!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
1095*4882a593Smuzhiyun 		 LOOPBACK_INTERNAL(efx) ||
1096*4882a593Smuzhiyun 		 ef4_mdio_phyxgxs_lane_sync(efx));
1097*4882a593Smuzhiyun }
1098*4882a593Smuzhiyun 
/* Program the XMAC core registers: global config, TX/RX enables, frame
 * size limits, pause-frame (flow control) settings and station MAC
 * address, derived from the current link and device state.
 */
static void falcon_reconfigure_xmac_core(struct ef4_nic *efx)
{
	unsigned int max_frame_len;
	ef4_oword_t reg;
	bool rx_fc = !!(efx->link_state.fc & EF4_FC_RX);
	bool tx_fc = !!(efx->link_state.fc & EF4_FC_TX);

	/* Configure MAC  - cut-thru mode is hard wired on */
	EF4_POPULATE_OWORD_3(reg,
			     FRF_AB_XM_RX_JUMBO_MODE, 1,
			     FRF_AB_XM_TX_STAT_EN, 1,
			     FRF_AB_XM_RX_STAT_EN, 1);
	ef4_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

	/* Configure TX */
	EF4_POPULATE_OWORD_6(reg,
			     FRF_AB_XM_TXEN, 1,
			     FRF_AB_XM_TX_PRMBL, 1,
			     FRF_AB_XM_AUTO_PAD, 1,
			     FRF_AB_XM_TXCRC, 1,
			     FRF_AB_XM_FCNTL, tx_fc,
			     FRF_AB_XM_IPG, 0x3);
	ef4_writeo(efx, &reg, FR_AB_XM_TX_CFG);

	/* Configure RX */
	EF4_POPULATE_OWORD_5(reg,
			     FRF_AB_XM_RXEN, 1,
			     FRF_AB_XM_AUTO_DEPAD, 0,
			     FRF_AB_XM_ACPT_ALL_MCAST, 1,
			     FRF_AB_XM_ACPT_ALL_UCAST, !efx->unicast_filter,
			     FRF_AB_XM_PASS_CRC_ERR, 1);
	ef4_writeo(efx, &reg, FR_AB_XM_RX_CFG);

	/* Set frame length */
	max_frame_len = EF4_MAX_FRAME_LEN(efx->net_dev->mtu);
	EF4_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
	ef4_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
	EF4_POPULATE_OWORD_2(reg,
			     FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
			     FRF_AB_XM_TX_JUMBO_MODE, 1);
	ef4_writeo(efx, &reg, FR_AB_XM_TX_PARAM);

	/* Pause time and whether to honour received pause frames */
	EF4_POPULATE_OWORD_2(reg,
			     FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
			     FRF_AB_XM_DIS_FCNTL, !rx_fc);
	ef4_writeo(efx, &reg, FR_AB_XM_FC);

	/* Set MAC address (low 4 bytes then high 2 bytes) */
	memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
	ef4_writeo(efx, &reg, FR_AB_XM_ADR_LO);
	memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
	ef4_writeo(efx, &reg, FR_AB_XM_ADR_HI);
}
1152*4882a593Smuzhiyun 
/* Program the XGXS loopback configuration to match efx->loopback_mode,
 * resetting the XAUI block first whenever any of the XGMII/XGXS/XAUI
 * loopback settings actually changes.
 */
static void falcon_reconfigure_xgxs_core(struct ef4_nic *efx)
{
	ef4_oword_t reg;
	bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
	bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
	bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
	bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;

	/* XGXS block is flaky and will need to be reset if moving
	 * into our out of XGMII, XGXS or XAUI loopbacks. */
	ef4_reado(efx, &reg, FR_AB_XX_CORE_STAT);
	old_xgxs_loopback = EF4_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
	old_xgmii_loopback = EF4_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);

	ef4_reado(efx, &reg, FR_AB_XX_SD_CTL);
	old_xaui_loopback = EF4_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);

	/* The PHY driver may have turned XAUI off */
	if ((xgxs_loopback != old_xgxs_loopback) ||
	    (xaui_loopback != old_xaui_loopback) ||
	    (xgmii_loopback != old_xgmii_loopback))
		falcon_reset_xaui(efx);

	/* Re-read after the reset, then set the new loopback enables;
	 * force signal detect on all lanes while in XGXS/XAUI loopback */
	ef4_reado(efx, &reg, FR_AB_XX_CORE_STAT);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
			    (xgxs_loopback || xaui_loopback) ?
			    FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
	ef4_writeo(efx, &reg, FR_AB_XX_CORE_STAT);

	/* XAUI loopback is enabled per serdes lane (A-D) */
	ef4_reado(efx, &reg, FR_AB_XX_SD_CTL);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
	ef4_writeo(efx, &reg, FR_AB_XX_SD_CTL);
}
1191*4882a593Smuzhiyun 
1192*4882a593Smuzhiyun 
1193*4882a593Smuzhiyun /* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
falcon_xmac_link_ok_retry(struct ef4_nic * efx,int tries)1194*4882a593Smuzhiyun static bool falcon_xmac_link_ok_retry(struct ef4_nic *efx, int tries)
1195*4882a593Smuzhiyun {
1196*4882a593Smuzhiyun 	bool mac_up = falcon_xmac_link_ok(efx);
1197*4882a593Smuzhiyun 
1198*4882a593Smuzhiyun 	if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
1199*4882a593Smuzhiyun 	    ef4_phy_mode_disabled(efx->phy_mode))
1200*4882a593Smuzhiyun 		/* XAUI link is expected to be down */
1201*4882a593Smuzhiyun 		return mac_up;
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun 	falcon_stop_nic_stats(efx);
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun 	while (!mac_up && tries) {
1206*4882a593Smuzhiyun 		netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
1207*4882a593Smuzhiyun 		falcon_reset_xaui(efx);
1208*4882a593Smuzhiyun 		udelay(200);
1209*4882a593Smuzhiyun 
1210*4882a593Smuzhiyun 		mac_up = falcon_xmac_link_ok(efx);
1211*4882a593Smuzhiyun 		--tries;
1212*4882a593Smuzhiyun 	}
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun 	falcon_start_nic_stats(efx);
1215*4882a593Smuzhiyun 
1216*4882a593Smuzhiyun 	return mac_up;
1217*4882a593Smuzhiyun }
1218*4882a593Smuzhiyun 
falcon_xmac_check_fault(struct ef4_nic * efx)1219*4882a593Smuzhiyun static bool falcon_xmac_check_fault(struct ef4_nic *efx)
1220*4882a593Smuzhiyun {
1221*4882a593Smuzhiyun 	return !falcon_xmac_link_ok_retry(efx, 5);
1222*4882a593Smuzhiyun }
1223*4882a593Smuzhiyun 
/* Reprogram the whole XMAC datapath: RX filter mode, XGXS core, XMAC
 * core and MAC wrapper, in that order.  Afterwards poll the XAUI
 * link and record whether further polling is needed.  Always
 * returns 0.
 */
static int falcon_reconfigure_xmac(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	ef4_farch_filter_sync_rx_mode(efx);

	falcon_reconfigure_xgxs_core(efx);
	falcon_reconfigure_xmac_core(efx);

	falcon_reconfigure_mac_wrapper(efx);

	/* If the link failed to come up cleanly, keep polling it */
	nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
	falcon_ack_status_intr(efx);

	return 0;
}
1240*4882a593Smuzhiyun 
/* Periodic XMAC health check: re-poll the XAUI link, but only while
 * the port is up and a previous poll/reconfigure left the link in a
 * faulty state (xmac_poll_required).
 */
static void falcon_poll_xmac(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	/* We expect xgmii faults if the wireside link is down */
	if (!efx->link_state.up || !nic_data->xmac_poll_required)
		return;

	/* Single retry only; the next periodic poll will try again */
	nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
	falcon_ack_status_intr(efx);
}
1252*4882a593Smuzhiyun 
1253*4882a593Smuzhiyun /**************************************************************************
1254*4882a593Smuzhiyun  *
1255*4882a593Smuzhiyun  * MAC wrapper
1256*4882a593Smuzhiyun  *
1257*4882a593Smuzhiyun  **************************************************************************
1258*4882a593Smuzhiyun  */
1259*4882a593Smuzhiyun 
/* Write the software multicast hash filter into the two hardware
 * multicast-hash registers.  Caller must hold efx->mac_lock.
 */
static void falcon_push_multicast_hash(struct ef4_nic *efx)
{
	union ef4_multicast_hash *mc_hash = &efx->multicast_hash;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	ef4_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
	ef4_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
}
1269*4882a593Smuzhiyun 
/* Reset the MAC blocks.  On pre-B0 silicon only the internal XMAC
 * core reset is used; on B0 the EM/XGTX/XGRX blocks are reset via
 * GLB_CTL with the TX FIFO drain enabled.  The caller must have
 * stopped statistics already (stats DMA would fail during the
 * reset).
 */
static void falcon_reset_macs(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg, mac_ctrl;
	int count;

	if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0) {
		/* It's not safe to use GLB_CTL_REG to reset the
		 * macs, so instead use the internal MAC resets
		 */
		EF4_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
		ef4_writeo(efx, &reg, FR_AB_XM_GLB_CFG);

		/* Wait up to 100ms for the reset bit to self-clear */
		for (count = 0; count < 10000; count++) {
			ef4_reado(efx, &reg, FR_AB_XM_GLB_CFG);
			if (EF4_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
			    0)
				return;
			udelay(10);
		}

		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for XMAC core reset\n");
		/* NOTE(review): on timeout we fall through to the
		 * GLB_CTL reset below even though the comment above
		 * calls it unsafe on this silicon — appears to be
		 * last-resort recovery; confirm against the hardware
		 * errata. */
	}

	/* Mac stats will fail whilst the TX fifo is draining */
	WARN_ON(nic_data->stats_disable_count == 0);

	/* Enable the TX FIFO drain, keeping a copy of MAC_CTRL so it
	 * can be restored after the reset */
	ef4_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL);
	EF4_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1);
	ef4_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);

	/* Assert reset on the EM, XGTX and XGRX blocks */
	ef4_reado(efx, &reg, FR_AB_GLB_CTL);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
	EF4_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
	ef4_writeo(efx, &reg, FR_AB_GLB_CTL);

	/* Wait (up to ~200us) for all three reset bits to self-clear */
	count = 0;
	while (1) {
		ef4_reado(efx, &reg, FR_AB_GLB_CTL);
		if (!EF4_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
		    !EF4_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
		    !EF4_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
			netif_dbg(efx, hw, efx->net_dev,
				  "Completed MAC reset after %d loops\n",
				  count);
			break;
		}
		if (count > 20) {
			netif_err(efx, hw, efx->net_dev, "MAC reset failed\n");
			break;
		}
		count++;
		udelay(10);
	}

	/* Ensure the correct MAC is selected before statistics
	 * are re-enabled by the caller */
	ef4_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);

	falcon_setup_xaui(efx);
}
1333*4882a593Smuzhiyun 
falcon_drain_tx_fifo(struct ef4_nic * efx)1334*4882a593Smuzhiyun static void falcon_drain_tx_fifo(struct ef4_nic *efx)
1335*4882a593Smuzhiyun {
1336*4882a593Smuzhiyun 	ef4_oword_t reg;
1337*4882a593Smuzhiyun 
1338*4882a593Smuzhiyun 	if ((ef4_nic_rev(efx) < EF4_REV_FALCON_B0) ||
1339*4882a593Smuzhiyun 	    (efx->loopback_mode != LOOPBACK_NONE))
1340*4882a593Smuzhiyun 		return;
1341*4882a593Smuzhiyun 
1342*4882a593Smuzhiyun 	ef4_reado(efx, &reg, FR_AB_MAC_CTRL);
1343*4882a593Smuzhiyun 	/* There is no point in draining more than once */
1344*4882a593Smuzhiyun 	if (EF4_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
1345*4882a593Smuzhiyun 		return;
1346*4882a593Smuzhiyun 
1347*4882a593Smuzhiyun 	falcon_reset_macs(efx);
1348*4882a593Smuzhiyun }
1349*4882a593Smuzhiyun 
/* Isolate the MAC from the datapath ahead of a reset (B0 only): stop
 * the RX path taking packets from the MAC and drain the TX FIFO.
 * A no-op on pre-B0 silicon.
 */
static void falcon_deconfigure_mac_wrapper(struct ef4_nic *efx)
{
	ef4_oword_t reg;

	if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0)
		return;

	/* Isolate the MAC -> RX */
	ef4_reado(efx, &reg, FR_AZ_RX_CFG);
	EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
	ef4_writeo(efx, &reg, FR_AZ_RX_CFG);

	/* Isolate TX -> MAC */
	falcon_drain_tx_fifo(efx);
}
1365*4882a593Smuzhiyun 
/* Program the MAC wrapper (FR_AB_MAC_CTRL and FR_AZ_RX_CFG) for the
 * current link state: link speed, unicast promiscuity, pause value,
 * TX FIFO drain and MAC<->RX isolation.  Also restores the multicast
 * hash registers, which may have been lost across a MAC reset.
 */
static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx)
{
	struct ef4_link_state *link_state = &efx->link_state;
	ef4_oword_t reg;
	int link_speed, isolate;

	/* Keep the MAC isolated while a reset is pending */
	isolate = !!READ_ONCE(efx->reset_pending);

	/* Encode the link speed for the FRF_AB_MAC_SPEED field */
	switch (link_state->speed) {
	case 10000: link_speed = 3; break;
	case 1000:  link_speed = 2; break;
	case 100:   link_speed = 1; break;
	default:    link_speed = 0; break;
	}

	/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
	 * as advertised.  Disable to ensure packets are not
	 * indefinitely held and TX queue can be flushed at any point
	 * while the link is down. */
	EF4_POPULATE_OWORD_5(reg,
			     FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
			     FRF_AB_MAC_BCAD_ACPT, 1,
			     FRF_AB_MAC_UC_PROM, !efx->unicast_filter,
			     FRF_AB_MAC_LINK_STATUS, 1, /* always set */
			     FRF_AB_MAC_SPEED, link_speed);
	/* On B0, MAC backpressure can be disabled and packets get
	 * discarded. */
	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		EF4_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
				    !link_state->up || isolate);
	}

	ef4_writeo(efx, &reg, FR_AB_MAC_CTRL);

	/* Restore the multicast hash registers. */
	falcon_push_multicast_hash(efx);

	ef4_reado(efx, &reg, FR_AZ_RX_CFG);
	/* Enable XOFF signal from RX FIFO (we enabled it during NIC
	 * initialisation but it may read back as 0) */
	EF4_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
	/* Unisolate the MAC -> RX */
	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0)
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
	ef4_writeo(efx, &reg, FR_AZ_RX_CFG);
}
1412*4882a593Smuzhiyun 
/* Start a MAC statistics DMA into efx->stats_buffer and arm the
 * completion timer.  Must not be called with a request already
 * pending or while statistics are disabled.
 */
static void falcon_stats_request(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;

	WARN_ON(nic_data->stats_pending);
	WARN_ON(nic_data->stats_disable_count);

	/* Clear the done marker that the hardware sets on completion */
	FALCON_XMAC_STATS_DMA_FLAG(efx) = 0;
	nic_data->stats_pending = true;
	wmb(); /* ensure done flag is clear */

	/* Initiate DMA transfer of stats */
	EF4_POPULATE_OWORD_2(reg,
			     FRF_AB_MAC_STAT_DMA_CMD, 1,
			     FRF_AB_MAC_STAT_DMA_ADR,
			     efx->stats_buffer.dma_addr);
	ef4_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);

	/* Harvest the result in ~0.5s via the stats timer */
	mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
}
1434*4882a593Smuzhiyun 
/* Harvest a completed statistics DMA into nic_data->stats.  If the
 * hardware has not set the done marker by now, the request is
 * treated as timed out.  Called with efx->stats_lock held (see
 * falcon_stats_timer_func).
 */
static void falcon_stats_complete(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (!nic_data->stats_pending)
		return;

	nic_data->stats_pending = false;
	if (FALCON_XMAC_STATS_DMA_FLAG(efx)) {
		rmb(); /* read the done flag before the stats */
		ef4_nic_update_stats(falcon_stat_desc, FALCON_STAT_COUNT,
				     falcon_stat_mask, nic_data->stats,
				     efx->stats_buffer.addr, true);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for statistics\n");
	}
}
1453*4882a593Smuzhiyun 
/* Statistics timer: harvest any completed DMA and, unless statistics
 * are currently disabled, immediately kick off the next request,
 * keeping the stats continuously refreshed.
 */
static void falcon_stats_timer_func(struct timer_list *t)
{
	struct falcon_nic_data *nic_data = from_timer(nic_data, t,
						      stats_timer);
	struct ef4_nic *efx = nic_data->efx;

	spin_lock(&efx->stats_lock);

	falcon_stats_complete(efx);
	if (nic_data->stats_disable_count == 0)
		falcon_stats_request(efx);

	spin_unlock(&efx->stats_lock);
}
1468*4882a593Smuzhiyun 
falcon_loopback_link_poll(struct ef4_nic * efx)1469*4882a593Smuzhiyun static bool falcon_loopback_link_poll(struct ef4_nic *efx)
1470*4882a593Smuzhiyun {
1471*4882a593Smuzhiyun 	struct ef4_link_state old_state = efx->link_state;
1472*4882a593Smuzhiyun 
1473*4882a593Smuzhiyun 	WARN_ON(!mutex_is_locked(&efx->mac_lock));
1474*4882a593Smuzhiyun 	WARN_ON(!LOOPBACK_INTERNAL(efx));
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun 	efx->link_state.fd = true;
1477*4882a593Smuzhiyun 	efx->link_state.fc = efx->wanted_fc;
1478*4882a593Smuzhiyun 	efx->link_state.up = true;
1479*4882a593Smuzhiyun 	efx->link_state.speed = 10000;
1480*4882a593Smuzhiyun 
1481*4882a593Smuzhiyun 	return !ef4_link_state_equal(&efx->link_state, &old_state);
1482*4882a593Smuzhiyun }
1483*4882a593Smuzhiyun 
/* Bring the MAC and PHY configuration fully in line with the current
 * link state and loopback mode, then notify the kernel of any link
 * change.  Always returns 0.
 */
static int falcon_reconfigure_port(struct ef4_nic *efx)
{
	int rc;

	WARN_ON(ef4_nic_rev(efx) > EF4_REV_FALCON_B0);

	/* Poll the PHY link state *before* reconfiguring it. This means we
	 * will pick up the correct speed (in loopback) to select the correct
	 * MAC.
	 */
	if (LOOPBACK_INTERNAL(efx))
		falcon_loopback_link_poll(efx);
	else
		efx->phy_op->poll(efx);

	/* Stats must stay stopped across the MAC reset below */
	falcon_stop_nic_stats(efx);
	falcon_deconfigure_mac_wrapper(efx);

	falcon_reset_macs(efx);

	efx->phy_op->reconfigure(efx);
	rc = falcon_reconfigure_xmac(efx);
	BUG_ON(rc); /* falcon_reconfigure_xmac() only ever returns 0 */

	falcon_start_nic_stats(efx);

	/* Synchronise efx->link_state with the kernel */
	ef4_link_status_changed(efx);

	return 0;
}
1515*4882a593Smuzhiyun 
/* TX flow control may automatically turn itself off if the link
 * partner (intermittently) stops responding to pause frames. There
 * isn't any indication that this has happened, so the best we can do
 * is leave it up to the user to spot this and fix it by cycling
 * transmit flow control on this end.
 */
1522*4882a593Smuzhiyun 
/* A1 recovery path for re-enabling TX flow control: schedule a full
 * (invisible) chip reset. */
static void falcon_a1_prepare_enable_fc_tx(struct ef4_nic *efx)
{
	/* Schedule a reset to recover */
	ef4_schedule_reset(efx, RESET_TYPE_INVISIBLE);
}
1528*4882a593Smuzhiyun 
/* B0 recovery path for re-enabling TX flow control: a TX FIFO drain
 * followed by an XMAC reconfigure is sufficient — no chip reset. */
static void falcon_b0_prepare_enable_fc_tx(struct ef4_nic *efx)
{
	/* Recover by resetting the EM block */
	falcon_stop_nic_stats(efx);
	falcon_drain_tx_fifo(efx);
	falcon_reconfigure_xmac(efx);
	falcon_start_nic_stats(efx);
}
1537*4882a593Smuzhiyun 
1538*4882a593Smuzhiyun /**************************************************************************
1539*4882a593Smuzhiyun  *
1540*4882a593Smuzhiyun  * PHY access via GMII
1541*4882a593Smuzhiyun  *
1542*4882a593Smuzhiyun  **************************************************************************
1543*4882a593Smuzhiyun  */
1544*4882a593Smuzhiyun 
1545*4882a593Smuzhiyun /* Wait for GMII access to complete */
static int falcon_gmii_wait(struct ef4_nic *efx)
{
	ef4_oword_t md_stat;
	int count;

	/* wait up to 50ms - taken max from datasheet */
	for (count = 0; count < 5000; count++) {
		ef4_reado(efx, &md_stat, FR_AB_MD_STAT);
		if (EF4_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
			/* Access finished: fail with -EIO if it
			 * reported a line fault or bus error */
			if (EF4_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
			    EF4_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
				netif_err(efx, hw, efx->net_dev,
					  "error from GMII access "
					  EF4_OWORD_FMT"\n",
					  EF4_OWORD_VAL(md_stat));
				return -EIO;
			}
			return 0;
		}
		udelay(10);
	}
	netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n");
	return -ETIMEDOUT;
}
1570*4882a593Smuzhiyun 
1571*4882a593Smuzhiyun /* Write an MDIO register of a PHY connected to Falcon. */
static int falcon_mdio_write(struct net_device *net_dev,
			     int prtad, int devad, u16 addr, u16 value)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;
	int rc;

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing MDIO %d register %d.%d with 0x%04x\n",
		    prtad, devad, addr, value);

	/* Serialise all accesses to the MDIO bus */
	mutex_lock(&nic_data->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	/* Write the address/ID register */
	EF4_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	ef4_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	/* Select the port and device (MMD) to address */
	EF4_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	ef4_writeo(efx, &reg, FR_AB_MD_ID);

	/* Write data */
	EF4_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
	ef4_writeo(efx, &reg, FR_AB_MD_TXD);

	/* Kick off the write cycle */
	EF4_POPULATE_OWORD_2(reg,
			     FRF_AB_MD_WRC, 1,
			     FRF_AB_MD_GC, 0);
	ef4_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to be written */
	rc = falcon_gmii_wait(efx);
	if (rc) {
		/* Abort the write operation */
		EF4_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_WRC, 0,
				     FRF_AB_MD_GC, 1);
		ef4_writeo(efx, &reg, FR_AB_MD_CS);
		udelay(10);
	}

out:
	mutex_unlock(&nic_data->mdio_lock);
	return rc;
}
1623*4882a593Smuzhiyun 
1624*4882a593Smuzhiyun /* Read an MDIO register of a PHY connected to Falcon. */
static int falcon_mdio_read(struct net_device *net_dev,
			    int prtad, int devad, u16 addr)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t reg;
	int rc;

	/* Serialise all accesses to the MDIO bus */
	mutex_lock(&nic_data->mdio_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	/* Write the register address */
	EF4_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
	ef4_writeo(efx, &reg, FR_AB_MD_PHY_ADR);

	/* Select the port and device (MMD) to address */
	EF4_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
			     FRF_AB_MD_DEV_ADR, devad);
	ef4_writeo(efx, &reg, FR_AB_MD_ID);

	/* Request data to be read */
	EF4_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
	ef4_writeo(efx, &reg, FR_AB_MD_CS);

	/* Wait for data to become available */
	rc = falcon_gmii_wait(efx);
	if (rc == 0) {
		/* Success: return the 16-bit register value */
		ef4_reado(efx, &reg, FR_AB_MD_RXD);
		rc = EF4_OWORD_FIELD(reg, FRF_AB_MD_RXD);
		netif_vdbg(efx, hw, efx->net_dev,
			   "read from MDIO %d register %d.%d, got %04x\n",
			   prtad, devad, addr, rc);
	} else {
		/* Abort the read operation */
		EF4_POPULATE_OWORD_2(reg,
				     FRF_AB_MD_RIC, 0,
				     FRF_AB_MD_GC, 1);
		ef4_writeo(efx, &reg, FR_AB_MD_CS);

		netif_dbg(efx, hw, efx->net_dev,
			  "read from MDIO %d register %d.%d, got error %d\n",
			  prtad, devad, addr, rc);
	}

out:
	mutex_unlock(&nic_data->mdio_lock);
	return rc;
}
1675*4882a593Smuzhiyun 
1676*4882a593Smuzhiyun /* This call is responsible for hooking in the MAC and PHY operations */
static int falcon_probe_port(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	/* Select the PHY operations matching the board's PHY type */
	switch (efx->phy_type) {
	case PHY_TYPE_SFX7101:
		efx->phy_op = &falcon_sfx7101_phy_ops;
		break;
	case PHY_TYPE_QT2022C2:
	case PHY_TYPE_QT2025C:
		efx->phy_op = &falcon_qt202x_phy_ops;
		break;
	case PHY_TYPE_TXC43128:
		efx->phy_op = &falcon_txc_phy_ops;
		break;
	default:
		netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
			  efx->phy_type);
		return -ENODEV;
	}

	/* Fill out MDIO structure and loopback modes */
	mutex_init(&nic_data->mdio_lock);
	efx->mdio.mdio_read = falcon_mdio_read;
	efx->mdio.mdio_write = falcon_mdio_write;
	rc = efx->phy_op->probe(efx);
	if (rc != 0)
		return rc;

	/* Initial assumption */
	efx->link_state.speed = 10000;
	efx->link_state.fd = true;

	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0)
		efx->wanted_fc = EF4_FC_RX | EF4_FC_TX;
	else
		efx->wanted_fc = EF4_FC_RX;
	/* Auto-negotiate flow control if the PHY supports AN */
	if (efx->mdio.mmds & MDIO_DEVS_AN)
		efx->wanted_fc |= EF4_FC_AUTO;

	/* Allocate DMA buffer for the MAC statistics */
	rc = ef4_nic_alloc_buffer(efx, &efx->stats_buffer,
				  FALCON_MAC_STATS_SIZE, GFP_KERNEL);
	if (rc)
		return rc;
	netif_dbg(efx, probe, efx->net_dev,
		  "stats buffer at %llx (virt %p phys %llx)\n",
		  (u64)efx->stats_buffer.dma_addr,
		  efx->stats_buffer.addr,
		  (u64)virt_to_phys(efx->stats_buffer.addr));

	return 0;
}
1732*4882a593Smuzhiyun 
/* Undo falcon_probe_port(): detach the PHY and free the statistics
 * DMA buffer. */
static void falcon_remove_port(struct ef4_nic *efx)
{
	efx->phy_op->remove(efx);
	ef4_nic_free_buffer(efx, &efx->stats_buffer);
}
1738*4882a593Smuzhiyun 
1739*4882a593Smuzhiyun /* Global events are basically PHY events */
static bool
falcon_handle_global_event(struct ef4_channel *channel, ef4_qword_t *event)
{
	struct ef4_nic *efx = channel->efx;
	struct falcon_nic_data *nic_data = efx->nic_data;

	/* PHY interrupt events are consumed without further action */
	if (EF4_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
	    EF4_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
	    EF4_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR))
		/* Ignored */
		return true;

	/* XGMAC management interrupt (B0 only): mark the XMAC as
	 * needing a poll */
	if ((ef4_nic_rev(efx) == EF4_REV_FALCON_B0) &&
	    EF4_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
		nic_data->xmac_poll_required = true;
		return true;
	}

	/* RX recovery event; the field position differs between the
	 * A and B silicon revisions */
	if (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1 ?
	    EF4_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
	    EF4_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen global RX_RESET event. Resetting.\n",
			  channel->channel);

		atomic_inc(&efx->rx_reset);
		ef4_schedule_reset(efx, EF4_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
		return true;
	}

	/* Not a global event we handle */
	return false;
}
1773*4882a593Smuzhiyun 
1774*4882a593Smuzhiyun /**************************************************************************
1775*4882a593Smuzhiyun  *
1776*4882a593Smuzhiyun  * Falcon test code
1777*4882a593Smuzhiyun  *
1778*4882a593Smuzhiyun  **************************************************************************/
1779*4882a593Smuzhiyun 
/* Read and validate the board configuration from SPI flash or EEPROM.
 *
 * On success returns 0 and, if @nvconfig_out is non-NULL, copies the
 * config structure into it.  Returns -EINVAL if no SPI device is
 * present or the NVRAM contents fail validation, -ENOMEM on allocation
 * failure, and -EIO if the SPI read itself fails.
 */
static int
falcon_read_nvram(struct ef4_nic *efx, struct falcon_nvconfig *nvconfig_out)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_nvconfig *nvconfig;
	struct falcon_spi_device *spi;
	void *region;
	int rc, magic_num, struct_ver;
	__le16 *word, *limit;
	u32 csum;

	/* Prefer flash over EEPROM when both devices are present */
	if (falcon_spi_present(&nic_data->spi_flash))
		spi = &nic_data->spi_flash;
	else if (falcon_spi_present(&nic_data->spi_eeprom))
		spi = &nic_data->spi_eeprom;
	else
		return -EINVAL;

	region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
	if (!region)
		return -ENOMEM;
	/* The config structure lives at a fixed offset within the region */
	nvconfig = region + FALCON_NVCONFIG_OFFSET;

	/* spi_lock serialises against other users of the SPI bus */
	mutex_lock(&nic_data->spi_lock);
	rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
	mutex_unlock(&nic_data->spi_lock);
	if (rc) {
		netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
			  falcon_spi_present(&nic_data->spi_flash) ?
			  "flash" : "EEPROM");
		rc = -EIO;
		goto out;
	}

	magic_num = le16_to_cpu(nvconfig->board_magic_num);
	struct_ver = le16_to_cpu(nvconfig->board_struct_ver);

	rc = -EINVAL;
	if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM bad magic 0x%x\n", magic_num);
		goto out;
	}
	if (struct_ver < 2) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM has ancient version 0x%x\n", struct_ver);
		goto out;
	} else if (struct_ver < 4) {
		/* v2/v3: checksum covers only the nvconfig structure */
		word = &nvconfig->board_magic_num;
		limit = (__le16 *) (nvconfig + 1);
	} else {
		/* v4+: checksum covers the whole region from offset 0 */
		word = region;
		limit = region + FALCON_NVCONFIG_END;
	}
	for (csum = 0; word < limit; ++word)
		csum += le16_to_cpu(*word);

	/* A valid image sums to 0xffff over 16-bit words */
	if (~csum & 0xffff) {
		netif_err(efx, hw, efx->net_dev,
			  "NVRAM has incorrect checksum\n");
		goto out;
	}

	rc = 0;
	if (nvconfig_out)
		memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));

 out:
	kfree(region);
	return rc;
}
1851*4882a593Smuzhiyun 
/* Self-test hook: NVRAM passes if it can be read and validated.
 * Passing NULL discards the config; only the return code matters.
 */
static int falcon_test_nvram(struct ef4_nic *efx)
{
	return falcon_read_nvram(efx, NULL);
}
1856*4882a593Smuzhiyun 
/* Register/mask pairs exercised by the B0 register self-test.  Each
 * mask selects the register bits the test is allowed to flip and read
 * back; bits outside the mask are left untouched.
 */
static const struct ef4_farch_register_test falcon_b0_register_tests[] = {
	{ FR_AZ_ADR_REGION,
	  EF4_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
	{ FR_AZ_RX_CFG,
	  EF4_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
	{ FR_AZ_TX_CFG,
	  EF4_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_TX_RESERVED,
	  EF4_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
	{ FR_AB_MAC_CTRL,
	  EF4_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_SRM_TX_DC_CFG,
	  EF4_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_CFG,
	  EF4_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AZ_RX_DC_PF_WM,
	  EF4_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_BZ_DP_CTRL,
	  EF4_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_GM_CFG2,
	  EF4_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_GMF_CFG0,
	  EF4_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_GLB_CFG,
	  EF4_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_TX_CFG,
	  EF4_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_RX_CFG,
	  EF4_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_RX_PARAM,
	  EF4_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_FC,
	  EF4_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XM_ADR_LO,
	  EF4_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ FR_AB_XX_SD_CTL,
	  EF4_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
};
1895*4882a593Smuzhiyun 
/* Chip self-test for Falcon B0: move the port into a loopback mode,
 * run the register read/write test, then reset the NIC to restore a
 * clean state.  Returns 0 on success or the first failing step's error
 * code; per-test results are recorded in @tests.
 */
static int
falcon_b0_test_chip(struct ef4_nic *efx, struct ef4_self_tests *tests)
{
	enum reset_type reset_method = RESET_TYPE_INVISIBLE;
	int rc, rc2;

	mutex_lock(&efx->mac_lock);
	if (efx->loopback_modes) {
		/* We need the 312 clock from the PHY to test the XMAC
		 * registers, so move into XGMII loopback if available */
		if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
			efx->loopback_mode = LOOPBACK_XGMII;
		else
			efx->loopback_mode = __ffs(efx->loopback_modes);
	}
	__ef4_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	ef4_reset_down(efx, reset_method);

	/* 1 = passed, -1 = failed */
	tests->registers =
		ef4_farch_test_registers(efx, falcon_b0_register_tests,
					 ARRAY_SIZE(falcon_b0_register_tests))
		? -1 : 1;

	/* Bring the device back up; report the first failure seen */
	rc = falcon_reset_hw(efx, reset_method);
	rc2 = ef4_reset_up(efx, reset_method, rc == 0);
	return rc ? rc : rc2;
}
1925*4882a593Smuzhiyun 
1926*4882a593Smuzhiyun /**************************************************************************
1927*4882a593Smuzhiyun  *
1928*4882a593Smuzhiyun  * Device reset
1929*4882a593Smuzhiyun  *
1930*4882a593Smuzhiyun  **************************************************************************
1931*4882a593Smuzhiyun  */
1932*4882a593Smuzhiyun 
falcon_map_reset_reason(enum reset_type reason)1933*4882a593Smuzhiyun static enum reset_type falcon_map_reset_reason(enum reset_type reason)
1934*4882a593Smuzhiyun {
1935*4882a593Smuzhiyun 	switch (reason) {
1936*4882a593Smuzhiyun 	case RESET_TYPE_RX_RECOVERY:
1937*4882a593Smuzhiyun 	case RESET_TYPE_DMA_ERROR:
1938*4882a593Smuzhiyun 	case RESET_TYPE_TX_SKIP:
1939*4882a593Smuzhiyun 		/* These can occasionally occur due to hardware bugs.
1940*4882a593Smuzhiyun 		 * We try to reset without disrupting the link.
1941*4882a593Smuzhiyun 		 */
1942*4882a593Smuzhiyun 		return RESET_TYPE_INVISIBLE;
1943*4882a593Smuzhiyun 	default:
1944*4882a593Smuzhiyun 		return RESET_TYPE_ALL;
1945*4882a593Smuzhiyun 	}
1946*4882a593Smuzhiyun }
1947*4882a593Smuzhiyun 
/* Translate an ethtool reset-flags mask into a Falcon reset method.
 * The widest reset scope whose component flags are all set is chosen,
 * and those flags are consumed from *flags.  Returns -EINVAL when no
 * supported combination is fully covered by *flags.
 */
static int falcon_map_reset_flags(u32 *flags)
{
	enum {
		FALCON_RESET_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER |
					  ETH_RESET_OFFLOAD | ETH_RESET_MAC),
		FALCON_RESET_ALL = FALCON_RESET_INVISIBLE | ETH_RESET_PHY,
		FALCON_RESET_WORLD = FALCON_RESET_ALL | ETH_RESET_IRQ,
	};
	static const struct {
		u32 scope;
		int method;
	} reset_map[] = {
		/* Ordered from widest to narrowest scope */
		{ FALCON_RESET_WORLD, RESET_TYPE_WORLD },
		{ FALCON_RESET_ALL, RESET_TYPE_ALL },
		{ FALCON_RESET_INVISIBLE, RESET_TYPE_INVISIBLE },
	};
	unsigned int i;

	for (i = 0; i < sizeof(reset_map) / sizeof(reset_map[0]); i++) {
		if ((*flags & reset_map[i].scope) == reset_map[i].scope) {
			*flags &= ~reset_map[i].scope;
			return reset_map[i].method;
		}
	}

	return -EINVAL;
}
1974*4882a593Smuzhiyun 
/* Resets NIC to known state.  This routine must be called in process
 * context and is allowed to sleep.
 *
 * Returns 0 on success, a pci_save_state() error code if config-space
 * backup fails before a world reset, or -ETIMEDOUT if the hardware
 * does not clear its reset bit.
 */
static int __falcon_reset_hw(struct ef4_nic *efx, enum reset_type method)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t glb_ctl_reg_ker;
	int rc;

	netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n",
		  RESET_TYPE(method));

	/* Initiate device reset */
	if (method == RESET_TYPE_WORLD) {
		/* A world reset wipes PCI config space, so save both
		 * functions' state before pulling the trigger */
		rc = pci_save_state(efx->pci_dev);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to backup PCI state of primary "
				  "function prior to hardware reset\n");
			goto fail1;
		}
		if (ef4_nic_is_dual_func(efx)) {
			rc = pci_save_state(nic_data->pci_dev2);
			if (rc) {
				netif_err(efx, drv, efx->net_dev,
					  "failed to backup PCI state of "
					  "secondary function prior to "
					  "hardware reset\n");
				goto fail2;
			}
		}

		EF4_POPULATE_OWORD_2(glb_ctl_reg_ker,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	} else {
		EF4_POPULATE_OWORD_7(glb_ctl_reg_ker,
				     /* exclude PHY from "invisible" reset */
				     FRF_AB_EXT_PHY_RST_CTL,
				     method == RESET_TYPE_INVISIBLE,
				     /* exclude EEPROM/flash and PCIe */
				     FRF_AB_PCIE_CORE_RST_CTL, 1,
				     FRF_AB_PCIE_NSTKY_RST_CTL, 1,
				     FRF_AB_PCIE_SD_RST_CTL, 1,
				     FRF_AB_EE_RST_CTL, 1,
				     FRF_AB_EXT_PHY_RST_DUR,
				     FFE_AB_EXT_PHY_RST_DUR_10240US,
				     FRF_AB_SWRST, 1);
	}
	ef4_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);

	netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n");
	schedule_timeout_uninterruptible(HZ / 20);

	/* Restore PCI configuration if needed */
	if (method == RESET_TYPE_WORLD) {
		if (ef4_nic_is_dual_func(efx))
			pci_restore_state(nic_data->pci_dev2);
		pci_restore_state(efx->pci_dev);
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully restored PCI config\n");
	}

	/* Assert that reset complete */
	ef4_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
	if (EF4_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
		rc = -ETIMEDOUT;
		netif_err(efx, hw, efx->net_dev,
			  "timed out waiting for hardware reset\n");
		goto fail3;
	}
	netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");

	return 0;

	/* pci_save_state() and pci_restore_state() MUST be called in pairs */
fail2:
	pci_restore_state(efx->pci_dev);
fail1:
fail3:
	return rc;
}
2057*4882a593Smuzhiyun 
/* Reset the NIC, holding spi_lock so the reset cannot race with an
 * in-progress SPI flash/EEPROM access.
 */
static int falcon_reset_hw(struct ef4_nic *efx, enum reset_type method)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	mutex_lock(&nic_data->spi_lock);
	rc = __falcon_reset_hw(efx, method);
	mutex_unlock(&nic_data->spi_lock);

	return rc;
}
2069*4882a593Smuzhiyun 
/* Periodic monitor, called with mac_lock held.  Checks the board
 * sensors, polls link state, and reconfigures the MAC when the link
 * has changed.
 */
static void falcon_monitor(struct ef4_nic *efx)
{
	bool link_changed;
	int rc;

	BUG_ON(!mutex_is_locked(&efx->mac_lock));

	/* On a sensor fault or read failure, drop the PHY into
	 * low-power mode and reconfigure the port accordingly */
	rc = falcon_board(efx)->type->monitor(efx);
	if (rc) {
		netif_err(efx, hw, efx->net_dev,
			  "Board sensor %s; shutting down PHY\n",
			  (rc == -ERANGE) ? "reported fault" : "failed");
		efx->phy_mode |= PHY_MODE_LOW_POWER;
		rc = __ef4_reconfigure_port(efx);
		WARN_ON(rc);
	}

	/* Use the loopback-aware poll when in an internal loopback mode */
	if (LOOPBACK_INTERNAL(efx))
		link_changed = falcon_loopback_link_poll(efx);
	else
		link_changed = efx->phy_op->poll(efx);

	if (link_changed) {
		/* Stats are stopped around the MAC reset/reconfigure */
		falcon_stop_nic_stats(efx);
		falcon_deconfigure_mac_wrapper(efx);

		falcon_reset_macs(efx);
		rc = falcon_reconfigure_xmac(efx);
		BUG_ON(rc);

		falcon_start_nic_stats(efx);

		ef4_link_status_changed(efx);
	}

	falcon_poll_xmac(efx);
}
2107*4882a593Smuzhiyun 
2108*4882a593Smuzhiyun /* Zeroes out the SRAM contents.  This routine must be called in
2109*4882a593Smuzhiyun  * process context and is allowed to sleep.
2110*4882a593Smuzhiyun  */
falcon_reset_sram(struct ef4_nic * efx)2111*4882a593Smuzhiyun static int falcon_reset_sram(struct ef4_nic *efx)
2112*4882a593Smuzhiyun {
2113*4882a593Smuzhiyun 	ef4_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
2114*4882a593Smuzhiyun 	int count;
2115*4882a593Smuzhiyun 
2116*4882a593Smuzhiyun 	/* Set the SRAM wake/sleep GPIO appropriately. */
2117*4882a593Smuzhiyun 	ef4_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
2118*4882a593Smuzhiyun 	EF4_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
2119*4882a593Smuzhiyun 	EF4_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
2120*4882a593Smuzhiyun 	ef4_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
2121*4882a593Smuzhiyun 
2122*4882a593Smuzhiyun 	/* Initiate SRAM reset */
2123*4882a593Smuzhiyun 	EF4_POPULATE_OWORD_2(srm_cfg_reg_ker,
2124*4882a593Smuzhiyun 			     FRF_AZ_SRM_INIT_EN, 1,
2125*4882a593Smuzhiyun 			     FRF_AZ_SRM_NB_SZ, 0);
2126*4882a593Smuzhiyun 	ef4_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
2127*4882a593Smuzhiyun 
2128*4882a593Smuzhiyun 	/* Wait for SRAM reset to complete */
2129*4882a593Smuzhiyun 	count = 0;
2130*4882a593Smuzhiyun 	do {
2131*4882a593Smuzhiyun 		netif_dbg(efx, hw, efx->net_dev,
2132*4882a593Smuzhiyun 			  "waiting for SRAM reset (attempt %d)...\n", count);
2133*4882a593Smuzhiyun 
2134*4882a593Smuzhiyun 		/* SRAM reset is slow; expect around 16ms */
2135*4882a593Smuzhiyun 		schedule_timeout_uninterruptible(HZ / 50);
2136*4882a593Smuzhiyun 
2137*4882a593Smuzhiyun 		/* Check for reset complete */
2138*4882a593Smuzhiyun 		ef4_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
2139*4882a593Smuzhiyun 		if (!EF4_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
2140*4882a593Smuzhiyun 			netif_dbg(efx, hw, efx->net_dev,
2141*4882a593Smuzhiyun 				  "SRAM reset complete\n");
2142*4882a593Smuzhiyun 
2143*4882a593Smuzhiyun 			return 0;
2144*4882a593Smuzhiyun 		}
2145*4882a593Smuzhiyun 	} while (++count < 20);	/* wait up to 0.4 sec */
2146*4882a593Smuzhiyun 
2147*4882a593Smuzhiyun 	netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
2148*4882a593Smuzhiyun 	return -ETIMEDOUT;
2149*4882a593Smuzhiyun }
2150*4882a593Smuzhiyun 
/* Decode a packed SPI device-type word into the per-device
 * description.  A device_type of zero means "no device": only the
 * size is cleared, which marks the device as absent.
 */
static void falcon_spi_device_init(struct ef4_nic *efx,
				  struct falcon_spi_device *spi_device,
				  unsigned int device_id, u32 device_type)
{
	if (device_type == 0) {
		spi_device->size = 0;
		return;
	}

	spi_device->device_id = device_id;
	spi_device->addr_len =
		SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
	spi_device->size =
		1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
	/* 512-byte devices addressed with a single byte need munging */
	spi_device->munge_address = (spi_device->size == 1 << 9 &&
				     spi_device->addr_len == 1);
	spi_device->erase_command =
		SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
	spi_device->erase_size =
		1 << SPI_DEV_TYPE_FIELD(device_type,
					SPI_DEV_TYPE_ERASE_SIZE);
	spi_device->block_size =
		1 << SPI_DEV_TYPE_FIELD(device_type,
					SPI_DEV_TYPE_BLOCK_SIZE);
}
2175*4882a593Smuzhiyun 
/* Extract non-volatile configuration: PHY type/address, SPI device
 * descriptions (v3+ configs), permanent MAC address, and board type.
 * Returns 0 on success or the error from NVRAM read / board probe.
 */
static int falcon_probe_nvconfig(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_nvconfig *nvconfig;
	int rc;

	nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
	if (!nvconfig)
		return -ENOMEM;

	rc = falcon_read_nvram(efx, nvconfig);
	if (rc)
		goto out;

	/* Port 0 PHY details come from the v2 board config section */
	efx->phy_type = nvconfig->board_v2.port0_phy_type;
	efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr;

	/* v3+ configs additionally describe the attached SPI devices */
	if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
		falcon_spi_device_init(
			efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
			le32_to_cpu(nvconfig->board_v3
				    .spi_device_type[FFE_AB_SPI_DEVICE_FLASH]));
		falcon_spi_device_init(
			efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
			le32_to_cpu(nvconfig->board_v3
				    .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM]));
	}

	/* Read the MAC addresses */
	ether_addr_copy(efx->net_dev->perm_addr, nvconfig->mac_address[0]);

	netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
		  efx->phy_type, efx->mdio.prtad);

	rc = falcon_probe_board(efx,
				le16_to_cpu(nvconfig->board_v2.board_revision));
out:
	kfree(nvconfig);
	return rc;
}
2217*4882a593Smuzhiyun 
falcon_dimension_resources(struct ef4_nic * efx)2218*4882a593Smuzhiyun static int falcon_dimension_resources(struct ef4_nic *efx)
2219*4882a593Smuzhiyun {
2220*4882a593Smuzhiyun 	efx->rx_dc_base = 0x20000;
2221*4882a593Smuzhiyun 	efx->tx_dc_base = 0x26000;
2222*4882a593Smuzhiyun 	return 0;
2223*4882a593Smuzhiyun }
2224*4882a593Smuzhiyun 
/* Probe all SPI devices on the NIC.
 *
 * Determines from the GPIO/NIC_STAT straps which device (if any) the
 * NIC booted from, programs safe SPI clock dividers when it booted
 * from internal ASIC settings, and initialises the boot device with a
 * default description so it can be read before NVRAM parsing.
 */
static void falcon_probe_spi_devices(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
	int boot_dev;

	ef4_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
	ef4_reado(efx, &nic_stat, FR_AB_NIC_STAT);
	ef4_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);

	if (EF4_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
		/* The SF_PRST strap selects flash vs EEPROM boot */
		boot_dev = (EF4_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
			    FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
		netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n",
			  boot_dev == FFE_AB_SPI_DEVICE_FLASH ?
			  "flash" : "EEPROM");
	} else {
		/* Disable VPD and set clock dividers to safe
		 * values for initial programming. */
		boot_dev = -1;
		netif_dbg(efx, probe, efx->net_dev,
			  "Booted from internal ASIC settings;"
			  " setting SPI config\n");
		EF4_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
				     /* 125 MHz / 7 ~= 20 MHz */
				     FRF_AB_EE_SF_CLOCK_DIV, 7,
				     /* 125 MHz / 63 ~= 2 MHz */
				     FRF_AB_EE_EE_CLOCK_DIV, 63);
		ef4_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
	}

	mutex_init(&nic_data->spi_lock);

	/* Give the boot device a default description until NVRAM
	 * provides the real one */
	if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
		falcon_spi_device_init(efx, &nic_data->spi_flash,
				       FFE_AB_SPI_DEVICE_FLASH,
				       default_flash_type);
	if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
		falcon_spi_device_init(efx, &nic_data->spi_eeprom,
				       FFE_AB_SPI_DEVICE_EEPROM,
				       large_eeprom_type);
}
2268*4882a593Smuzhiyun 
/* BAR mapping size for Falcon rev A1: fixed 128KB window */
static unsigned int falcon_a1_mem_map_size(struct ef4_nic *efx)
{
	return 0x20000;
}
2273*4882a593Smuzhiyun 
/* BAR mapping size for Falcon rev B0 */
static unsigned int falcon_b0_mem_map_size(struct ef4_nic *efx)
{
	/* Map everything up to and including the RSS indirection table.
	 * The PCI core takes care of mapping the MSI-X tables.
	 */
	return FR_BZ_RX_INDIRECTION_TBL +
		FR_BZ_RX_INDIRECTION_TBL_STEP * FR_BZ_RX_INDIRECTION_TBL_ROWS;
}
2282*4882a593Smuzhiyun 
/* Probe the NIC: validate the hardware revision, locate the secondary
 * PCI function (rev A1 only), reset the chip, read the NVRAM config,
 * and initialise the I2C adapter and board.  On failure all acquired
 * resources are released via the fail* unwind labels.
 */
static int falcon_probe_nic(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data;
	struct falcon_board *board;
	int rc;

	efx->primary = efx; /* only one usable function per controller */

	/* Allocate storage for hardware specific data */
	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;
	nic_data->efx = efx;

	rc = -ENODEV;

	if (ef4_farch_fpga_ver(efx) != 0) {
		netif_err(efx, probe, efx->net_dev,
			  "Falcon FPGA not supported\n");
		goto fail1;
	}

	if (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1) {
		ef4_oword_t nic_stat;
		struct pci_dev *dev;
		u8 pci_rev = efx->pci_dev->revision;

		/* Reject unsupported rev A variants based on PCI
		 * revision and strap settings */
		if ((pci_rev == 0xff) || (pci_rev == 0)) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A0 not supported\n");
			goto fail1;
		}
		ef4_reado(efx, &nic_stat, FR_AB_NIC_STAT);
		if (EF4_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 1G not supported\n");
			goto fail1;
		}
		if (EF4_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
			netif_err(efx, probe, efx->net_dev,
				  "Falcon rev A1 PCI-X not supported\n");
			goto fail1;
		}

		/* Rev A1 has a second PCI function at the next devfn on
		 * the same bus; find it and hold a reference */
		dev = pci_dev_get(efx->pci_dev);
		while ((dev = pci_get_device(PCI_VENDOR_ID_SOLARFLARE,
					     PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1,
					     dev))) {
			if (dev->bus == efx->pci_dev->bus &&
			    dev->devfn == efx->pci_dev->devfn + 1) {
				nic_data->pci_dev2 = dev;
				break;
			}
		}
		if (!nic_data->pci_dev2) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to find secondary function\n");
			rc = -ENODEV;
			goto fail2;
		}
	}

	/* Now we can reset the NIC */
	rc = __falcon_reset_hw(efx, RESET_TYPE_ALL);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
		goto fail3;
	}

	/* Allocate memory for INT_KER */
	rc = ef4_nic_alloc_buffer(efx, &efx->irq_status, sizeof(ef4_oword_t),
				  GFP_KERNEL);
	if (rc)
		goto fail4;
	/* Hardware requires a 16-byte aligned interrupt status buffer */
	BUG_ON(efx->irq_status.dma_addr & 0x0f);

	netif_dbg(efx, probe, efx->net_dev,
		  "INT_KER at %llx (virt %p phys %llx)\n",
		  (u64)efx->irq_status.dma_addr,
		  efx->irq_status.addr,
		  (u64)virt_to_phys(efx->irq_status.addr));

	falcon_probe_spi_devices(efx);

	/* Read in the non-volatile configuration */
	rc = falcon_probe_nvconfig(efx);
	if (rc) {
		if (rc == -EINVAL)
			netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n");
		goto fail5;
	}

	/* Rev A1 is limited to 4 channels */
	efx->max_channels = (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1 ? 4 :
			     EF4_MAX_CHANNELS);
	efx->max_tx_channels = efx->max_channels;
	efx->timer_quantum_ns = 4968; /* 621 cycles */
	efx->timer_max_ns = efx->type->timer_period_max *
			    efx->timer_quantum_ns;

	/* Initialise I2C adapter */
	board = falcon_board(efx);
	board->i2c_adap.owner = THIS_MODULE;
	board->i2c_data = falcon_i2c_bit_operations;
	board->i2c_data.data = efx;
	board->i2c_adap.algo_data = &board->i2c_data;
	board->i2c_adap.dev.parent = &efx->pci_dev->dev;
	strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
		sizeof(board->i2c_adap.name));
	rc = i2c_bit_add_bus(&board->i2c_adap);
	if (rc)
		goto fail5;

	rc = falcon_board(efx)->type->init(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to initialise board\n");
		goto fail6;
	}

	/* Stats start disabled (count of 1) until explicitly started */
	nic_data->stats_disable_count = 1;
	timer_setup(&nic_data->stats_timer, falcon_stats_timer_func, 0);

	return 0;

	/* Error unwind: later labels fall through to earlier cleanup;
	 * fail3/fail4 and fail1/fail2 pairs share the same cleanup */
 fail6:
	i2c_del_adapter(&board->i2c_adap);
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
 fail5:
	ef4_nic_free_buffer(efx, &efx->irq_status);
 fail4:
 fail3:
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}
 fail2:
 fail1:
	kfree(efx->nic_data);
	return rc;
}
2424*4882a593Smuzhiyun 
static void falcon_init_rx_cfg(struct ef4_nic *efx)
{
	/* RX control FIFO thresholds (32 entries) */
	const unsigned ctrl_xon_thr = 20;
	const unsigned ctrl_xoff_thr = 25;
	ef4_oword_t reg;

	/* Read-modify-write FR_AZ_RX_CFG.  The field layout differs
	 * between Falcon A and B revisions, hence the two branches.
	 */
	ef4_reado(efx, &reg, FR_AZ_RX_CFG);
	if (ef4_nic_rev(efx) <= EF4_REV_FALCON_A1) {
		/* Data FIFO size is 5.5K.  The RX DMA engine only
		 * supports scattering for user-mode queues, but will
		 * split DMA writes at intervals of RX_USR_BUF_SIZE
		 * (32-byte units) even for kernel-mode queues.  We
		 * set it to be so large that that never happens.
		 */
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
				    (3 * 4096) >> 5);
		/* MAC pause thresholds are scaled down by 256 (>> 8)
		 * before programming.
		 */
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
		EF4_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
	} else {
		/* Data FIFO size is 80K; register fields moved */
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
				    EF4_RX_USR_BUF_SIZE >> 5);
		/* Send XON and XOFF at ~3 * max MTU away from empty/full */
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);

		/* Enable hash insertion. This is broken for the
		 * 'Falcon' hash so also select Toeplitz TCP/IPv4 and
		 * IPv4 hashes. */
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1);
	}
	/* Always enable XOFF signal from RX FIFO.  We enable
	 * or disable transmission of pause frames at the MAC. */
	EF4_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
	ef4_writeo(efx, &reg, FR_AZ_RX_CFG);
}
2471*4882a593Smuzhiyun 
2472*4882a593Smuzhiyun /* This call performs hardware-specific global initialisation, such as
2473*4882a593Smuzhiyun  * defining the descriptor cache sizes and number of RSS channels.
2474*4882a593Smuzhiyun  * It does not set up any buffers, descriptor rings or event queues.
2475*4882a593Smuzhiyun  */
static int falcon_init_nic(struct ef4_nic *efx)
{
	ef4_oword_t temp;
	int rc;

	/* Use on-chip SRAM */
	ef4_reado(efx, &temp, FR_AB_NIC_STAT);
	EF4_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
	ef4_writeo(efx, &temp, FR_AB_NIC_STAT);

	/* SRAM must be (re)initialised after switching to on-chip mode */
	rc = falcon_reset_sram(efx);
	if (rc)
		return rc;

	/* Clear the parity enables on the TX data fifos as
	 * they produce false parity errors because of timing issues
	 */
	if (EF4_WORKAROUND_5129(efx)) {
		ef4_reado(efx, &temp, FR_AZ_CSR_SPARE);
		EF4_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
		ef4_writeo(efx, &temp, FR_AZ_CSR_SPARE);
	}

	/* Limit filter search depth to avoid long search times (wa 7244) */
	if (EF4_WORKAROUND_7244(efx)) {
		ef4_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
		EF4_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
		EF4_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
		EF4_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
		EF4_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
		ef4_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
	}

	/* XXX This is documented only for Falcon A0/A1 */
	/* Setup RX.  Wait for descriptor is broken and must
	 * be disabled.  RXDP recovery shouldn't be needed, but is.
	 */
	ef4_reado(efx, &temp, FR_AA_RX_SELF_RST);
	EF4_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
	EF4_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
	if (EF4_WORKAROUND_5583(efx))
		EF4_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
	ef4_writeo(efx, &temp, FR_AA_RX_SELF_RST);

	/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
	 * descriptors (which is bad).
	 */
	ef4_reado(efx, &temp, FR_AZ_TX_CFG);
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
	ef4_writeo(efx, &temp, FR_AZ_TX_CFG);

	falcon_init_rx_cfg(efx);

	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		/* B0 and later support RSS; push the initial config */
		falcon_b0_rx_push_rss_config(efx, false, efx->rx_indir_table);

		/* Set destination of both TX and RX Flush events */
		EF4_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
		ef4_writeo(efx, &temp, FR_BZ_DP_CTRL);
	}

	/* Common (farch) initialisation shared with later NIC families */
	ef4_farch_init_common(efx);

	return 0;
}
2540*4882a593Smuzhiyun 
/* Undo falcon_probe_nic(): release board, I2C, IRQ status buffer,
 * secondary PCI function and private NIC state, in that order.
 */
static void falcon_remove_nic(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct falcon_board *board = falcon_board(efx);

	board->type->fini(efx);

	/* Remove I2C adapter and clear it in preparation for a retry */
	i2c_del_adapter(&board->i2c_adap);
	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));

	ef4_nic_free_buffer(efx, &efx->irq_status);

	/* Fully reset the hardware before releasing the second function */
	__falcon_reset_hw(efx, RESET_TYPE_ALL);

	/* Release the second function after the reset */
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}

	/* Tear down the private nic state */
	kfree(efx->nic_data);
	efx->nic_data = NULL;
}
2566*4882a593Smuzhiyun 
falcon_describe_nic_stats(struct ef4_nic * efx,u8 * names)2567*4882a593Smuzhiyun static size_t falcon_describe_nic_stats(struct ef4_nic *efx, u8 *names)
2568*4882a593Smuzhiyun {
2569*4882a593Smuzhiyun 	return ef4_nic_describe_stats(falcon_stat_desc, FALCON_STAT_COUNT,
2570*4882a593Smuzhiyun 				      falcon_stat_mask, names);
2571*4882a593Smuzhiyun }
2572*4882a593Smuzhiyun 
/* Refresh the cached statistics and optionally copy them out.
 * @full_stats: if non-NULL, receives all FALCON_STAT_COUNT raw counters
 * @core_stats: if non-NULL, filled with netdev-level counters derived
 *	from the raw statistics
 * Returns FALCON_STAT_COUNT.
 */
static size_t falcon_update_nic_stats(struct ef4_nic *efx, u64 *full_stats,
				      struct rtnl_link_stats64 *core_stats)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	u64 *stats = nic_data->stats;
	ef4_oword_t cnt;

	/* Only touch the hardware while stats gathering is enabled */
	if (!nic_data->stats_disable_count) {
		/* This counter is read-to-clear, so accumulate it */
		ef4_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
		stats[FALCON_STAT_rx_nodesc_drop_cnt] +=
			EF4_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);

		/* Consume the MAC stats DMA buffer if a previously
		 * requested transfer has completed
		 */
		if (nic_data->stats_pending &&
		    FALCON_XMAC_STATS_DMA_FLAG(efx)) {
			nic_data->stats_pending = false;
			rmb(); /* read the done flag before the stats */
			ef4_nic_update_stats(
				falcon_stat_desc, FALCON_STAT_COUNT,
				falcon_stat_mask,
				stats, efx->stats_buffer.addr, true);
		}

		/* Update derived statistic */
		ef4_update_diff_stat(&stats[FALCON_STAT_rx_bad_bytes],
				     stats[FALCON_STAT_rx_bytes] -
				     stats[FALCON_STAT_rx_good_bytes] -
				     stats[FALCON_STAT_rx_control] * 64);
		ef4_update_sw_stats(efx, stats);
	}

	if (full_stats)
		memcpy(full_stats, stats, sizeof(u64) * FALCON_STAT_COUNT);

	/* Translate the raw counters into struct rtnl_link_stats64 */
	if (core_stats) {
		core_stats->rx_packets = stats[FALCON_STAT_rx_packets];
		core_stats->tx_packets = stats[FALCON_STAT_tx_packets];
		core_stats->rx_bytes = stats[FALCON_STAT_rx_bytes];
		core_stats->tx_bytes = stats[FALCON_STAT_tx_bytes];
		core_stats->rx_dropped = stats[FALCON_STAT_rx_nodesc_drop_cnt] +
					 stats[GENERIC_STAT_rx_nodesc_trunc] +
					 stats[GENERIC_STAT_rx_noskb_drops];
		core_stats->multicast = stats[FALCON_STAT_rx_multicast];
		core_stats->rx_length_errors =
			stats[FALCON_STAT_rx_gtjumbo] +
			stats[FALCON_STAT_rx_length_error];
		core_stats->rx_crc_errors = stats[FALCON_STAT_rx_bad];
		core_stats->rx_frame_errors = stats[FALCON_STAT_rx_align_error];
		core_stats->rx_fifo_errors = stats[FALCON_STAT_rx_overflow];

		core_stats->rx_errors = (core_stats->rx_length_errors +
					 core_stats->rx_crc_errors +
					 core_stats->rx_frame_errors +
					 stats[FALCON_STAT_rx_symbol_error]);
	}

	return FALCON_STAT_COUNT;
}
2630*4882a593Smuzhiyun 
falcon_start_nic_stats(struct ef4_nic * efx)2631*4882a593Smuzhiyun void falcon_start_nic_stats(struct ef4_nic *efx)
2632*4882a593Smuzhiyun {
2633*4882a593Smuzhiyun 	struct falcon_nic_data *nic_data = efx->nic_data;
2634*4882a593Smuzhiyun 
2635*4882a593Smuzhiyun 	spin_lock_bh(&efx->stats_lock);
2636*4882a593Smuzhiyun 	if (--nic_data->stats_disable_count == 0)
2637*4882a593Smuzhiyun 		falcon_stats_request(efx);
2638*4882a593Smuzhiyun 	spin_unlock_bh(&efx->stats_lock);
2639*4882a593Smuzhiyun }
2640*4882a593Smuzhiyun 
/* We don't actually pull stats on Falcon.  Wait 10ms so that
 * they arrive when we call this just after start_stats.
 */
static void falcon_pull_nic_stats(struct ef4_nic *efx)
{
	/* Allow time for a previously requested stats DMA to land */
	msleep(10);
}
2648*4882a593Smuzhiyun 
/* Disable statistics gathering and wait for any in-flight stats DMA
 * to finish.  May sleep; paired with falcon_start_nic_stats().
 */
void falcon_stop_nic_stats(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int i;

	might_sleep();

	/* Block further stats requests before quiescing */
	spin_lock_bh(&efx->stats_lock);
	++nic_data->stats_disable_count;
	spin_unlock_bh(&efx->stats_lock);

	del_timer_sync(&nic_data->stats_timer);

	/* Wait enough time for the most recent transfer to
	 * complete. */
	for (i = 0; i < 4 && nic_data->stats_pending; i++) {
		if (FALCON_XMAC_STATS_DMA_FLAG(efx))
			break;
		msleep(1);
	}

	/* Fold in whatever the last transfer delivered */
	spin_lock_bh(&efx->stats_lock);
	falcon_stats_complete(efx);
	spin_unlock_bh(&efx->stats_lock);
}
2674*4882a593Smuzhiyun 
falcon_set_id_led(struct ef4_nic * efx,enum ef4_led_mode mode)2675*4882a593Smuzhiyun static void falcon_set_id_led(struct ef4_nic *efx, enum ef4_led_mode mode)
2676*4882a593Smuzhiyun {
2677*4882a593Smuzhiyun 	falcon_board(efx)->type->set_id_led(efx, mode);
2678*4882a593Smuzhiyun }
2679*4882a593Smuzhiyun 
2680*4882a593Smuzhiyun /**************************************************************************
2681*4882a593Smuzhiyun  *
2682*4882a593Smuzhiyun  * Wake on LAN
2683*4882a593Smuzhiyun  *
2684*4882a593Smuzhiyun  **************************************************************************
2685*4882a593Smuzhiyun  */
2686*4882a593Smuzhiyun 
falcon_get_wol(struct ef4_nic * efx,struct ethtool_wolinfo * wol)2687*4882a593Smuzhiyun static void falcon_get_wol(struct ef4_nic *efx, struct ethtool_wolinfo *wol)
2688*4882a593Smuzhiyun {
2689*4882a593Smuzhiyun 	wol->supported = 0;
2690*4882a593Smuzhiyun 	wol->wolopts = 0;
2691*4882a593Smuzhiyun 	memset(&wol->sopass, 0, sizeof(wol->sopass));
2692*4882a593Smuzhiyun }
2693*4882a593Smuzhiyun 
/* Wake-on-LAN is unsupported: only "no wake modes" (type == 0) is valid. */
static int falcon_set_wol(struct ef4_nic *efx, u32 type)
{
	return type ? -EINVAL : 0;
}
2700*4882a593Smuzhiyun 
2701*4882a593Smuzhiyun /**************************************************************************
2702*4882a593Smuzhiyun  *
2703*4882a593Smuzhiyun  * Revision-dependent attributes used by efx.c and nic.c
2704*4882a593Smuzhiyun  *
2705*4882a593Smuzhiyun  **************************************************************************
2706*4882a593Smuzhiyun  */
2707*4882a593Smuzhiyun 
/* Method table for SFC4000 revision A0/A1 hardware */
const struct ef4_nic_type falcon_a1_nic_type = {
	.mem_bar = EF4_MEM_BAR,
	.mem_map_size = falcon_a1_mem_map_size,
	/* Lifecycle */
	.probe = falcon_probe_nic,
	.remove = falcon_remove_nic,
	.init = falcon_init_nic,
	.dimension_resources = falcon_dimension_resources,
	.fini = falcon_irq_ack_a1,
	.monitor = falcon_monitor,
	/* Reset handling */
	.map_reset_reason = falcon_map_reset_reason,
	.map_reset_flags = falcon_map_reset_flags,
	.reset = falcon_reset_hw,
	/* Port */
	.probe_port = falcon_probe_port,
	.remove_port = falcon_remove_port,
	.handle_global_event = falcon_handle_global_event,
	.fini_dmaq = ef4_farch_fini_dmaq,
	.prepare_flush = falcon_prepare_flush,
	.finish_flush = ef4_port_dummy_op_void,
	.prepare_flr = ef4_port_dummy_op_void,
	.finish_flr = ef4_farch_finish_flr,
	/* Statistics */
	.describe_stats = falcon_describe_nic_stats,
	.update_stats = falcon_update_nic_stats,
	.start_stats = falcon_start_nic_stats,
	.pull_stats = falcon_pull_nic_stats,
	.stop_stats = falcon_stop_nic_stats,
	.set_id_led = falcon_set_id_led,
	.push_irq_moderation = falcon_push_irq_moderation,
	/* MAC / PHY */
	.reconfigure_port = falcon_reconfigure_port,
	.prepare_enable_fc_tx = falcon_a1_prepare_enable_fc_tx,
	.reconfigure_mac = falcon_reconfigure_xmac,
	.check_mac_fault = falcon_xmac_check_fault,
	.get_wol = falcon_get_wol,
	.set_wol = falcon_set_wol,
	.resume_wol = ef4_port_dummy_op_void,
	.test_nvram = falcon_test_nvram,
	/* Interrupts: A1 needs a revision-specific legacy handler */
	.irq_enable_master = ef4_farch_irq_enable_master,
	.irq_test_generate = ef4_farch_irq_test_generate,
	.irq_disable_non_ev = ef4_farch_irq_disable_master,
	.irq_handle_msi = ef4_farch_msi_interrupt,
	.irq_handle_legacy = falcon_legacy_interrupt_a1,
	/* Datapath: common farch implementations */
	.tx_probe = ef4_farch_tx_probe,
	.tx_init = ef4_farch_tx_init,
	.tx_remove = ef4_farch_tx_remove,
	.tx_write = ef4_farch_tx_write,
	.tx_limit_len = ef4_farch_tx_limit_len,
	.rx_push_rss_config = dummy_rx_push_rss_config,
	.rx_probe = ef4_farch_rx_probe,
	.rx_init = ef4_farch_rx_init,
	.rx_remove = ef4_farch_rx_remove,
	.rx_write = ef4_farch_rx_write,
	.rx_defer_refill = ef4_farch_rx_defer_refill,
	.ev_probe = ef4_farch_ev_probe,
	.ev_init = ef4_farch_ev_init,
	.ev_fini = ef4_farch_ev_fini,
	.ev_remove = ef4_farch_ev_remove,
	.ev_process = ef4_farch_ev_process,
	.ev_read_ack = ef4_farch_ev_read_ack,
	.ev_test_generate = ef4_farch_ev_test_generate,

	/* We don't expose the filter table on Falcon A1 as it is not
	 * mapped into function 0, but these implementations still
	 * work with a degenerate case of all tables set to size 0.
	 */
	.filter_table_probe = ef4_farch_filter_table_probe,
	.filter_table_restore = ef4_farch_filter_table_restore,
	.filter_table_remove = ef4_farch_filter_table_remove,
	.filter_insert = ef4_farch_filter_insert,
	.filter_remove_safe = ef4_farch_filter_remove_safe,
	.filter_get_safe = ef4_farch_filter_get_safe,
	.filter_clear_rx = ef4_farch_filter_clear_rx,
	.filter_count_rx_used = ef4_farch_filter_count_rx_used,
	.filter_get_rx_id_limit = ef4_farch_filter_get_rx_id_limit,
	.filter_get_rx_ids = ef4_farch_filter_get_rx_ids,

#ifdef CONFIG_SFC_FALCON_MTD
	.mtd_probe = falcon_mtd_probe,
	.mtd_rename = falcon_mtd_rename,
	.mtd_read = falcon_mtd_read,
	.mtd_erase = falcon_mtd_erase,
	.mtd_write = falcon_mtd_write,
	.mtd_sync = falcon_mtd_sync,
#endif

	/* Revision-dependent constants */
	.revision = EF4_REV_FALCON_A1,
	.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
	.rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
	.buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
	.evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
	.evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_buffer_padding = 0x24,
	.can_rx_scatter = false,
	.max_interrupt_mode = EF4_INT_MODE_MSI,
	.timer_period_max =  1 << FRF_AB_TC_TIMER_VAL_WIDTH,
	.offload_features = NETIF_F_IP_CSUM,
};
2804*4882a593Smuzhiyun 
/* Method table for SFC4000 revision B0 hardware */
const struct ef4_nic_type falcon_b0_nic_type = {
	.mem_bar = EF4_MEM_BAR,
	.mem_map_size = falcon_b0_mem_map_size,
	/* Lifecycle */
	.probe = falcon_probe_nic,
	.remove = falcon_remove_nic,
	.init = falcon_init_nic,
	.dimension_resources = falcon_dimension_resources,
	.fini = ef4_port_dummy_op_void,
	.monitor = falcon_monitor,
	/* Reset handling */
	.map_reset_reason = falcon_map_reset_reason,
	.map_reset_flags = falcon_map_reset_flags,
	.reset = falcon_reset_hw,
	/* Port */
	.probe_port = falcon_probe_port,
	.remove_port = falcon_remove_port,
	.handle_global_event = falcon_handle_global_event,
	.fini_dmaq = ef4_farch_fini_dmaq,
	.prepare_flush = falcon_prepare_flush,
	.finish_flush = ef4_port_dummy_op_void,
	.prepare_flr = ef4_port_dummy_op_void,
	.finish_flr = ef4_farch_finish_flr,
	/* Statistics */
	.describe_stats = falcon_describe_nic_stats,
	.update_stats = falcon_update_nic_stats,
	.start_stats = falcon_start_nic_stats,
	.pull_stats = falcon_pull_nic_stats,
	.stop_stats = falcon_stop_nic_stats,
	.set_id_led = falcon_set_id_led,
	.push_irq_moderation = falcon_push_irq_moderation,
	/* MAC / PHY */
	.reconfigure_port = falcon_reconfigure_port,
	.prepare_enable_fc_tx = falcon_b0_prepare_enable_fc_tx,
	.reconfigure_mac = falcon_reconfigure_xmac,
	.check_mac_fault = falcon_xmac_check_fault,
	.get_wol = falcon_get_wol,
	.set_wol = falcon_set_wol,
	.resume_wol = ef4_port_dummy_op_void,
	.test_chip = falcon_b0_test_chip,
	.test_nvram = falcon_test_nvram,
	/* Interrupts */
	.irq_enable_master = ef4_farch_irq_enable_master,
	.irq_test_generate = ef4_farch_irq_test_generate,
	.irq_disable_non_ev = ef4_farch_irq_disable_master,
	.irq_handle_msi = ef4_farch_msi_interrupt,
	.irq_handle_legacy = ef4_farch_legacy_interrupt,
	/* Datapath: common farch implementations */
	.tx_probe = ef4_farch_tx_probe,
	.tx_init = ef4_farch_tx_init,
	.tx_remove = ef4_farch_tx_remove,
	.tx_write = ef4_farch_tx_write,
	.tx_limit_len = ef4_farch_tx_limit_len,
	.rx_push_rss_config = falcon_b0_rx_push_rss_config,
	.rx_probe = ef4_farch_rx_probe,
	.rx_init = ef4_farch_rx_init,
	.rx_remove = ef4_farch_rx_remove,
	.rx_write = ef4_farch_rx_write,
	.rx_defer_refill = ef4_farch_rx_defer_refill,
	.ev_probe = ef4_farch_ev_probe,
	.ev_init = ef4_farch_ev_init,
	.ev_fini = ef4_farch_ev_fini,
	.ev_remove = ef4_farch_ev_remove,
	.ev_process = ef4_farch_ev_process,
	.ev_read_ack = ef4_farch_ev_read_ack,
	.ev_test_generate = ef4_farch_ev_test_generate,
	/* Filters: fully supported on B0 (unlike A1) */
	.filter_table_probe = ef4_farch_filter_table_probe,
	.filter_table_restore = ef4_farch_filter_table_restore,
	.filter_table_remove = ef4_farch_filter_table_remove,
	.filter_update_rx_scatter = ef4_farch_filter_update_rx_scatter,
	.filter_insert = ef4_farch_filter_insert,
	.filter_remove_safe = ef4_farch_filter_remove_safe,
	.filter_get_safe = ef4_farch_filter_get_safe,
	.filter_clear_rx = ef4_farch_filter_clear_rx,
	.filter_count_rx_used = ef4_farch_filter_count_rx_used,
	.filter_get_rx_id_limit = ef4_farch_filter_get_rx_id_limit,
	.filter_get_rx_ids = ef4_farch_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
	.filter_rfs_insert = ef4_farch_filter_rfs_insert,
	.filter_rfs_expire_one = ef4_farch_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_FALCON_MTD
	.mtd_probe = falcon_mtd_probe,
	.mtd_rename = falcon_mtd_rename,
	.mtd_read = falcon_mtd_read,
	.mtd_erase = falcon_mtd_erase,
	.mtd_write = falcon_mtd_write,
	.mtd_sync = falcon_mtd_sync,
#endif

	/* Revision-dependent constants */
	.revision = EF4_REV_FALCON_B0,
	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_prefix_size = FS_BZ_RX_PREFIX_SIZE,
	.rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
	.rx_buffer_padding = 0,
	.can_rx_scatter = true,
	.max_interrupt_mode = EF4_INT_MODE_MSIX,
	.timer_period_max =  1 << FRF_AB_TC_TIMER_VAL_WIDTH,
	.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
	.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
};
2904