xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/intel/ixgb/ixgb_hw.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /* Copyright(c) 1999 - 2008 Intel Corporation. */
3*4882a593Smuzhiyun 
4*4882a593Smuzhiyun /* ixgb_hw.c
5*4882a593Smuzhiyun  * Shared functions for accessing and configuring the adapter
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <linux/pci_ids.h>
11*4882a593Smuzhiyun #include "ixgb_hw.h"
12*4882a593Smuzhiyun #include "ixgb_ids.h"
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun #include <linux/etherdevice.h>
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun /*  Local function prototypes */
17*4882a593Smuzhiyun 
18*4882a593Smuzhiyun static u32 ixgb_hash_mc_addr(struct ixgb_hw *hw, u8 * mc_addr);
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun static void ixgb_mta_set(struct ixgb_hw *hw, u32 hash_value);
21*4882a593Smuzhiyun 
22*4882a593Smuzhiyun static void ixgb_get_bus_info(struct ixgb_hw *hw);
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun static bool ixgb_link_reset(struct ixgb_hw *hw);
25*4882a593Smuzhiyun 
26*4882a593Smuzhiyun static void ixgb_optics_reset(struct ixgb_hw *hw);
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun static void ixgb_optics_reset_bcm(struct ixgb_hw *hw);
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun static ixgb_phy_type ixgb_identify_phy(struct ixgb_hw *hw);
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun static void ixgb_clear_hw_cntrs(struct ixgb_hw *hw);
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun static void ixgb_clear_vfta(struct ixgb_hw *hw);
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun static void ixgb_init_rx_addrs(struct ixgb_hw *hw);
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun static u16 ixgb_read_phy_reg(struct ixgb_hw *hw,
39*4882a593Smuzhiyun 				  u32 reg_address,
40*4882a593Smuzhiyun 				  u32 phy_address,
41*4882a593Smuzhiyun 				  u32 device_type);
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun static bool ixgb_setup_fc(struct ixgb_hw *hw);
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun static bool mac_addr_valid(u8 *mac_addr);
46*4882a593Smuzhiyun 
/******************************************************************************
 * Issue a global reset to the MAC and apply PHY/optics post-reset workarounds.
 *
 * hw - Struct containing variables accessed by shared code
 *
 * Returns: the register value read back after the reset delay; the
 *          self-clearing IXGB_CTRL0_RST bit should be clear in it.
 *          NOTE(review): on Sun-branded boards ctrl_reg is overwritten with
 *          the CTRL1 value before returning, so the caller's RST check then
 *          tests CTRL1 bits -- confirm this is intended.
 *****************************************************************************/
static u32 ixgb_mac_reset(struct ixgb_hw *hw)
{
	u32 ctrl_reg;

	/* Global reset request plus the software-definable pin defaults:
	 * all four SDP pins driven as outputs, initial levels 1101
	 * (SDP3/SDP2/SDP0 high, SDP1 low).
	 */
	ctrl_reg =  IXGB_CTRL0_RST |
				IXGB_CTRL0_SDP3_DIR |   /* All pins are Output=1 */
				IXGB_CTRL0_SDP2_DIR |
				IXGB_CTRL0_SDP1_DIR |
				IXGB_CTRL0_SDP0_DIR |
				IXGB_CTRL0_SDP3	 |   /* Initial value 1101   */
				IXGB_CTRL0_SDP2	 |
				IXGB_CTRL0_SDP0;

#ifdef HP_ZX1
	/* Workaround for 82597EX reset errata */
	IXGB_WRITE_REG_IO(hw, CTRL0, ctrl_reg);
#else
	IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);
#endif

	/* Delay a few ms just to allow the reset to complete */
	msleep(IXGB_DELAY_AFTER_RESET);
	ctrl_reg = IXGB_READ_REG(hw, CTRL0);
#ifdef DBG
	/* Make sure the self-clearing global reset bit did self clear */
	ASSERT(!(ctrl_reg & IXGB_CTRL0_RST));
#endif

	/* Sun-branded boards use a Broadcom PHY that needs its own reset
	 * sequence, gated behind extra SDP direction/interrupt enables.
	 */
	if (hw->subsystem_vendor_id == PCI_VENDOR_ID_SUN) {
		ctrl_reg =  /* Enable interrupt from XFP and SerDes */
			   IXGB_CTRL1_GPI0_EN |
			   IXGB_CTRL1_SDP6_DIR |
			   IXGB_CTRL1_SDP7_DIR |
			   IXGB_CTRL1_SDP6 |
			   IXGB_CTRL1_SDP7;
		IXGB_WRITE_REG(hw, CTRL1, ctrl_reg);
		ixgb_optics_reset_bcm(hw);
	}

	/* TXN17401 optics also need an explicit reset after a MAC reset */
	if (hw->phy_type == ixgb_phy_type_txn17401)
		ixgb_optics_reset(hw);

	return ctrl_reg;
}
91*4882a593Smuzhiyun 
/******************************************************************************
 * Reset the transmit and receive units; mask and clear all interrupts.
 *
 * hw - Struct containing variables accessed by shared code
 *
 * Returns: the IXGB_CTRL0_RST bit of the post-reset control register value
 *          (nonzero/true if the reset bit failed to self-clear), or false
 *          immediately if the adapter was already stopped.
 *****************************************************************************/
bool
ixgb_adapter_stop(struct ixgb_hw *hw)
{
	u32 ctrl_reg;

	ENTER();

	/* If we are stopped or resetting exit gracefully and wait to be
	 * started again before accessing the hardware.
	 */
	if (hw->adapter_stopped) {
		pr_debug("Exiting because the adapter is already stopped!!!\n");
		return false;
	}

	/* Set the Adapter Stopped flag so other driver functions stop
	 * touching the Hardware.
	 */
	hw->adapter_stopped = true;

	/* Clear interrupt mask to stop board from generating interrupts */
	pr_debug("Masking off all interrupts\n");
	IXGB_WRITE_REG(hw, IMC, 0xFFFFFFFF);

	/* Disable the Transmit and Receive units.  Then delay to allow
	 * any pending transactions to complete before we hit the MAC with
	 * the global reset.
	 */
	IXGB_WRITE_REG(hw, RCTL, IXGB_READ_REG(hw, RCTL) & ~IXGB_RCTL_RXEN);
	IXGB_WRITE_REG(hw, TCTL, IXGB_READ_REG(hw, TCTL) & ~IXGB_TCTL_TXEN);
	IXGB_WRITE_FLUSH(hw);
	msleep(IXGB_DELAY_BEFORE_RESET);

	/* Issue a global reset to the MAC.  This will reset the chip's
	 * transmit, receive, DMA, and link units.  It will not effect
	 * the current PCI configuration.  The global reset bit is self-
	 * clearing, and should clear within a microsecond.
	 */
	pr_debug("Issuing a global reset to MAC\n");

	ctrl_reg = ixgb_mac_reset(hw);

	/* Re-mask: the reset may have re-enabled interrupt sources */
	pr_debug("Masking off all interrupts\n");
	IXGB_WRITE_REG(hw, IMC, 0xffffffff);

	/* Clear any pending interrupt events (ICR is read-to-clear). */
	IXGB_READ_REG(hw, ICR);

	return ctrl_reg & IXGB_CTRL0_RST;
}
148*4882a593Smuzhiyun 
149*4882a593Smuzhiyun 
150*4882a593Smuzhiyun /******************************************************************************
151*4882a593Smuzhiyun  * Identifies the vendor of the optics module on the adapter.  The SR adapters
152*4882a593Smuzhiyun  * support two different types of XPAK optics, so it is necessary to determine
153*4882a593Smuzhiyun  * which optics are present before applying any optics-specific workarounds.
154*4882a593Smuzhiyun  *
155*4882a593Smuzhiyun  * hw - Struct containing variables accessed by shared code.
156*4882a593Smuzhiyun  *
157*4882a593Smuzhiyun  * Returns: the vendor of the XPAK optics module.
158*4882a593Smuzhiyun  *****************************************************************************/
159*4882a593Smuzhiyun static ixgb_xpak_vendor
ixgb_identify_xpak_vendor(struct ixgb_hw * hw)160*4882a593Smuzhiyun ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
161*4882a593Smuzhiyun {
162*4882a593Smuzhiyun 	u32 i;
163*4882a593Smuzhiyun 	u16 vendor_name[5];
164*4882a593Smuzhiyun 	ixgb_xpak_vendor xpak_vendor;
165*4882a593Smuzhiyun 
166*4882a593Smuzhiyun 	ENTER();
167*4882a593Smuzhiyun 
168*4882a593Smuzhiyun 	/* Read the first few bytes of the vendor string from the XPAK NVR
169*4882a593Smuzhiyun 	 * registers.  These are standard XENPAK/XPAK registers, so all XPAK
170*4882a593Smuzhiyun 	 * devices should implement them. */
171*4882a593Smuzhiyun 	for (i = 0; i < 5; i++) {
172*4882a593Smuzhiyun 		vendor_name[i] = ixgb_read_phy_reg(hw,
173*4882a593Smuzhiyun 						   MDIO_PMA_PMD_XPAK_VENDOR_NAME
174*4882a593Smuzhiyun 						   + i, IXGB_PHY_ADDRESS,
175*4882a593Smuzhiyun 						   MDIO_MMD_PMAPMD);
176*4882a593Smuzhiyun 	}
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun 	/* Determine the actual vendor */
179*4882a593Smuzhiyun 	if (vendor_name[0] == 'I' &&
180*4882a593Smuzhiyun 	    vendor_name[1] == 'N' &&
181*4882a593Smuzhiyun 	    vendor_name[2] == 'T' &&
182*4882a593Smuzhiyun 	    vendor_name[3] == 'E' && vendor_name[4] == 'L') {
183*4882a593Smuzhiyun 		xpak_vendor = ixgb_xpak_vendor_intel;
184*4882a593Smuzhiyun 	} else {
185*4882a593Smuzhiyun 		xpak_vendor = ixgb_xpak_vendor_infineon;
186*4882a593Smuzhiyun 	}
187*4882a593Smuzhiyun 
188*4882a593Smuzhiyun 	return xpak_vendor;
189*4882a593Smuzhiyun }
190*4882a593Smuzhiyun 
191*4882a593Smuzhiyun /******************************************************************************
192*4882a593Smuzhiyun  * Determine the physical layer module on the adapter.
193*4882a593Smuzhiyun  *
194*4882a593Smuzhiyun  * hw - Struct containing variables accessed by shared code.  The device_id
195*4882a593Smuzhiyun  *      field must be (correctly) populated before calling this routine.
196*4882a593Smuzhiyun  *
197*4882a593Smuzhiyun  * Returns: the phy type of the adapter.
198*4882a593Smuzhiyun  *****************************************************************************/
199*4882a593Smuzhiyun static ixgb_phy_type
ixgb_identify_phy(struct ixgb_hw * hw)200*4882a593Smuzhiyun ixgb_identify_phy(struct ixgb_hw *hw)
201*4882a593Smuzhiyun {
202*4882a593Smuzhiyun 	ixgb_phy_type phy_type;
203*4882a593Smuzhiyun 	ixgb_xpak_vendor xpak_vendor;
204*4882a593Smuzhiyun 
205*4882a593Smuzhiyun 	ENTER();
206*4882a593Smuzhiyun 
207*4882a593Smuzhiyun 	/* Infer the transceiver/phy type from the device id */
208*4882a593Smuzhiyun 	switch (hw->device_id) {
209*4882a593Smuzhiyun 	case IXGB_DEVICE_ID_82597EX:
210*4882a593Smuzhiyun 		pr_debug("Identified TXN17401 optics\n");
211*4882a593Smuzhiyun 		phy_type = ixgb_phy_type_txn17401;
212*4882a593Smuzhiyun 		break;
213*4882a593Smuzhiyun 
214*4882a593Smuzhiyun 	case IXGB_DEVICE_ID_82597EX_SR:
215*4882a593Smuzhiyun 		/* The SR adapters carry two different types of XPAK optics
216*4882a593Smuzhiyun 		 * modules; read the vendor identifier to determine the exact
217*4882a593Smuzhiyun 		 * type of optics. */
218*4882a593Smuzhiyun 		xpak_vendor = ixgb_identify_xpak_vendor(hw);
219*4882a593Smuzhiyun 		if (xpak_vendor == ixgb_xpak_vendor_intel) {
220*4882a593Smuzhiyun 			pr_debug("Identified TXN17201 optics\n");
221*4882a593Smuzhiyun 			phy_type = ixgb_phy_type_txn17201;
222*4882a593Smuzhiyun 		} else {
223*4882a593Smuzhiyun 			pr_debug("Identified G6005 optics\n");
224*4882a593Smuzhiyun 			phy_type = ixgb_phy_type_g6005;
225*4882a593Smuzhiyun 		}
226*4882a593Smuzhiyun 		break;
227*4882a593Smuzhiyun 	case IXGB_DEVICE_ID_82597EX_LR:
228*4882a593Smuzhiyun 		pr_debug("Identified G6104 optics\n");
229*4882a593Smuzhiyun 		phy_type = ixgb_phy_type_g6104;
230*4882a593Smuzhiyun 		break;
231*4882a593Smuzhiyun 	case IXGB_DEVICE_ID_82597EX_CX4:
232*4882a593Smuzhiyun 		pr_debug("Identified CX4\n");
233*4882a593Smuzhiyun 		xpak_vendor = ixgb_identify_xpak_vendor(hw);
234*4882a593Smuzhiyun 		if (xpak_vendor == ixgb_xpak_vendor_intel) {
235*4882a593Smuzhiyun 			pr_debug("Identified TXN17201 optics\n");
236*4882a593Smuzhiyun 			phy_type = ixgb_phy_type_txn17201;
237*4882a593Smuzhiyun 		} else {
238*4882a593Smuzhiyun 			pr_debug("Identified G6005 optics\n");
239*4882a593Smuzhiyun 			phy_type = ixgb_phy_type_g6005;
240*4882a593Smuzhiyun 		}
241*4882a593Smuzhiyun 		break;
242*4882a593Smuzhiyun 	default:
243*4882a593Smuzhiyun 		pr_debug("Unknown physical layer module\n");
244*4882a593Smuzhiyun 		phy_type = ixgb_phy_type_unknown;
245*4882a593Smuzhiyun 		break;
246*4882a593Smuzhiyun 	}
247*4882a593Smuzhiyun 
248*4882a593Smuzhiyun 	/* update phy type for sun specific board */
249*4882a593Smuzhiyun 	if (hw->subsystem_vendor_id == PCI_VENDOR_ID_SUN)
250*4882a593Smuzhiyun 		phy_type = ixgb_phy_type_bcm;
251*4882a593Smuzhiyun 
252*4882a593Smuzhiyun 	return phy_type;
253*4882a593Smuzhiyun }
254*4882a593Smuzhiyun 
255*4882a593Smuzhiyun /******************************************************************************
256*4882a593Smuzhiyun  * Performs basic configuration of the adapter.
257*4882a593Smuzhiyun  *
258*4882a593Smuzhiyun  * hw - Struct containing variables accessed by shared code
259*4882a593Smuzhiyun  *
260*4882a593Smuzhiyun  * Resets the controller.
261*4882a593Smuzhiyun  * Reads and validates the EEPROM.
262*4882a593Smuzhiyun  * Initializes the receive address registers.
263*4882a593Smuzhiyun  * Initializes the multicast table.
264*4882a593Smuzhiyun  * Clears all on-chip counters.
265*4882a593Smuzhiyun  * Calls routine to setup flow control settings.
266*4882a593Smuzhiyun  * Leaves the transmit and receive units disabled and uninitialized.
267*4882a593Smuzhiyun  *
268*4882a593Smuzhiyun  * Returns:
269*4882a593Smuzhiyun  *      true if successful,
270*4882a593Smuzhiyun  *      false if unrecoverable problems were encountered.
271*4882a593Smuzhiyun  *****************************************************************************/
272*4882a593Smuzhiyun bool
ixgb_init_hw(struct ixgb_hw * hw)273*4882a593Smuzhiyun ixgb_init_hw(struct ixgb_hw *hw)
274*4882a593Smuzhiyun {
275*4882a593Smuzhiyun 	u32 i;
276*4882a593Smuzhiyun 	bool status;
277*4882a593Smuzhiyun 
278*4882a593Smuzhiyun 	ENTER();
279*4882a593Smuzhiyun 
280*4882a593Smuzhiyun 	/* Issue a global reset to the MAC.  This will reset the chip's
281*4882a593Smuzhiyun 	 * transmit, receive, DMA, and link units.  It will not effect
282*4882a593Smuzhiyun 	 * the current PCI configuration.  The global reset bit is self-
283*4882a593Smuzhiyun 	 * clearing, and should clear within a microsecond.
284*4882a593Smuzhiyun 	 */
285*4882a593Smuzhiyun 	pr_debug("Issuing a global reset to MAC\n");
286*4882a593Smuzhiyun 
287*4882a593Smuzhiyun 	ixgb_mac_reset(hw);
288*4882a593Smuzhiyun 
289*4882a593Smuzhiyun 	pr_debug("Issuing an EE reset to MAC\n");
290*4882a593Smuzhiyun #ifdef HP_ZX1
291*4882a593Smuzhiyun 	/* Workaround for 82597EX reset errata */
292*4882a593Smuzhiyun 	IXGB_WRITE_REG_IO(hw, CTRL1, IXGB_CTRL1_EE_RST);
293*4882a593Smuzhiyun #else
294*4882a593Smuzhiyun 	IXGB_WRITE_REG(hw, CTRL1, IXGB_CTRL1_EE_RST);
295*4882a593Smuzhiyun #endif
296*4882a593Smuzhiyun 
297*4882a593Smuzhiyun 	/* Delay a few ms just to allow the reset to complete */
298*4882a593Smuzhiyun 	msleep(IXGB_DELAY_AFTER_EE_RESET);
299*4882a593Smuzhiyun 
300*4882a593Smuzhiyun 	if (!ixgb_get_eeprom_data(hw))
301*4882a593Smuzhiyun 		return false;
302*4882a593Smuzhiyun 
303*4882a593Smuzhiyun 	/* Use the device id to determine the type of phy/transceiver. */
304*4882a593Smuzhiyun 	hw->device_id = ixgb_get_ee_device_id(hw);
305*4882a593Smuzhiyun 	hw->phy_type = ixgb_identify_phy(hw);
306*4882a593Smuzhiyun 
307*4882a593Smuzhiyun 	/* Setup the receive addresses.
308*4882a593Smuzhiyun 	 * Receive Address Registers (RARs 0 - 15).
309*4882a593Smuzhiyun 	 */
310*4882a593Smuzhiyun 	ixgb_init_rx_addrs(hw);
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun 	/*
313*4882a593Smuzhiyun 	 * Check that a valid MAC address has been set.
314*4882a593Smuzhiyun 	 * If it is not valid, we fail hardware init.
315*4882a593Smuzhiyun 	 */
316*4882a593Smuzhiyun 	if (!mac_addr_valid(hw->curr_mac_addr)) {
317*4882a593Smuzhiyun 		pr_debug("MAC address invalid after ixgb_init_rx_addrs\n");
318*4882a593Smuzhiyun 		return(false);
319*4882a593Smuzhiyun 	}
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun 	/* tell the routines in this file they can access hardware again */
322*4882a593Smuzhiyun 	hw->adapter_stopped = false;
323*4882a593Smuzhiyun 
324*4882a593Smuzhiyun 	/* Fill in the bus_info structure */
325*4882a593Smuzhiyun 	ixgb_get_bus_info(hw);
326*4882a593Smuzhiyun 
327*4882a593Smuzhiyun 	/* Zero out the Multicast HASH table */
328*4882a593Smuzhiyun 	pr_debug("Zeroing the MTA\n");
329*4882a593Smuzhiyun 	for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
330*4882a593Smuzhiyun 		IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
331*4882a593Smuzhiyun 
332*4882a593Smuzhiyun 	/* Zero out the VLAN Filter Table Array */
333*4882a593Smuzhiyun 	ixgb_clear_vfta(hw);
334*4882a593Smuzhiyun 
335*4882a593Smuzhiyun 	/* Zero all of the hardware counters */
336*4882a593Smuzhiyun 	ixgb_clear_hw_cntrs(hw);
337*4882a593Smuzhiyun 
338*4882a593Smuzhiyun 	/* Call a subroutine to setup flow control. */
339*4882a593Smuzhiyun 	status = ixgb_setup_fc(hw);
340*4882a593Smuzhiyun 
341*4882a593Smuzhiyun 	/* 82597EX errata: Call check-for-link in case lane deskew is locked */
342*4882a593Smuzhiyun 	ixgb_check_for_link(hw);
343*4882a593Smuzhiyun 
344*4882a593Smuzhiyun 	return status;
345*4882a593Smuzhiyun }
346*4882a593Smuzhiyun 
347*4882a593Smuzhiyun /******************************************************************************
348*4882a593Smuzhiyun  * Initializes receive address filters.
349*4882a593Smuzhiyun  *
350*4882a593Smuzhiyun  * hw - Struct containing variables accessed by shared code
351*4882a593Smuzhiyun  *
352*4882a593Smuzhiyun  * Places the MAC address in receive address register 0 and clears the rest
353*4882a593Smuzhiyun  * of the receive address registers. Clears the multicast table. Assumes
354*4882a593Smuzhiyun  * the receiver is in reset when the routine is called.
355*4882a593Smuzhiyun  *****************************************************************************/
356*4882a593Smuzhiyun static void
ixgb_init_rx_addrs(struct ixgb_hw * hw)357*4882a593Smuzhiyun ixgb_init_rx_addrs(struct ixgb_hw *hw)
358*4882a593Smuzhiyun {
359*4882a593Smuzhiyun 	u32 i;
360*4882a593Smuzhiyun 
361*4882a593Smuzhiyun 	ENTER();
362*4882a593Smuzhiyun 
363*4882a593Smuzhiyun 	/*
364*4882a593Smuzhiyun 	 * If the current mac address is valid, assume it is a software override
365*4882a593Smuzhiyun 	 * to the permanent address.
366*4882a593Smuzhiyun 	 * Otherwise, use the permanent address from the eeprom.
367*4882a593Smuzhiyun 	 */
368*4882a593Smuzhiyun 	if (!mac_addr_valid(hw->curr_mac_addr)) {
369*4882a593Smuzhiyun 
370*4882a593Smuzhiyun 		/* Get the MAC address from the eeprom for later reference */
371*4882a593Smuzhiyun 		ixgb_get_ee_mac_addr(hw, hw->curr_mac_addr);
372*4882a593Smuzhiyun 
373*4882a593Smuzhiyun 		pr_debug("Keeping Permanent MAC Addr = %pM\n",
374*4882a593Smuzhiyun 			 hw->curr_mac_addr);
375*4882a593Smuzhiyun 	} else {
376*4882a593Smuzhiyun 
377*4882a593Smuzhiyun 		/* Setup the receive address. */
378*4882a593Smuzhiyun 		pr_debug("Overriding MAC Address in RAR[0]\n");
379*4882a593Smuzhiyun 		pr_debug("New MAC Addr = %pM\n", hw->curr_mac_addr);
380*4882a593Smuzhiyun 
381*4882a593Smuzhiyun 		ixgb_rar_set(hw, hw->curr_mac_addr, 0);
382*4882a593Smuzhiyun 	}
383*4882a593Smuzhiyun 
384*4882a593Smuzhiyun 	/* Zero out the other 15 receive addresses. */
385*4882a593Smuzhiyun 	pr_debug("Clearing RAR[1-15]\n");
386*4882a593Smuzhiyun 	for (i = 1; i < IXGB_RAR_ENTRIES; i++) {
387*4882a593Smuzhiyun 		/* Write high reg first to disable the AV bit first */
388*4882a593Smuzhiyun 		IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
389*4882a593Smuzhiyun 		IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
390*4882a593Smuzhiyun 	}
391*4882a593Smuzhiyun }
392*4882a593Smuzhiyun 
393*4882a593Smuzhiyun /******************************************************************************
394*4882a593Smuzhiyun  * Updates the MAC's list of multicast addresses.
395*4882a593Smuzhiyun  *
396*4882a593Smuzhiyun  * hw - Struct containing variables accessed by shared code
397*4882a593Smuzhiyun  * mc_addr_list - the list of new multicast addresses
398*4882a593Smuzhiyun  * mc_addr_count - number of addresses
399*4882a593Smuzhiyun  * pad - number of bytes between addresses in the list
400*4882a593Smuzhiyun  *
401*4882a593Smuzhiyun  * The given list replaces any existing list. Clears the last 15 receive
402*4882a593Smuzhiyun  * address registers and the multicast table. Uses receive address registers
403*4882a593Smuzhiyun  * for the first 15 multicast addresses, and hashes the rest into the
404*4882a593Smuzhiyun  * multicast table.
405*4882a593Smuzhiyun  *****************************************************************************/
406*4882a593Smuzhiyun void
ixgb_mc_addr_list_update(struct ixgb_hw * hw,u8 * mc_addr_list,u32 mc_addr_count,u32 pad)407*4882a593Smuzhiyun ixgb_mc_addr_list_update(struct ixgb_hw *hw,
408*4882a593Smuzhiyun 			  u8 *mc_addr_list,
409*4882a593Smuzhiyun 			  u32 mc_addr_count,
410*4882a593Smuzhiyun 			  u32 pad)
411*4882a593Smuzhiyun {
412*4882a593Smuzhiyun 	u32 hash_value;
413*4882a593Smuzhiyun 	u32 i;
414*4882a593Smuzhiyun 	u32 rar_used_count = 1;		/* RAR[0] is used for our MAC address */
415*4882a593Smuzhiyun 	u8 *mca;
416*4882a593Smuzhiyun 
417*4882a593Smuzhiyun 	ENTER();
418*4882a593Smuzhiyun 
419*4882a593Smuzhiyun 	/* Set the new number of MC addresses that we are being requested to use. */
420*4882a593Smuzhiyun 	hw->num_mc_addrs = mc_addr_count;
421*4882a593Smuzhiyun 
422*4882a593Smuzhiyun 	/* Clear RAR[1-15] */
423*4882a593Smuzhiyun 	pr_debug("Clearing RAR[1-15]\n");
424*4882a593Smuzhiyun 	for (i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) {
425*4882a593Smuzhiyun 		IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
426*4882a593Smuzhiyun 		IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
427*4882a593Smuzhiyun 	}
428*4882a593Smuzhiyun 
429*4882a593Smuzhiyun 	/* Clear the MTA */
430*4882a593Smuzhiyun 	pr_debug("Clearing MTA\n");
431*4882a593Smuzhiyun 	for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
432*4882a593Smuzhiyun 		IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
433*4882a593Smuzhiyun 
434*4882a593Smuzhiyun 	/* Add the new addresses */
435*4882a593Smuzhiyun 	mca = mc_addr_list;
436*4882a593Smuzhiyun 	for (i = 0; i < mc_addr_count; i++) {
437*4882a593Smuzhiyun 		pr_debug("Adding the multicast addresses:\n");
438*4882a593Smuzhiyun 		pr_debug("MC Addr #%d = %pM\n", i, mca);
439*4882a593Smuzhiyun 
440*4882a593Smuzhiyun 		/* Place this multicast address in the RAR if there is room, *
441*4882a593Smuzhiyun 		 * else put it in the MTA
442*4882a593Smuzhiyun 		 */
443*4882a593Smuzhiyun 		if (rar_used_count < IXGB_RAR_ENTRIES) {
444*4882a593Smuzhiyun 			ixgb_rar_set(hw, mca, rar_used_count);
445*4882a593Smuzhiyun 			pr_debug("Added a multicast address to RAR[%d]\n", i);
446*4882a593Smuzhiyun 			rar_used_count++;
447*4882a593Smuzhiyun 		} else {
448*4882a593Smuzhiyun 			hash_value = ixgb_hash_mc_addr(hw, mca);
449*4882a593Smuzhiyun 
450*4882a593Smuzhiyun 			pr_debug("Hash value = 0x%03X\n", hash_value);
451*4882a593Smuzhiyun 
452*4882a593Smuzhiyun 			ixgb_mta_set(hw, hash_value);
453*4882a593Smuzhiyun 		}
454*4882a593Smuzhiyun 
455*4882a593Smuzhiyun 		mca += ETH_ALEN + pad;
456*4882a593Smuzhiyun 	}
457*4882a593Smuzhiyun 
458*4882a593Smuzhiyun 	pr_debug("MC Update Complete\n");
459*4882a593Smuzhiyun }
460*4882a593Smuzhiyun 
461*4882a593Smuzhiyun /******************************************************************************
462*4882a593Smuzhiyun  * Hashes an address to determine its location in the multicast table
463*4882a593Smuzhiyun  *
464*4882a593Smuzhiyun  * hw - Struct containing variables accessed by shared code
465*4882a593Smuzhiyun  * mc_addr - the multicast address to hash
466*4882a593Smuzhiyun  *
467*4882a593Smuzhiyun  * Returns:
468*4882a593Smuzhiyun  *      The hash value
469*4882a593Smuzhiyun  *****************************************************************************/
470*4882a593Smuzhiyun static u32
ixgb_hash_mc_addr(struct ixgb_hw * hw,u8 * mc_addr)471*4882a593Smuzhiyun ixgb_hash_mc_addr(struct ixgb_hw *hw,
472*4882a593Smuzhiyun 		   u8 *mc_addr)
473*4882a593Smuzhiyun {
474*4882a593Smuzhiyun 	u32 hash_value = 0;
475*4882a593Smuzhiyun 
476*4882a593Smuzhiyun 	ENTER();
477*4882a593Smuzhiyun 
478*4882a593Smuzhiyun 	/* The portion of the address that is used for the hash table is
479*4882a593Smuzhiyun 	 * determined by the mc_filter_type setting.
480*4882a593Smuzhiyun 	 */
481*4882a593Smuzhiyun 	switch (hw->mc_filter_type) {
482*4882a593Smuzhiyun 		/* [0] [1] [2] [3] [4] [5]
483*4882a593Smuzhiyun 		 * 01  AA  00  12  34  56
484*4882a593Smuzhiyun 		 * LSB                 MSB - According to H/W docs */
485*4882a593Smuzhiyun 	case 0:
486*4882a593Smuzhiyun 		/* [47:36] i.e. 0x563 for above example address */
487*4882a593Smuzhiyun 		hash_value =
488*4882a593Smuzhiyun 		    ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4));
489*4882a593Smuzhiyun 		break;
490*4882a593Smuzhiyun 	case 1:		/* [46:35] i.e. 0xAC6 for above example address */
491*4882a593Smuzhiyun 		hash_value =
492*4882a593Smuzhiyun 		    ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5));
493*4882a593Smuzhiyun 		break;
494*4882a593Smuzhiyun 	case 2:		/* [45:34] i.e. 0x5D8 for above example address */
495*4882a593Smuzhiyun 		hash_value =
496*4882a593Smuzhiyun 		    ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6));
497*4882a593Smuzhiyun 		break;
498*4882a593Smuzhiyun 	case 3:		/* [43:32] i.e. 0x634 for above example address */
499*4882a593Smuzhiyun 		hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8));
500*4882a593Smuzhiyun 		break;
501*4882a593Smuzhiyun 	default:
502*4882a593Smuzhiyun 		/* Invalid mc_filter_type, what should we do? */
503*4882a593Smuzhiyun 		pr_debug("MC filter type param set incorrectly\n");
504*4882a593Smuzhiyun 		ASSERT(0);
505*4882a593Smuzhiyun 		break;
506*4882a593Smuzhiyun 	}
507*4882a593Smuzhiyun 
508*4882a593Smuzhiyun 	hash_value &= 0xFFF;
509*4882a593Smuzhiyun 	return hash_value;
510*4882a593Smuzhiyun }
511*4882a593Smuzhiyun 
512*4882a593Smuzhiyun /******************************************************************************
513*4882a593Smuzhiyun  * Sets the bit in the multicast table corresponding to the hash value.
514*4882a593Smuzhiyun  *
515*4882a593Smuzhiyun  * hw - Struct containing variables accessed by shared code
516*4882a593Smuzhiyun  * hash_value - Multicast address hash value
517*4882a593Smuzhiyun  *****************************************************************************/
518*4882a593Smuzhiyun static void
ixgb_mta_set(struct ixgb_hw * hw,u32 hash_value)519*4882a593Smuzhiyun ixgb_mta_set(struct ixgb_hw *hw,
520*4882a593Smuzhiyun 		  u32 hash_value)
521*4882a593Smuzhiyun {
522*4882a593Smuzhiyun 	u32 hash_bit, hash_reg;
523*4882a593Smuzhiyun 	u32 mta_reg;
524*4882a593Smuzhiyun 
525*4882a593Smuzhiyun 	/* The MTA is a register array of 128 32-bit registers.
526*4882a593Smuzhiyun 	 * It is treated like an array of 4096 bits.  We want to set
527*4882a593Smuzhiyun 	 * bit BitArray[hash_value]. So we figure out what register
528*4882a593Smuzhiyun 	 * the bit is in, read it, OR in the new bit, then write
529*4882a593Smuzhiyun 	 * back the new value.  The register is determined by the
530*4882a593Smuzhiyun 	 * upper 7 bits of the hash value and the bit within that
531*4882a593Smuzhiyun 	 * register are determined by the lower 5 bits of the value.
532*4882a593Smuzhiyun 	 */
533*4882a593Smuzhiyun 	hash_reg = (hash_value >> 5) & 0x7F;
534*4882a593Smuzhiyun 	hash_bit = hash_value & 0x1F;
535*4882a593Smuzhiyun 
536*4882a593Smuzhiyun 	mta_reg = IXGB_READ_REG_ARRAY(hw, MTA, hash_reg);
537*4882a593Smuzhiyun 
538*4882a593Smuzhiyun 	mta_reg |= (1 << hash_bit);
539*4882a593Smuzhiyun 
540*4882a593Smuzhiyun 	IXGB_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta_reg);
541*4882a593Smuzhiyun }
542*4882a593Smuzhiyun 
543*4882a593Smuzhiyun /******************************************************************************
544*4882a593Smuzhiyun  * Puts an ethernet address into a receive address register.
545*4882a593Smuzhiyun  *
546*4882a593Smuzhiyun  * hw - Struct containing variables accessed by shared code
547*4882a593Smuzhiyun  * addr - Address to put into receive address register
548*4882a593Smuzhiyun  * index - Receive address register to write
549*4882a593Smuzhiyun  *****************************************************************************/
550*4882a593Smuzhiyun void
ixgb_rar_set(struct ixgb_hw * hw,u8 * addr,u32 index)551*4882a593Smuzhiyun ixgb_rar_set(struct ixgb_hw *hw,
552*4882a593Smuzhiyun 		  u8 *addr,
553*4882a593Smuzhiyun 		  u32 index)
554*4882a593Smuzhiyun {
555*4882a593Smuzhiyun 	u32 rar_low, rar_high;
556*4882a593Smuzhiyun 
557*4882a593Smuzhiyun 	ENTER();
558*4882a593Smuzhiyun 
559*4882a593Smuzhiyun 	/* HW expects these in little endian so we reverse the byte order
560*4882a593Smuzhiyun 	 * from network order (big endian) to little endian
561*4882a593Smuzhiyun 	 */
562*4882a593Smuzhiyun 	rar_low = ((u32) addr[0] |
563*4882a593Smuzhiyun 		   ((u32)addr[1] << 8) |
564*4882a593Smuzhiyun 		   ((u32)addr[2] << 16) |
565*4882a593Smuzhiyun 		   ((u32)addr[3] << 24));
566*4882a593Smuzhiyun 
567*4882a593Smuzhiyun 	rar_high = ((u32) addr[4] |
568*4882a593Smuzhiyun 			((u32)addr[5] << 8) |
569*4882a593Smuzhiyun 			IXGB_RAH_AV);
570*4882a593Smuzhiyun 
571*4882a593Smuzhiyun 	IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
572*4882a593Smuzhiyun 	IXGB_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
573*4882a593Smuzhiyun }
574*4882a593Smuzhiyun 
/******************************************************************************
 * Writes a value to the specified offset in the VLAN filter table.
 *
 * hw - Struct containing variables accessed by shared code
 * offset - Offset in VLAN filter table to write (32-bit word index)
 * value - Value to write into VLAN filter table
 *****************************************************************************/
void
ixgb_write_vfta(struct ixgb_hw *hw,
		 u32 offset,
		 u32 value)
{
	IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value);
}
589*4882a593Smuzhiyun 
590*4882a593Smuzhiyun /******************************************************************************
591*4882a593Smuzhiyun  * Clears the VLAN filer table
592*4882a593Smuzhiyun  *
593*4882a593Smuzhiyun  * hw - Struct containing variables accessed by shared code
594*4882a593Smuzhiyun  *****************************************************************************/
595*4882a593Smuzhiyun static void
ixgb_clear_vfta(struct ixgb_hw * hw)596*4882a593Smuzhiyun ixgb_clear_vfta(struct ixgb_hw *hw)
597*4882a593Smuzhiyun {
598*4882a593Smuzhiyun 	u32 offset;
599*4882a593Smuzhiyun 
600*4882a593Smuzhiyun 	for (offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
601*4882a593Smuzhiyun 		IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
602*4882a593Smuzhiyun }
603*4882a593Smuzhiyun 
604*4882a593Smuzhiyun /******************************************************************************
605*4882a593Smuzhiyun  * Configures the flow control settings based on SW configuration.
606*4882a593Smuzhiyun  *
607*4882a593Smuzhiyun  * hw - Struct containing variables accessed by shared code
608*4882a593Smuzhiyun  *****************************************************************************/
609*4882a593Smuzhiyun 
610*4882a593Smuzhiyun static bool
ixgb_setup_fc(struct ixgb_hw * hw)611*4882a593Smuzhiyun ixgb_setup_fc(struct ixgb_hw *hw)
612*4882a593Smuzhiyun {
613*4882a593Smuzhiyun 	u32 ctrl_reg;
614*4882a593Smuzhiyun 	u32 pap_reg = 0;   /* by default, assume no pause time */
615*4882a593Smuzhiyun 	bool status = true;
616*4882a593Smuzhiyun 
617*4882a593Smuzhiyun 	ENTER();
618*4882a593Smuzhiyun 
619*4882a593Smuzhiyun 	/* Get the current control reg 0 settings */
620*4882a593Smuzhiyun 	ctrl_reg = IXGB_READ_REG(hw, CTRL0);
621*4882a593Smuzhiyun 
622*4882a593Smuzhiyun 	/* Clear the Receive Pause Enable and Transmit Pause Enable bits */
623*4882a593Smuzhiyun 	ctrl_reg &= ~(IXGB_CTRL0_RPE | IXGB_CTRL0_TPE);
624*4882a593Smuzhiyun 
625*4882a593Smuzhiyun 	/* The possible values of the "flow_control" parameter are:
626*4882a593Smuzhiyun 	 *      0:  Flow control is completely disabled
627*4882a593Smuzhiyun 	 *      1:  Rx flow control is enabled (we can receive pause frames
628*4882a593Smuzhiyun 	 *          but not send pause frames).
629*4882a593Smuzhiyun 	 *      2:  Tx flow control is enabled (we can send pause frames
630*4882a593Smuzhiyun 	 *          but we do not support receiving pause frames).
631*4882a593Smuzhiyun 	 *      3:  Both Rx and TX flow control (symmetric) are enabled.
632*4882a593Smuzhiyun 	 *  other:  Invalid.
633*4882a593Smuzhiyun 	 */
634*4882a593Smuzhiyun 	switch (hw->fc.type) {
635*4882a593Smuzhiyun 	case ixgb_fc_none:	/* 0 */
636*4882a593Smuzhiyun 		/* Set CMDC bit to disable Rx Flow control */
637*4882a593Smuzhiyun 		ctrl_reg |= (IXGB_CTRL0_CMDC);
638*4882a593Smuzhiyun 		break;
639*4882a593Smuzhiyun 	case ixgb_fc_rx_pause:	/* 1 */
640*4882a593Smuzhiyun 		/* RX Flow control is enabled, and TX Flow control is
641*4882a593Smuzhiyun 		 * disabled.
642*4882a593Smuzhiyun 		 */
643*4882a593Smuzhiyun 		ctrl_reg |= (IXGB_CTRL0_RPE);
644*4882a593Smuzhiyun 		break;
645*4882a593Smuzhiyun 	case ixgb_fc_tx_pause:	/* 2 */
646*4882a593Smuzhiyun 		/* TX Flow control is enabled, and RX Flow control is
647*4882a593Smuzhiyun 		 * disabled, by a software over-ride.
648*4882a593Smuzhiyun 		 */
649*4882a593Smuzhiyun 		ctrl_reg |= (IXGB_CTRL0_TPE);
650*4882a593Smuzhiyun 		pap_reg = hw->fc.pause_time;
651*4882a593Smuzhiyun 		break;
652*4882a593Smuzhiyun 	case ixgb_fc_full:	/* 3 */
653*4882a593Smuzhiyun 		/* Flow control (both RX and TX) is enabled by a software
654*4882a593Smuzhiyun 		 * over-ride.
655*4882a593Smuzhiyun 		 */
656*4882a593Smuzhiyun 		ctrl_reg |= (IXGB_CTRL0_RPE | IXGB_CTRL0_TPE);
657*4882a593Smuzhiyun 		pap_reg = hw->fc.pause_time;
658*4882a593Smuzhiyun 		break;
659*4882a593Smuzhiyun 	default:
660*4882a593Smuzhiyun 		/* We should never get here.  The value should be 0-3. */
661*4882a593Smuzhiyun 		pr_debug("Flow control param set incorrectly\n");
662*4882a593Smuzhiyun 		ASSERT(0);
663*4882a593Smuzhiyun 		break;
664*4882a593Smuzhiyun 	}
665*4882a593Smuzhiyun 
666*4882a593Smuzhiyun 	/* Write the new settings */
667*4882a593Smuzhiyun 	IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);
668*4882a593Smuzhiyun 
669*4882a593Smuzhiyun 	if (pap_reg != 0)
670*4882a593Smuzhiyun 		IXGB_WRITE_REG(hw, PAP, pap_reg);
671*4882a593Smuzhiyun 
672*4882a593Smuzhiyun 	/* Set the flow control receive threshold registers.  Normally,
673*4882a593Smuzhiyun 	 * these registers will be set to a default threshold that may be
674*4882a593Smuzhiyun 	 * adjusted later by the driver's runtime code.  However, if the
675*4882a593Smuzhiyun 	 * ability to transmit pause frames in not enabled, then these
676*4882a593Smuzhiyun 	 * registers will be set to 0.
677*4882a593Smuzhiyun 	 */
678*4882a593Smuzhiyun 	if (!(hw->fc.type & ixgb_fc_tx_pause)) {
679*4882a593Smuzhiyun 		IXGB_WRITE_REG(hw, FCRTL, 0);
680*4882a593Smuzhiyun 		IXGB_WRITE_REG(hw, FCRTH, 0);
681*4882a593Smuzhiyun 	} else {
682*4882a593Smuzhiyun 	   /* We need to set up the Receive Threshold high and low water
683*4882a593Smuzhiyun 	    * marks as well as (optionally) enabling the transmission of XON
684*4882a593Smuzhiyun 	    * frames. */
685*4882a593Smuzhiyun 		if (hw->fc.send_xon) {
686*4882a593Smuzhiyun 			IXGB_WRITE_REG(hw, FCRTL,
687*4882a593Smuzhiyun 				(hw->fc.low_water | IXGB_FCRTL_XONE));
688*4882a593Smuzhiyun 		} else {
689*4882a593Smuzhiyun 			IXGB_WRITE_REG(hw, FCRTL, hw->fc.low_water);
690*4882a593Smuzhiyun 		}
691*4882a593Smuzhiyun 		IXGB_WRITE_REG(hw, FCRTH, hw->fc.high_water);
692*4882a593Smuzhiyun 	}
693*4882a593Smuzhiyun 	return status;
694*4882a593Smuzhiyun }
695*4882a593Smuzhiyun 
696*4882a593Smuzhiyun /******************************************************************************
697*4882a593Smuzhiyun  * Reads a word from a device over the Management Data Interface (MDI) bus.
698*4882a593Smuzhiyun  * This interface is used to manage Physical layer devices.
699*4882a593Smuzhiyun  *
700*4882a593Smuzhiyun  * hw          - Struct containing variables accessed by hw code
701*4882a593Smuzhiyun  * reg_address - Offset of device register being read.
 * phy_address - Address of device on MDI.
 * device_type - Also known as the Device ID or DID.
703*4882a593Smuzhiyun  *
704*4882a593Smuzhiyun  * Returns:  Data word (16 bits) from MDI device.
705*4882a593Smuzhiyun  *
706*4882a593Smuzhiyun  * The 82597EX has support for several MDI access methods.  This routine
707*4882a593Smuzhiyun  * uses the new protocol MDI Single Command and Address Operation.
708*4882a593Smuzhiyun  * This requires that first an address cycle command is sent, followed by a
709*4882a593Smuzhiyun  * read command.
710*4882a593Smuzhiyun  *****************************************************************************/
711*4882a593Smuzhiyun static u16
ixgb_read_phy_reg(struct ixgb_hw * hw,u32 reg_address,u32 phy_address,u32 device_type)712*4882a593Smuzhiyun ixgb_read_phy_reg(struct ixgb_hw *hw,
713*4882a593Smuzhiyun 		u32 reg_address,
714*4882a593Smuzhiyun 		u32 phy_address,
715*4882a593Smuzhiyun 		u32 device_type)
716*4882a593Smuzhiyun {
717*4882a593Smuzhiyun 	u32 i;
718*4882a593Smuzhiyun 	u32 data;
719*4882a593Smuzhiyun 	u32 command = 0;
720*4882a593Smuzhiyun 
721*4882a593Smuzhiyun 	ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
722*4882a593Smuzhiyun 	ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
723*4882a593Smuzhiyun 	ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);
724*4882a593Smuzhiyun 
725*4882a593Smuzhiyun 	/* Setup and write the address cycle command */
726*4882a593Smuzhiyun 	command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
727*4882a593Smuzhiyun 		   (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
728*4882a593Smuzhiyun 		   (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
729*4882a593Smuzhiyun 		   (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND));
730*4882a593Smuzhiyun 
731*4882a593Smuzhiyun 	IXGB_WRITE_REG(hw, MSCA, command);
732*4882a593Smuzhiyun 
733*4882a593Smuzhiyun     /**************************************************************
734*4882a593Smuzhiyun     ** Check every 10 usec to see if the address cycle completed
735*4882a593Smuzhiyun     ** The COMMAND bit will clear when the operation is complete.
736*4882a593Smuzhiyun     ** This may take as long as 64 usecs (we'll wait 100 usecs max)
737*4882a593Smuzhiyun     ** from the CPU Write to the Ready bit assertion.
738*4882a593Smuzhiyun     **************************************************************/
739*4882a593Smuzhiyun 
740*4882a593Smuzhiyun 	for (i = 0; i < 10; i++)
741*4882a593Smuzhiyun 	{
742*4882a593Smuzhiyun 		udelay(10);
743*4882a593Smuzhiyun 
744*4882a593Smuzhiyun 		command = IXGB_READ_REG(hw, MSCA);
745*4882a593Smuzhiyun 
746*4882a593Smuzhiyun 		if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
747*4882a593Smuzhiyun 			break;
748*4882a593Smuzhiyun 	}
749*4882a593Smuzhiyun 
750*4882a593Smuzhiyun 	ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
751*4882a593Smuzhiyun 
752*4882a593Smuzhiyun 	/* Address cycle complete, setup and write the read command */
753*4882a593Smuzhiyun 	command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
754*4882a593Smuzhiyun 		   (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
755*4882a593Smuzhiyun 		   (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
756*4882a593Smuzhiyun 		   (IXGB_MSCA_READ | IXGB_MSCA_MDI_COMMAND));
757*4882a593Smuzhiyun 
758*4882a593Smuzhiyun 	IXGB_WRITE_REG(hw, MSCA, command);
759*4882a593Smuzhiyun 
760*4882a593Smuzhiyun     /**************************************************************
761*4882a593Smuzhiyun     ** Check every 10 usec to see if the read command completed
762*4882a593Smuzhiyun     ** The COMMAND bit will clear when the operation is complete.
763*4882a593Smuzhiyun     ** The read may take as long as 64 usecs (we'll wait 100 usecs max)
764*4882a593Smuzhiyun     ** from the CPU Write to the Ready bit assertion.
765*4882a593Smuzhiyun     **************************************************************/
766*4882a593Smuzhiyun 
767*4882a593Smuzhiyun 	for (i = 0; i < 10; i++)
768*4882a593Smuzhiyun 	{
769*4882a593Smuzhiyun 		udelay(10);
770*4882a593Smuzhiyun 
771*4882a593Smuzhiyun 		command = IXGB_READ_REG(hw, MSCA);
772*4882a593Smuzhiyun 
773*4882a593Smuzhiyun 		if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
774*4882a593Smuzhiyun 			break;
775*4882a593Smuzhiyun 	}
776*4882a593Smuzhiyun 
777*4882a593Smuzhiyun 	ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
778*4882a593Smuzhiyun 
779*4882a593Smuzhiyun 	/* Operation is complete, get the data from the MDIO Read/Write Data
780*4882a593Smuzhiyun 	 * register and return.
781*4882a593Smuzhiyun 	 */
782*4882a593Smuzhiyun 	data = IXGB_READ_REG(hw, MSRWD);
783*4882a593Smuzhiyun 	data >>= IXGB_MSRWD_READ_DATA_SHIFT;
784*4882a593Smuzhiyun 	return((u16) data);
785*4882a593Smuzhiyun }
786*4882a593Smuzhiyun 
787*4882a593Smuzhiyun /******************************************************************************
788*4882a593Smuzhiyun  * Writes a word to a device over the Management Data Interface (MDI) bus.
789*4882a593Smuzhiyun  * This interface is used to manage Physical layer devices.
790*4882a593Smuzhiyun  *
791*4882a593Smuzhiyun  * hw          - Struct containing variables accessed by hw code
792*4882a593Smuzhiyun  * reg_address - Offset of device register being read.
793*4882a593Smuzhiyun  * phy_address - Address of device on MDI.
794*4882a593Smuzhiyun  * device_type - Also known as the Device ID or DID.
795*4882a593Smuzhiyun  * data        - 16-bit value to be written
796*4882a593Smuzhiyun  *
797*4882a593Smuzhiyun  * Returns:  void.
798*4882a593Smuzhiyun  *
799*4882a593Smuzhiyun  * The 82597EX has support for several MDI access methods.  This routine
800*4882a593Smuzhiyun  * uses the new protocol MDI Single Command and Address Operation.
801*4882a593Smuzhiyun  * This requires that first an address cycle command is sent, followed by a
802*4882a593Smuzhiyun  * write command.
803*4882a593Smuzhiyun  *****************************************************************************/
804*4882a593Smuzhiyun static void
ixgb_write_phy_reg(struct ixgb_hw * hw,u32 reg_address,u32 phy_address,u32 device_type,u16 data)805*4882a593Smuzhiyun ixgb_write_phy_reg(struct ixgb_hw *hw,
806*4882a593Smuzhiyun 			u32 reg_address,
807*4882a593Smuzhiyun 			u32 phy_address,
808*4882a593Smuzhiyun 			u32 device_type,
809*4882a593Smuzhiyun 			u16 data)
810*4882a593Smuzhiyun {
811*4882a593Smuzhiyun 	u32 i;
812*4882a593Smuzhiyun 	u32 command = 0;
813*4882a593Smuzhiyun 
814*4882a593Smuzhiyun 	ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
815*4882a593Smuzhiyun 	ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
816*4882a593Smuzhiyun 	ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);
817*4882a593Smuzhiyun 
818*4882a593Smuzhiyun 	/* Put the data in the MDIO Read/Write Data register */
819*4882a593Smuzhiyun 	IXGB_WRITE_REG(hw, MSRWD, (u32)data);
820*4882a593Smuzhiyun 
821*4882a593Smuzhiyun 	/* Setup and write the address cycle command */
822*4882a593Smuzhiyun 	command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT)  |
823*4882a593Smuzhiyun 			   (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
824*4882a593Smuzhiyun 			   (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
825*4882a593Smuzhiyun 			   (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND));
826*4882a593Smuzhiyun 
827*4882a593Smuzhiyun 	IXGB_WRITE_REG(hw, MSCA, command);
828*4882a593Smuzhiyun 
829*4882a593Smuzhiyun 	/**************************************************************
830*4882a593Smuzhiyun 	** Check every 10 usec to see if the address cycle completed
831*4882a593Smuzhiyun 	** The COMMAND bit will clear when the operation is complete.
832*4882a593Smuzhiyun 	** This may take as long as 64 usecs (we'll wait 100 usecs max)
833*4882a593Smuzhiyun 	** from the CPU Write to the Ready bit assertion.
834*4882a593Smuzhiyun 	**************************************************************/
835*4882a593Smuzhiyun 
836*4882a593Smuzhiyun 	for (i = 0; i < 10; i++)
837*4882a593Smuzhiyun 	{
838*4882a593Smuzhiyun 		udelay(10);
839*4882a593Smuzhiyun 
840*4882a593Smuzhiyun 		command = IXGB_READ_REG(hw, MSCA);
841*4882a593Smuzhiyun 
842*4882a593Smuzhiyun 		if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
843*4882a593Smuzhiyun 			break;
844*4882a593Smuzhiyun 	}
845*4882a593Smuzhiyun 
846*4882a593Smuzhiyun 	ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
847*4882a593Smuzhiyun 
848*4882a593Smuzhiyun 	/* Address cycle complete, setup and write the write command */
849*4882a593Smuzhiyun 	command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT)  |
850*4882a593Smuzhiyun 			   (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
851*4882a593Smuzhiyun 			   (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
852*4882a593Smuzhiyun 			   (IXGB_MSCA_WRITE | IXGB_MSCA_MDI_COMMAND));
853*4882a593Smuzhiyun 
854*4882a593Smuzhiyun 	IXGB_WRITE_REG(hw, MSCA, command);
855*4882a593Smuzhiyun 
856*4882a593Smuzhiyun 	/**************************************************************
857*4882a593Smuzhiyun 	** Check every 10 usec to see if the read command completed
858*4882a593Smuzhiyun 	** The COMMAND bit will clear when the operation is complete.
859*4882a593Smuzhiyun 	** The write may take as long as 64 usecs (we'll wait 100 usecs max)
860*4882a593Smuzhiyun 	** from the CPU Write to the Ready bit assertion.
861*4882a593Smuzhiyun 	**************************************************************/
862*4882a593Smuzhiyun 
863*4882a593Smuzhiyun 	for (i = 0; i < 10; i++)
864*4882a593Smuzhiyun 	{
865*4882a593Smuzhiyun 		udelay(10);
866*4882a593Smuzhiyun 
867*4882a593Smuzhiyun 		command = IXGB_READ_REG(hw, MSCA);
868*4882a593Smuzhiyun 
869*4882a593Smuzhiyun 		if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
870*4882a593Smuzhiyun 			break;
871*4882a593Smuzhiyun 	}
872*4882a593Smuzhiyun 
873*4882a593Smuzhiyun 	ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
874*4882a593Smuzhiyun 
875*4882a593Smuzhiyun 	/* Operation is complete, return. */
876*4882a593Smuzhiyun }
877*4882a593Smuzhiyun 
878*4882a593Smuzhiyun /******************************************************************************
879*4882a593Smuzhiyun  * Checks to see if the link status of the hardware has changed.
880*4882a593Smuzhiyun  *
881*4882a593Smuzhiyun  * hw - Struct containing variables accessed by hw code
882*4882a593Smuzhiyun  *
883*4882a593Smuzhiyun  * Called by any function that needs to check the link status of the adapter.
884*4882a593Smuzhiyun  *****************************************************************************/
885*4882a593Smuzhiyun void
ixgb_check_for_link(struct ixgb_hw * hw)886*4882a593Smuzhiyun ixgb_check_for_link(struct ixgb_hw *hw)
887*4882a593Smuzhiyun {
888*4882a593Smuzhiyun 	u32 status_reg;
889*4882a593Smuzhiyun 	u32 xpcss_reg;
890*4882a593Smuzhiyun 
891*4882a593Smuzhiyun 	ENTER();
892*4882a593Smuzhiyun 
893*4882a593Smuzhiyun 	xpcss_reg = IXGB_READ_REG(hw, XPCSS);
894*4882a593Smuzhiyun 	status_reg = IXGB_READ_REG(hw, STATUS);
895*4882a593Smuzhiyun 
896*4882a593Smuzhiyun 	if ((xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
897*4882a593Smuzhiyun 	    (status_reg & IXGB_STATUS_LU)) {
898*4882a593Smuzhiyun 		hw->link_up = true;
899*4882a593Smuzhiyun 	} else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
900*4882a593Smuzhiyun 		   (status_reg & IXGB_STATUS_LU)) {
901*4882a593Smuzhiyun 		pr_debug("XPCSS Not Aligned while Status:LU is set\n");
902*4882a593Smuzhiyun 		hw->link_up = ixgb_link_reset(hw);
903*4882a593Smuzhiyun 	} else {
904*4882a593Smuzhiyun 		/*
905*4882a593Smuzhiyun 		 * 82597EX errata.  Since the lane deskew problem may prevent
906*4882a593Smuzhiyun 		 * link, reset the link before reporting link down.
907*4882a593Smuzhiyun 		 */
908*4882a593Smuzhiyun 		hw->link_up = ixgb_link_reset(hw);
909*4882a593Smuzhiyun 	}
910*4882a593Smuzhiyun 	/*  Anything else for 10 Gig?? */
911*4882a593Smuzhiyun }
912*4882a593Smuzhiyun 
913*4882a593Smuzhiyun /******************************************************************************
914*4882a593Smuzhiyun  * Check for a bad link condition that may have occurred.
915*4882a593Smuzhiyun  * The indication is that the RFC / LFC registers may be incrementing
916*4882a593Smuzhiyun  * continually.  A full adapter reset is required to recover.
917*4882a593Smuzhiyun  *
918*4882a593Smuzhiyun  * hw - Struct containing variables accessed by hw code
919*4882a593Smuzhiyun  *
920*4882a593Smuzhiyun  * Called by any function that needs to check the link status of the adapter.
921*4882a593Smuzhiyun  *****************************************************************************/
ixgb_check_for_bad_link(struct ixgb_hw * hw)922*4882a593Smuzhiyun bool ixgb_check_for_bad_link(struct ixgb_hw *hw)
923*4882a593Smuzhiyun {
924*4882a593Smuzhiyun 	u32 newLFC, newRFC;
925*4882a593Smuzhiyun 	bool bad_link_returncode = false;
926*4882a593Smuzhiyun 
927*4882a593Smuzhiyun 	if (hw->phy_type == ixgb_phy_type_txn17401) {
928*4882a593Smuzhiyun 		newLFC = IXGB_READ_REG(hw, LFC);
929*4882a593Smuzhiyun 		newRFC = IXGB_READ_REG(hw, RFC);
930*4882a593Smuzhiyun 		if ((hw->lastLFC + 250 < newLFC)
931*4882a593Smuzhiyun 		    || (hw->lastRFC + 250 < newRFC)) {
932*4882a593Smuzhiyun 			pr_debug("BAD LINK! too many LFC/RFC since last check\n");
933*4882a593Smuzhiyun 			bad_link_returncode = true;
934*4882a593Smuzhiyun 		}
935*4882a593Smuzhiyun 		hw->lastLFC = newLFC;
936*4882a593Smuzhiyun 		hw->lastRFC = newRFC;
937*4882a593Smuzhiyun 	}
938*4882a593Smuzhiyun 
939*4882a593Smuzhiyun 	return bad_link_returncode;
940*4882a593Smuzhiyun }
941*4882a593Smuzhiyun 
942*4882a593Smuzhiyun /******************************************************************************
943*4882a593Smuzhiyun  * Clears all hardware statistics counters.
944*4882a593Smuzhiyun  *
945*4882a593Smuzhiyun  * hw - Struct containing variables accessed by shared code
946*4882a593Smuzhiyun  *****************************************************************************/
static void
ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
{
	ENTER();

	/* if we are stopped or resetting exit gracefully */
	if (hw->adapter_stopped) {
		pr_debug("Exiting because the adapter is stopped!!!\n");
		return;
	}

	/*
	 * Read every statistics register once, discarding the values.
	 * NOTE(review): this only clears the counters if they are
	 * clear-on-read, which the function name implies -- confirm
	 * against the 82597EX datasheet.
	 */
	IXGB_READ_REG(hw, TPRL);
	IXGB_READ_REG(hw, TPRH);
	IXGB_READ_REG(hw, GPRCL);
	IXGB_READ_REG(hw, GPRCH);
	IXGB_READ_REG(hw, BPRCL);
	IXGB_READ_REG(hw, BPRCH);
	IXGB_READ_REG(hw, MPRCL);
	IXGB_READ_REG(hw, MPRCH);
	IXGB_READ_REG(hw, UPRCL);
	IXGB_READ_REG(hw, UPRCH);
	IXGB_READ_REG(hw, VPRCL);
	IXGB_READ_REG(hw, VPRCH);
	IXGB_READ_REG(hw, JPRCL);
	IXGB_READ_REG(hw, JPRCH);
	IXGB_READ_REG(hw, GORCL);
	IXGB_READ_REG(hw, GORCH);
	IXGB_READ_REG(hw, TORL);
	IXGB_READ_REG(hw, TORH);
	IXGB_READ_REG(hw, RNBC);
	IXGB_READ_REG(hw, RUC);
	IXGB_READ_REG(hw, ROC);
	IXGB_READ_REG(hw, RLEC);
	IXGB_READ_REG(hw, CRCERRS);
	IXGB_READ_REG(hw, ICBC);
	IXGB_READ_REG(hw, ECBC);
	IXGB_READ_REG(hw, MPC);
	IXGB_READ_REG(hw, TPTL);
	IXGB_READ_REG(hw, TPTH);
	IXGB_READ_REG(hw, GPTCL);
	IXGB_READ_REG(hw, GPTCH);
	IXGB_READ_REG(hw, BPTCL);
	IXGB_READ_REG(hw, BPTCH);
	IXGB_READ_REG(hw, MPTCL);
	IXGB_READ_REG(hw, MPTCH);
	IXGB_READ_REG(hw, UPTCL);
	IXGB_READ_REG(hw, UPTCH);
	IXGB_READ_REG(hw, VPTCL);
	IXGB_READ_REG(hw, VPTCH);
	IXGB_READ_REG(hw, JPTCL);
	IXGB_READ_REG(hw, JPTCH);
	IXGB_READ_REG(hw, GOTCL);
	IXGB_READ_REG(hw, GOTCH);
	IXGB_READ_REG(hw, TOTL);
	IXGB_READ_REG(hw, TOTH);
	IXGB_READ_REG(hw, DC);
	IXGB_READ_REG(hw, PLT64C);
	IXGB_READ_REG(hw, TSCTC);
	IXGB_READ_REG(hw, TSCTFC);
	IXGB_READ_REG(hw, IBIC);
	IXGB_READ_REG(hw, RFC);
	IXGB_READ_REG(hw, LFC);
	IXGB_READ_REG(hw, PFRC);
	IXGB_READ_REG(hw, PFTC);
	IXGB_READ_REG(hw, MCFRC);
	IXGB_READ_REG(hw, MCFTC);
	IXGB_READ_REG(hw, XONRXC);
	IXGB_READ_REG(hw, XONTXC);
	IXGB_READ_REG(hw, XOFFRXC);
	IXGB_READ_REG(hw, XOFFTXC);
	IXGB_READ_REG(hw, RJC);
}
1019*4882a593Smuzhiyun 
1020*4882a593Smuzhiyun /******************************************************************************
1021*4882a593Smuzhiyun  * Turns on the software controllable LED
1022*4882a593Smuzhiyun  *
1023*4882a593Smuzhiyun  * hw - Struct containing variables accessed by shared code
1024*4882a593Smuzhiyun  *****************************************************************************/
1025*4882a593Smuzhiyun void
ixgb_led_on(struct ixgb_hw * hw)1026*4882a593Smuzhiyun ixgb_led_on(struct ixgb_hw *hw)
1027*4882a593Smuzhiyun {
1028*4882a593Smuzhiyun 	u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
1029*4882a593Smuzhiyun 
1030*4882a593Smuzhiyun 	/* To turn on the LED, clear software-definable pin 0 (SDP0). */
1031*4882a593Smuzhiyun 	ctrl0_reg &= ~IXGB_CTRL0_SDP0;
1032*4882a593Smuzhiyun 	IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
1033*4882a593Smuzhiyun }
1034*4882a593Smuzhiyun 
1035*4882a593Smuzhiyun /******************************************************************************
1036*4882a593Smuzhiyun  * Turns off the software controllable LED
1037*4882a593Smuzhiyun  *
1038*4882a593Smuzhiyun  * hw - Struct containing variables accessed by shared code
1039*4882a593Smuzhiyun  *****************************************************************************/
1040*4882a593Smuzhiyun void
ixgb_led_off(struct ixgb_hw * hw)1041*4882a593Smuzhiyun ixgb_led_off(struct ixgb_hw *hw)
1042*4882a593Smuzhiyun {
1043*4882a593Smuzhiyun 	u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
1044*4882a593Smuzhiyun 
1045*4882a593Smuzhiyun 	/* To turn off the LED, set software-definable pin 0 (SDP0). */
1046*4882a593Smuzhiyun 	ctrl0_reg |= IXGB_CTRL0_SDP0;
1047*4882a593Smuzhiyun 	IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
1048*4882a593Smuzhiyun }
1049*4882a593Smuzhiyun 
1050*4882a593Smuzhiyun /******************************************************************************
1051*4882a593Smuzhiyun  * Gets the current PCI bus type, speed, and width of the hardware
1052*4882a593Smuzhiyun  *
1053*4882a593Smuzhiyun  * hw - Struct containing variables accessed by shared code
1054*4882a593Smuzhiyun  *****************************************************************************/
1055*4882a593Smuzhiyun static void
ixgb_get_bus_info(struct ixgb_hw * hw)1056*4882a593Smuzhiyun ixgb_get_bus_info(struct ixgb_hw *hw)
1057*4882a593Smuzhiyun {
1058*4882a593Smuzhiyun 	u32 status_reg;
1059*4882a593Smuzhiyun 
1060*4882a593Smuzhiyun 	status_reg = IXGB_READ_REG(hw, STATUS);
1061*4882a593Smuzhiyun 
1062*4882a593Smuzhiyun 	hw->bus.type = (status_reg & IXGB_STATUS_PCIX_MODE) ?
1063*4882a593Smuzhiyun 		ixgb_bus_type_pcix : ixgb_bus_type_pci;
1064*4882a593Smuzhiyun 
1065*4882a593Smuzhiyun 	if (hw->bus.type == ixgb_bus_type_pci) {
1066*4882a593Smuzhiyun 		hw->bus.speed = (status_reg & IXGB_STATUS_PCI_SPD) ?
1067*4882a593Smuzhiyun 			ixgb_bus_speed_66 : ixgb_bus_speed_33;
1068*4882a593Smuzhiyun 	} else {
1069*4882a593Smuzhiyun 		switch (status_reg & IXGB_STATUS_PCIX_SPD_MASK) {
1070*4882a593Smuzhiyun 		case IXGB_STATUS_PCIX_SPD_66:
1071*4882a593Smuzhiyun 			hw->bus.speed = ixgb_bus_speed_66;
1072*4882a593Smuzhiyun 			break;
1073*4882a593Smuzhiyun 		case IXGB_STATUS_PCIX_SPD_100:
1074*4882a593Smuzhiyun 			hw->bus.speed = ixgb_bus_speed_100;
1075*4882a593Smuzhiyun 			break;
1076*4882a593Smuzhiyun 		case IXGB_STATUS_PCIX_SPD_133:
1077*4882a593Smuzhiyun 			hw->bus.speed = ixgb_bus_speed_133;
1078*4882a593Smuzhiyun 			break;
1079*4882a593Smuzhiyun 		default:
1080*4882a593Smuzhiyun 			hw->bus.speed = ixgb_bus_speed_reserved;
1081*4882a593Smuzhiyun 			break;
1082*4882a593Smuzhiyun 		}
1083*4882a593Smuzhiyun 	}
1084*4882a593Smuzhiyun 
1085*4882a593Smuzhiyun 	hw->bus.width = (status_reg & IXGB_STATUS_BUS64) ?
1086*4882a593Smuzhiyun 		ixgb_bus_width_64 : ixgb_bus_width_32;
1087*4882a593Smuzhiyun }
1088*4882a593Smuzhiyun 
1089*4882a593Smuzhiyun /******************************************************************************
1090*4882a593Smuzhiyun  * Tests a MAC address to ensure it is a valid Individual Address
1091*4882a593Smuzhiyun  *
1092*4882a593Smuzhiyun  * mac_addr - pointer to MAC address.
1093*4882a593Smuzhiyun  *
1094*4882a593Smuzhiyun  *****************************************************************************/
1095*4882a593Smuzhiyun static bool
mac_addr_valid(u8 * mac_addr)1096*4882a593Smuzhiyun mac_addr_valid(u8 *mac_addr)
1097*4882a593Smuzhiyun {
1098*4882a593Smuzhiyun 	bool is_valid = true;
1099*4882a593Smuzhiyun 	ENTER();
1100*4882a593Smuzhiyun 
1101*4882a593Smuzhiyun 	/* Make sure it is not a multicast address */
1102*4882a593Smuzhiyun 	if (is_multicast_ether_addr(mac_addr)) {
1103*4882a593Smuzhiyun 		pr_debug("MAC address is multicast\n");
1104*4882a593Smuzhiyun 		is_valid = false;
1105*4882a593Smuzhiyun 	}
1106*4882a593Smuzhiyun 	/* Not a broadcast address */
1107*4882a593Smuzhiyun 	else if (is_broadcast_ether_addr(mac_addr)) {
1108*4882a593Smuzhiyun 		pr_debug("MAC address is broadcast\n");
1109*4882a593Smuzhiyun 		is_valid = false;
1110*4882a593Smuzhiyun 	}
1111*4882a593Smuzhiyun 	/* Reject the zero address */
1112*4882a593Smuzhiyun 	else if (is_zero_ether_addr(mac_addr)) {
1113*4882a593Smuzhiyun 		pr_debug("MAC address is all zeros\n");
1114*4882a593Smuzhiyun 		is_valid = false;
1115*4882a593Smuzhiyun 	}
1116*4882a593Smuzhiyun 	return is_valid;
1117*4882a593Smuzhiyun }
1118*4882a593Smuzhiyun 
1119*4882a593Smuzhiyun /******************************************************************************
1120*4882a593Smuzhiyun  * Resets the 10GbE link.  Waits the settle time and returns the state of
1121*4882a593Smuzhiyun  * the link.
1122*4882a593Smuzhiyun  *
1123*4882a593Smuzhiyun  * hw - Struct containing variables accessed by shared code
1124*4882a593Smuzhiyun  *****************************************************************************/
1125*4882a593Smuzhiyun static bool
ixgb_link_reset(struct ixgb_hw * hw)1126*4882a593Smuzhiyun ixgb_link_reset(struct ixgb_hw *hw)
1127*4882a593Smuzhiyun {
1128*4882a593Smuzhiyun 	bool link_status = false;
1129*4882a593Smuzhiyun 	u8 wait_retries = MAX_RESET_ITERATIONS;
1130*4882a593Smuzhiyun 	u8 lrst_retries = MAX_RESET_ITERATIONS;
1131*4882a593Smuzhiyun 
1132*4882a593Smuzhiyun 	do {
1133*4882a593Smuzhiyun 		/* Reset the link */
1134*4882a593Smuzhiyun 		IXGB_WRITE_REG(hw, CTRL0,
1135*4882a593Smuzhiyun 			       IXGB_READ_REG(hw, CTRL0) | IXGB_CTRL0_LRST);
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun 		/* Wait for link-up and lane re-alignment */
1138*4882a593Smuzhiyun 		do {
1139*4882a593Smuzhiyun 			udelay(IXGB_DELAY_USECS_AFTER_LINK_RESET);
1140*4882a593Smuzhiyun 			link_status =
1141*4882a593Smuzhiyun 			    ((IXGB_READ_REG(hw, STATUS) & IXGB_STATUS_LU)
1142*4882a593Smuzhiyun 			     && (IXGB_READ_REG(hw, XPCSS) &
1143*4882a593Smuzhiyun 				 IXGB_XPCSS_ALIGN_STATUS)) ? true : false;
1144*4882a593Smuzhiyun 		} while (!link_status && --wait_retries);
1145*4882a593Smuzhiyun 
1146*4882a593Smuzhiyun 	} while (!link_status && --lrst_retries);
1147*4882a593Smuzhiyun 
1148*4882a593Smuzhiyun 	return link_status;
1149*4882a593Smuzhiyun }
1150*4882a593Smuzhiyun 
1151*4882a593Smuzhiyun /******************************************************************************
1152*4882a593Smuzhiyun  * Resets the 10GbE optics module.
1153*4882a593Smuzhiyun  *
1154*4882a593Smuzhiyun  * hw - Struct containing variables accessed by shared code
1155*4882a593Smuzhiyun  *****************************************************************************/
1156*4882a593Smuzhiyun static void
ixgb_optics_reset(struct ixgb_hw * hw)1157*4882a593Smuzhiyun ixgb_optics_reset(struct ixgb_hw *hw)
1158*4882a593Smuzhiyun {
1159*4882a593Smuzhiyun 	if (hw->phy_type == ixgb_phy_type_txn17401) {
1160*4882a593Smuzhiyun 		ixgb_write_phy_reg(hw,
1161*4882a593Smuzhiyun 				   MDIO_CTRL1,
1162*4882a593Smuzhiyun 				   IXGB_PHY_ADDRESS,
1163*4882a593Smuzhiyun 				   MDIO_MMD_PMAPMD,
1164*4882a593Smuzhiyun 				   MDIO_CTRL1_RESET);
1165*4882a593Smuzhiyun 
1166*4882a593Smuzhiyun 		ixgb_read_phy_reg(hw, MDIO_CTRL1, IXGB_PHY_ADDRESS, MDIO_MMD_PMAPMD);
1167*4882a593Smuzhiyun 	}
1168*4882a593Smuzhiyun }
1169*4882a593Smuzhiyun 
1170*4882a593Smuzhiyun /******************************************************************************
1171*4882a593Smuzhiyun  * Resets the 10GbE optics module for Sun variant NIC.
1172*4882a593Smuzhiyun  *
1173*4882a593Smuzhiyun  * hw - Struct containing variables accessed by shared code
1174*4882a593Smuzhiyun  *****************************************************************************/
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun #define   IXGB_BCM8704_USER_PMD_TX_CTRL_REG         0xC803
1177*4882a593Smuzhiyun #define   IXGB_BCM8704_USER_PMD_TX_CTRL_REG_VAL     0x0164
1178*4882a593Smuzhiyun #define   IXGB_BCM8704_USER_CTRL_REG                0xC800
1179*4882a593Smuzhiyun #define   IXGB_BCM8704_USER_CTRL_REG_VAL            0x7FBF
1180*4882a593Smuzhiyun #define   IXGB_BCM8704_USER_DEV3_ADDR               0x0003
1181*4882a593Smuzhiyun #define   IXGB_SUN_PHY_ADDRESS                      0x0000
1182*4882a593Smuzhiyun #define   IXGB_SUN_PHY_RESET_DELAY                     305
1183*4882a593Smuzhiyun 
static void
ixgb_optics_reset_bcm(struct ixgb_hw *hw)
{
	/* Toggle SDP2 low / SDP3 high before configuring the PHY.
	 * NOTE(review): presumably these software-definable pins drive the
	 * optics reset circuitry on the Sun variant board -- confirm
	 * against the board schematic. */
	u32 ctrl = IXGB_READ_REG(hw, CTRL0);
	ctrl &= ~IXGB_CTRL0_SDP2;
	ctrl |= IXGB_CTRL0_SDP3;
	IXGB_WRITE_REG(hw, CTRL0, ctrl);
	IXGB_WRITE_FLUSH(hw);

	/* SerDes needs extra delay */
	msleep(IXGB_SUN_PHY_RESET_DELAY);

	/* Broadcom 7408L configuration */
	/* Reference clock config */
	ixgb_write_phy_reg(hw,
			   IXGB_BCM8704_USER_PMD_TX_CTRL_REG,
			   IXGB_SUN_PHY_ADDRESS,
			   IXGB_BCM8704_USER_DEV3_ADDR,
			   IXGB_BCM8704_USER_PMD_TX_CTRL_REG_VAL);
	/*  we must read the registers twice */
	ixgb_read_phy_reg(hw,
			  IXGB_BCM8704_USER_PMD_TX_CTRL_REG,
			  IXGB_SUN_PHY_ADDRESS,
			  IXGB_BCM8704_USER_DEV3_ADDR);
	ixgb_read_phy_reg(hw,
			  IXGB_BCM8704_USER_PMD_TX_CTRL_REG,
			  IXGB_SUN_PHY_ADDRESS,
			  IXGB_BCM8704_USER_DEV3_ADDR);

	/* Same write-then-double-read sequence for the user control
	 * register. */
	ixgb_write_phy_reg(hw,
			   IXGB_BCM8704_USER_CTRL_REG,
			   IXGB_SUN_PHY_ADDRESS,
			   IXGB_BCM8704_USER_DEV3_ADDR,
			   IXGB_BCM8704_USER_CTRL_REG_VAL);
	ixgb_read_phy_reg(hw,
			  IXGB_BCM8704_USER_CTRL_REG,
			  IXGB_SUN_PHY_ADDRESS,
			  IXGB_BCM8704_USER_DEV3_ADDR);
	ixgb_read_phy_reg(hw,
			  IXGB_BCM8704_USER_CTRL_REG,
			  IXGB_SUN_PHY_ADDRESS,
			  IXGB_BCM8704_USER_DEV3_ADDR);

	/* SerDes needs extra delay */
	msleep(IXGB_SUN_PHY_RESET_DELAY);
}
1230