1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /* Copyright(c) 1999 - 2018 Intel Corporation. */
3*4882a593Smuzhiyun
4*4882a593Smuzhiyun #include <linux/pci.h>
5*4882a593Smuzhiyun #include <linux/delay.h>
6*4882a593Smuzhiyun #include <linux/sched.h>
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #include "ixgbe.h"
9*4882a593Smuzhiyun #include "ixgbe_phy.h"
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun #define IXGBE_82598_MAX_TX_QUEUES 32
12*4882a593Smuzhiyun #define IXGBE_82598_MAX_RX_QUEUES 64
13*4882a593Smuzhiyun #define IXGBE_82598_RAR_ENTRIES 16
14*4882a593Smuzhiyun #define IXGBE_82598_MC_TBL_SIZE 128
15*4882a593Smuzhiyun #define IXGBE_82598_VFT_TBL_SIZE 128
16*4882a593Smuzhiyun #define IXGBE_82598_RX_PB_SIZE 512
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
19*4882a593Smuzhiyun ixgbe_link_speed speed,
20*4882a593Smuzhiyun bool autoneg_wait_to_complete);
21*4882a593Smuzhiyun static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
22*4882a593Smuzhiyun u8 *eeprom_data);
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun /**
25*4882a593Smuzhiyun * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
26*4882a593Smuzhiyun * @hw: pointer to the HW structure
27*4882a593Smuzhiyun *
28*4882a593Smuzhiyun * The defaults for 82598 should be in the range of 50us to 50ms,
29*4882a593Smuzhiyun * however the hardware default for these parts is 500us to 1ms which is less
30*4882a593Smuzhiyun * than the 10ms recommended by the pci-e spec. To address this we need to
31*4882a593Smuzhiyun * increase the value to either 10ms to 250ms for capability version 1 config,
32*4882a593Smuzhiyun * or 16ms to 55ms for version 2.
33*4882a593Smuzhiyun **/
ixgbe_set_pcie_completion_timeout(struct ixgbe_hw * hw)34*4882a593Smuzhiyun static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
35*4882a593Smuzhiyun {
36*4882a593Smuzhiyun u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
37*4882a593Smuzhiyun u16 pcie_devctl2;
38*4882a593Smuzhiyun
39*4882a593Smuzhiyun if (ixgbe_removed(hw->hw_addr))
40*4882a593Smuzhiyun return;
41*4882a593Smuzhiyun
42*4882a593Smuzhiyun /* only take action if timeout value is defaulted to 0 */
43*4882a593Smuzhiyun if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
44*4882a593Smuzhiyun goto out;
45*4882a593Smuzhiyun
46*4882a593Smuzhiyun /*
47*4882a593Smuzhiyun * if capababilities version is type 1 we can write the
48*4882a593Smuzhiyun * timeout of 10ms to 250ms through the GCR register
49*4882a593Smuzhiyun */
50*4882a593Smuzhiyun if (!(gcr & IXGBE_GCR_CAP_VER2)) {
51*4882a593Smuzhiyun gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
52*4882a593Smuzhiyun goto out;
53*4882a593Smuzhiyun }
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun /*
56*4882a593Smuzhiyun * for version 2 capabilities we need to write the config space
57*4882a593Smuzhiyun * directly in order to set the completion timeout value for
58*4882a593Smuzhiyun * 16ms to 55ms
59*4882a593Smuzhiyun */
60*4882a593Smuzhiyun pcie_devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
61*4882a593Smuzhiyun pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
62*4882a593Smuzhiyun ixgbe_write_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
63*4882a593Smuzhiyun out:
64*4882a593Smuzhiyun /* disable completion timeout resend */
65*4882a593Smuzhiyun gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
66*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
67*4882a593Smuzhiyun }
68*4882a593Smuzhiyun
ixgbe_get_invariants_82598(struct ixgbe_hw * hw)69*4882a593Smuzhiyun static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
70*4882a593Smuzhiyun {
71*4882a593Smuzhiyun struct ixgbe_mac_info *mac = &hw->mac;
72*4882a593Smuzhiyun
73*4882a593Smuzhiyun /* Call PHY identify routine to get the phy type */
74*4882a593Smuzhiyun ixgbe_identify_phy_generic(hw);
75*4882a593Smuzhiyun
76*4882a593Smuzhiyun mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
77*4882a593Smuzhiyun mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
78*4882a593Smuzhiyun mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
79*4882a593Smuzhiyun mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE;
80*4882a593Smuzhiyun mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
81*4882a593Smuzhiyun mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
82*4882a593Smuzhiyun mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
83*4882a593Smuzhiyun
84*4882a593Smuzhiyun return 0;
85*4882a593Smuzhiyun }
86*4882a593Smuzhiyun
87*4882a593Smuzhiyun /**
88*4882a593Smuzhiyun * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
89*4882a593Smuzhiyun * @hw: pointer to hardware structure
90*4882a593Smuzhiyun *
91*4882a593Smuzhiyun * Initialize any function pointers that were not able to be
92*4882a593Smuzhiyun * set during get_invariants because the PHY/SFP type was
93*4882a593Smuzhiyun * not known. Perform the SFP init if necessary.
94*4882a593Smuzhiyun *
95*4882a593Smuzhiyun **/
ixgbe_init_phy_ops_82598(struct ixgbe_hw * hw)96*4882a593Smuzhiyun static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
97*4882a593Smuzhiyun {
98*4882a593Smuzhiyun struct ixgbe_mac_info *mac = &hw->mac;
99*4882a593Smuzhiyun struct ixgbe_phy_info *phy = &hw->phy;
100*4882a593Smuzhiyun s32 ret_val;
101*4882a593Smuzhiyun u16 list_offset, data_offset;
102*4882a593Smuzhiyun
103*4882a593Smuzhiyun /* Identify the PHY */
104*4882a593Smuzhiyun phy->ops.identify(hw);
105*4882a593Smuzhiyun
106*4882a593Smuzhiyun /* Overwrite the link function pointers if copper PHY */
107*4882a593Smuzhiyun if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
108*4882a593Smuzhiyun mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
109*4882a593Smuzhiyun mac->ops.get_link_capabilities =
110*4882a593Smuzhiyun &ixgbe_get_copper_link_capabilities_generic;
111*4882a593Smuzhiyun }
112*4882a593Smuzhiyun
113*4882a593Smuzhiyun switch (hw->phy.type) {
114*4882a593Smuzhiyun case ixgbe_phy_tn:
115*4882a593Smuzhiyun phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
116*4882a593Smuzhiyun phy->ops.check_link = &ixgbe_check_phy_link_tnx;
117*4882a593Smuzhiyun break;
118*4882a593Smuzhiyun case ixgbe_phy_nl:
119*4882a593Smuzhiyun phy->ops.reset = &ixgbe_reset_phy_nl;
120*4882a593Smuzhiyun
121*4882a593Smuzhiyun /* Call SFP+ identify routine to get the SFP+ module type */
122*4882a593Smuzhiyun ret_val = phy->ops.identify_sfp(hw);
123*4882a593Smuzhiyun if (ret_val)
124*4882a593Smuzhiyun return ret_val;
125*4882a593Smuzhiyun if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
126*4882a593Smuzhiyun return IXGBE_ERR_SFP_NOT_SUPPORTED;
127*4882a593Smuzhiyun
128*4882a593Smuzhiyun /* Check to see if SFP+ module is supported */
129*4882a593Smuzhiyun ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
130*4882a593Smuzhiyun &list_offset,
131*4882a593Smuzhiyun &data_offset);
132*4882a593Smuzhiyun if (ret_val)
133*4882a593Smuzhiyun return IXGBE_ERR_SFP_NOT_SUPPORTED;
134*4882a593Smuzhiyun break;
135*4882a593Smuzhiyun default:
136*4882a593Smuzhiyun break;
137*4882a593Smuzhiyun }
138*4882a593Smuzhiyun
139*4882a593Smuzhiyun return 0;
140*4882a593Smuzhiyun }
141*4882a593Smuzhiyun
142*4882a593Smuzhiyun /**
143*4882a593Smuzhiyun * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
144*4882a593Smuzhiyun * @hw: pointer to hardware structure
145*4882a593Smuzhiyun *
146*4882a593Smuzhiyun * Starts the hardware using the generic start_hw function.
147*4882a593Smuzhiyun * Disables relaxed ordering for archs other than SPARC
148*4882a593Smuzhiyun * Then set pcie completion timeout
149*4882a593Smuzhiyun *
150*4882a593Smuzhiyun **/
ixgbe_start_hw_82598(struct ixgbe_hw * hw)151*4882a593Smuzhiyun static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
152*4882a593Smuzhiyun {
153*4882a593Smuzhiyun s32 ret_val;
154*4882a593Smuzhiyun
155*4882a593Smuzhiyun ret_val = ixgbe_start_hw_generic(hw);
156*4882a593Smuzhiyun if (ret_val)
157*4882a593Smuzhiyun return ret_val;
158*4882a593Smuzhiyun
159*4882a593Smuzhiyun /* set the completion timeout for interface */
160*4882a593Smuzhiyun ixgbe_set_pcie_completion_timeout(hw);
161*4882a593Smuzhiyun
162*4882a593Smuzhiyun return 0;
163*4882a593Smuzhiyun }
164*4882a593Smuzhiyun
165*4882a593Smuzhiyun /**
166*4882a593Smuzhiyun * ixgbe_get_link_capabilities_82598 - Determines link capabilities
167*4882a593Smuzhiyun * @hw: pointer to hardware structure
168*4882a593Smuzhiyun * @speed: pointer to link speed
169*4882a593Smuzhiyun * @autoneg: boolean auto-negotiation value
170*4882a593Smuzhiyun *
171*4882a593Smuzhiyun * Determines the link capabilities by reading the AUTOC register.
172*4882a593Smuzhiyun **/
ixgbe_get_link_capabilities_82598(struct ixgbe_hw * hw,ixgbe_link_speed * speed,bool * autoneg)173*4882a593Smuzhiyun static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
174*4882a593Smuzhiyun ixgbe_link_speed *speed,
175*4882a593Smuzhiyun bool *autoneg)
176*4882a593Smuzhiyun {
177*4882a593Smuzhiyun u32 autoc = 0;
178*4882a593Smuzhiyun
179*4882a593Smuzhiyun /*
180*4882a593Smuzhiyun * Determine link capabilities based on the stored value of AUTOC,
181*4882a593Smuzhiyun * which represents EEPROM defaults. If AUTOC value has not been
182*4882a593Smuzhiyun * stored, use the current register value.
183*4882a593Smuzhiyun */
184*4882a593Smuzhiyun if (hw->mac.orig_link_settings_stored)
185*4882a593Smuzhiyun autoc = hw->mac.orig_autoc;
186*4882a593Smuzhiyun else
187*4882a593Smuzhiyun autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
188*4882a593Smuzhiyun
189*4882a593Smuzhiyun switch (autoc & IXGBE_AUTOC_LMS_MASK) {
190*4882a593Smuzhiyun case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
191*4882a593Smuzhiyun *speed = IXGBE_LINK_SPEED_1GB_FULL;
192*4882a593Smuzhiyun *autoneg = false;
193*4882a593Smuzhiyun break;
194*4882a593Smuzhiyun
195*4882a593Smuzhiyun case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
196*4882a593Smuzhiyun *speed = IXGBE_LINK_SPEED_10GB_FULL;
197*4882a593Smuzhiyun *autoneg = false;
198*4882a593Smuzhiyun break;
199*4882a593Smuzhiyun
200*4882a593Smuzhiyun case IXGBE_AUTOC_LMS_1G_AN:
201*4882a593Smuzhiyun *speed = IXGBE_LINK_SPEED_1GB_FULL;
202*4882a593Smuzhiyun *autoneg = true;
203*4882a593Smuzhiyun break;
204*4882a593Smuzhiyun
205*4882a593Smuzhiyun case IXGBE_AUTOC_LMS_KX4_AN:
206*4882a593Smuzhiyun case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
207*4882a593Smuzhiyun *speed = IXGBE_LINK_SPEED_UNKNOWN;
208*4882a593Smuzhiyun if (autoc & IXGBE_AUTOC_KX4_SUPP)
209*4882a593Smuzhiyun *speed |= IXGBE_LINK_SPEED_10GB_FULL;
210*4882a593Smuzhiyun if (autoc & IXGBE_AUTOC_KX_SUPP)
211*4882a593Smuzhiyun *speed |= IXGBE_LINK_SPEED_1GB_FULL;
212*4882a593Smuzhiyun *autoneg = true;
213*4882a593Smuzhiyun break;
214*4882a593Smuzhiyun
215*4882a593Smuzhiyun default:
216*4882a593Smuzhiyun return IXGBE_ERR_LINK_SETUP;
217*4882a593Smuzhiyun }
218*4882a593Smuzhiyun
219*4882a593Smuzhiyun return 0;
220*4882a593Smuzhiyun }
221*4882a593Smuzhiyun
222*4882a593Smuzhiyun /**
223*4882a593Smuzhiyun * ixgbe_get_media_type_82598 - Determines media type
224*4882a593Smuzhiyun * @hw: pointer to hardware structure
225*4882a593Smuzhiyun *
226*4882a593Smuzhiyun * Returns the media type (fiber, copper, backplane)
227*4882a593Smuzhiyun **/
ixgbe_get_media_type_82598(struct ixgbe_hw * hw)228*4882a593Smuzhiyun static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
229*4882a593Smuzhiyun {
230*4882a593Smuzhiyun /* Detect if there is a copper PHY attached. */
231*4882a593Smuzhiyun switch (hw->phy.type) {
232*4882a593Smuzhiyun case ixgbe_phy_cu_unknown:
233*4882a593Smuzhiyun case ixgbe_phy_tn:
234*4882a593Smuzhiyun return ixgbe_media_type_copper;
235*4882a593Smuzhiyun
236*4882a593Smuzhiyun default:
237*4882a593Smuzhiyun break;
238*4882a593Smuzhiyun }
239*4882a593Smuzhiyun
240*4882a593Smuzhiyun /* Media type for I82598 is based on device ID */
241*4882a593Smuzhiyun switch (hw->device_id) {
242*4882a593Smuzhiyun case IXGBE_DEV_ID_82598:
243*4882a593Smuzhiyun case IXGBE_DEV_ID_82598_BX:
244*4882a593Smuzhiyun /* Default device ID is mezzanine card KX/KX4 */
245*4882a593Smuzhiyun return ixgbe_media_type_backplane;
246*4882a593Smuzhiyun
247*4882a593Smuzhiyun case IXGBE_DEV_ID_82598AF_DUAL_PORT:
248*4882a593Smuzhiyun case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
249*4882a593Smuzhiyun case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
250*4882a593Smuzhiyun case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
251*4882a593Smuzhiyun case IXGBE_DEV_ID_82598EB_XF_LR:
252*4882a593Smuzhiyun case IXGBE_DEV_ID_82598EB_SFP_LOM:
253*4882a593Smuzhiyun return ixgbe_media_type_fiber;
254*4882a593Smuzhiyun
255*4882a593Smuzhiyun case IXGBE_DEV_ID_82598EB_CX4:
256*4882a593Smuzhiyun case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
257*4882a593Smuzhiyun return ixgbe_media_type_cx4;
258*4882a593Smuzhiyun
259*4882a593Smuzhiyun case IXGBE_DEV_ID_82598AT:
260*4882a593Smuzhiyun case IXGBE_DEV_ID_82598AT2:
261*4882a593Smuzhiyun return ixgbe_media_type_copper;
262*4882a593Smuzhiyun
263*4882a593Smuzhiyun default:
264*4882a593Smuzhiyun return ixgbe_media_type_unknown;
265*4882a593Smuzhiyun }
266*4882a593Smuzhiyun }
267*4882a593Smuzhiyun
268*4882a593Smuzhiyun /**
269*4882a593Smuzhiyun * ixgbe_fc_enable_82598 - Enable flow control
270*4882a593Smuzhiyun * @hw: pointer to hardware structure
271*4882a593Smuzhiyun *
272*4882a593Smuzhiyun * Enable flow control according to the current settings.
273*4882a593Smuzhiyun **/
ixgbe_fc_enable_82598(struct ixgbe_hw * hw)274*4882a593Smuzhiyun static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
275*4882a593Smuzhiyun {
276*4882a593Smuzhiyun u32 fctrl_reg;
277*4882a593Smuzhiyun u32 rmcs_reg;
278*4882a593Smuzhiyun u32 reg;
279*4882a593Smuzhiyun u32 fcrtl, fcrth;
280*4882a593Smuzhiyun u32 link_speed = 0;
281*4882a593Smuzhiyun int i;
282*4882a593Smuzhiyun bool link_up;
283*4882a593Smuzhiyun
284*4882a593Smuzhiyun /* Validate the water mark configuration */
285*4882a593Smuzhiyun if (!hw->fc.pause_time)
286*4882a593Smuzhiyun return IXGBE_ERR_INVALID_LINK_SETTINGS;
287*4882a593Smuzhiyun
288*4882a593Smuzhiyun /* Low water mark of zero causes XOFF floods */
289*4882a593Smuzhiyun for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
290*4882a593Smuzhiyun if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
291*4882a593Smuzhiyun hw->fc.high_water[i]) {
292*4882a593Smuzhiyun if (!hw->fc.low_water[i] ||
293*4882a593Smuzhiyun hw->fc.low_water[i] >= hw->fc.high_water[i]) {
294*4882a593Smuzhiyun hw_dbg(hw, "Invalid water mark configuration\n");
295*4882a593Smuzhiyun return IXGBE_ERR_INVALID_LINK_SETTINGS;
296*4882a593Smuzhiyun }
297*4882a593Smuzhiyun }
298*4882a593Smuzhiyun }
299*4882a593Smuzhiyun
300*4882a593Smuzhiyun /*
301*4882a593Smuzhiyun * On 82598 having Rx FC on causes resets while doing 1G
302*4882a593Smuzhiyun * so if it's on turn it off once we know link_speed. For
303*4882a593Smuzhiyun * more details see 82598 Specification update.
304*4882a593Smuzhiyun */
305*4882a593Smuzhiyun hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
306*4882a593Smuzhiyun if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
307*4882a593Smuzhiyun switch (hw->fc.requested_mode) {
308*4882a593Smuzhiyun case ixgbe_fc_full:
309*4882a593Smuzhiyun hw->fc.requested_mode = ixgbe_fc_tx_pause;
310*4882a593Smuzhiyun break;
311*4882a593Smuzhiyun case ixgbe_fc_rx_pause:
312*4882a593Smuzhiyun hw->fc.requested_mode = ixgbe_fc_none;
313*4882a593Smuzhiyun break;
314*4882a593Smuzhiyun default:
315*4882a593Smuzhiyun /* no change */
316*4882a593Smuzhiyun break;
317*4882a593Smuzhiyun }
318*4882a593Smuzhiyun }
319*4882a593Smuzhiyun
320*4882a593Smuzhiyun /* Negotiate the fc mode to use */
321*4882a593Smuzhiyun hw->mac.ops.fc_autoneg(hw);
322*4882a593Smuzhiyun
323*4882a593Smuzhiyun /* Disable any previous flow control settings */
324*4882a593Smuzhiyun fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
325*4882a593Smuzhiyun fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
326*4882a593Smuzhiyun
327*4882a593Smuzhiyun rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
328*4882a593Smuzhiyun rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
329*4882a593Smuzhiyun
330*4882a593Smuzhiyun /*
331*4882a593Smuzhiyun * The possible values of fc.current_mode are:
332*4882a593Smuzhiyun * 0: Flow control is completely disabled
333*4882a593Smuzhiyun * 1: Rx flow control is enabled (we can receive pause frames,
334*4882a593Smuzhiyun * but not send pause frames).
335*4882a593Smuzhiyun * 2: Tx flow control is enabled (we can send pause frames but
336*4882a593Smuzhiyun * we do not support receiving pause frames).
337*4882a593Smuzhiyun * 3: Both Rx and Tx flow control (symmetric) are enabled.
338*4882a593Smuzhiyun * other: Invalid.
339*4882a593Smuzhiyun */
340*4882a593Smuzhiyun switch (hw->fc.current_mode) {
341*4882a593Smuzhiyun case ixgbe_fc_none:
342*4882a593Smuzhiyun /*
343*4882a593Smuzhiyun * Flow control is disabled by software override or autoneg.
344*4882a593Smuzhiyun * The code below will actually disable it in the HW.
345*4882a593Smuzhiyun */
346*4882a593Smuzhiyun break;
347*4882a593Smuzhiyun case ixgbe_fc_rx_pause:
348*4882a593Smuzhiyun /*
349*4882a593Smuzhiyun * Rx Flow control is enabled and Tx Flow control is
350*4882a593Smuzhiyun * disabled by software override. Since there really
351*4882a593Smuzhiyun * isn't a way to advertise that we are capable of RX
352*4882a593Smuzhiyun * Pause ONLY, we will advertise that we support both
353*4882a593Smuzhiyun * symmetric and asymmetric Rx PAUSE. Later, we will
354*4882a593Smuzhiyun * disable the adapter's ability to send PAUSE frames.
355*4882a593Smuzhiyun */
356*4882a593Smuzhiyun fctrl_reg |= IXGBE_FCTRL_RFCE;
357*4882a593Smuzhiyun break;
358*4882a593Smuzhiyun case ixgbe_fc_tx_pause:
359*4882a593Smuzhiyun /*
360*4882a593Smuzhiyun * Tx Flow control is enabled, and Rx Flow control is
361*4882a593Smuzhiyun * disabled by software override.
362*4882a593Smuzhiyun */
363*4882a593Smuzhiyun rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
364*4882a593Smuzhiyun break;
365*4882a593Smuzhiyun case ixgbe_fc_full:
366*4882a593Smuzhiyun /* Flow control (both Rx and Tx) is enabled by SW override. */
367*4882a593Smuzhiyun fctrl_reg |= IXGBE_FCTRL_RFCE;
368*4882a593Smuzhiyun rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
369*4882a593Smuzhiyun break;
370*4882a593Smuzhiyun default:
371*4882a593Smuzhiyun hw_dbg(hw, "Flow control param set incorrectly\n");
372*4882a593Smuzhiyun return IXGBE_ERR_CONFIG;
373*4882a593Smuzhiyun }
374*4882a593Smuzhiyun
375*4882a593Smuzhiyun /* Set 802.3x based flow control settings. */
376*4882a593Smuzhiyun fctrl_reg |= IXGBE_FCTRL_DPF;
377*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
378*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
379*4882a593Smuzhiyun
380*4882a593Smuzhiyun /* Set up and enable Rx high/low water mark thresholds, enable XON. */
381*4882a593Smuzhiyun for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
382*4882a593Smuzhiyun if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
383*4882a593Smuzhiyun hw->fc.high_water[i]) {
384*4882a593Smuzhiyun fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
385*4882a593Smuzhiyun fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
386*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
387*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
388*4882a593Smuzhiyun } else {
389*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
390*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
391*4882a593Smuzhiyun }
392*4882a593Smuzhiyun
393*4882a593Smuzhiyun }
394*4882a593Smuzhiyun
395*4882a593Smuzhiyun /* Configure pause time (2 TCs per register) */
396*4882a593Smuzhiyun reg = hw->fc.pause_time * 0x00010001;
397*4882a593Smuzhiyun for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
398*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
399*4882a593Smuzhiyun
400*4882a593Smuzhiyun /* Configure flow control refresh threshold value */
401*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
402*4882a593Smuzhiyun
403*4882a593Smuzhiyun return 0;
404*4882a593Smuzhiyun }
405*4882a593Smuzhiyun
406*4882a593Smuzhiyun /**
407*4882a593Smuzhiyun * ixgbe_start_mac_link_82598 - Configures MAC link settings
408*4882a593Smuzhiyun * @hw: pointer to hardware structure
409*4882a593Smuzhiyun * @autoneg_wait_to_complete: true when waiting for completion is needed
410*4882a593Smuzhiyun *
411*4882a593Smuzhiyun * Configures link settings based on values in the ixgbe_hw struct.
412*4882a593Smuzhiyun * Restarts the link. Performs autonegotiation if needed.
413*4882a593Smuzhiyun **/
ixgbe_start_mac_link_82598(struct ixgbe_hw * hw,bool autoneg_wait_to_complete)414*4882a593Smuzhiyun static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
415*4882a593Smuzhiyun bool autoneg_wait_to_complete)
416*4882a593Smuzhiyun {
417*4882a593Smuzhiyun u32 autoc_reg;
418*4882a593Smuzhiyun u32 links_reg;
419*4882a593Smuzhiyun u32 i;
420*4882a593Smuzhiyun s32 status = 0;
421*4882a593Smuzhiyun
422*4882a593Smuzhiyun /* Restart link */
423*4882a593Smuzhiyun autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
424*4882a593Smuzhiyun autoc_reg |= IXGBE_AUTOC_AN_RESTART;
425*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
426*4882a593Smuzhiyun
427*4882a593Smuzhiyun /* Only poll for autoneg to complete if specified to do so */
428*4882a593Smuzhiyun if (autoneg_wait_to_complete) {
429*4882a593Smuzhiyun if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
430*4882a593Smuzhiyun IXGBE_AUTOC_LMS_KX4_AN ||
431*4882a593Smuzhiyun (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
432*4882a593Smuzhiyun IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
433*4882a593Smuzhiyun links_reg = 0; /* Just in case Autoneg time = 0 */
434*4882a593Smuzhiyun for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
435*4882a593Smuzhiyun links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
436*4882a593Smuzhiyun if (links_reg & IXGBE_LINKS_KX_AN_COMP)
437*4882a593Smuzhiyun break;
438*4882a593Smuzhiyun msleep(100);
439*4882a593Smuzhiyun }
440*4882a593Smuzhiyun if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
441*4882a593Smuzhiyun status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
442*4882a593Smuzhiyun hw_dbg(hw, "Autonegotiation did not complete.\n");
443*4882a593Smuzhiyun }
444*4882a593Smuzhiyun }
445*4882a593Smuzhiyun }
446*4882a593Smuzhiyun
447*4882a593Smuzhiyun /* Add delay to filter out noises during initial link setup */
448*4882a593Smuzhiyun msleep(50);
449*4882a593Smuzhiyun
450*4882a593Smuzhiyun return status;
451*4882a593Smuzhiyun }
452*4882a593Smuzhiyun
453*4882a593Smuzhiyun /**
454*4882a593Smuzhiyun * ixgbe_validate_link_ready - Function looks for phy link
455*4882a593Smuzhiyun * @hw: pointer to hardware structure
456*4882a593Smuzhiyun *
457*4882a593Smuzhiyun * Function indicates success when phy link is available. If phy is not ready
458*4882a593Smuzhiyun * within 5 seconds of MAC indicating link, the function returns error.
459*4882a593Smuzhiyun **/
ixgbe_validate_link_ready(struct ixgbe_hw * hw)460*4882a593Smuzhiyun static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
461*4882a593Smuzhiyun {
462*4882a593Smuzhiyun u32 timeout;
463*4882a593Smuzhiyun u16 an_reg;
464*4882a593Smuzhiyun
465*4882a593Smuzhiyun if (hw->device_id != IXGBE_DEV_ID_82598AT2)
466*4882a593Smuzhiyun return 0;
467*4882a593Smuzhiyun
468*4882a593Smuzhiyun for (timeout = 0;
469*4882a593Smuzhiyun timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
470*4882a593Smuzhiyun hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &an_reg);
471*4882a593Smuzhiyun
472*4882a593Smuzhiyun if ((an_reg & MDIO_AN_STAT1_COMPLETE) &&
473*4882a593Smuzhiyun (an_reg & MDIO_STAT1_LSTATUS))
474*4882a593Smuzhiyun break;
475*4882a593Smuzhiyun
476*4882a593Smuzhiyun msleep(100);
477*4882a593Smuzhiyun }
478*4882a593Smuzhiyun
479*4882a593Smuzhiyun if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
480*4882a593Smuzhiyun hw_dbg(hw, "Link was indicated but link is down\n");
481*4882a593Smuzhiyun return IXGBE_ERR_LINK_SETUP;
482*4882a593Smuzhiyun }
483*4882a593Smuzhiyun
484*4882a593Smuzhiyun return 0;
485*4882a593Smuzhiyun }
486*4882a593Smuzhiyun
487*4882a593Smuzhiyun /**
488*4882a593Smuzhiyun * ixgbe_check_mac_link_82598 - Get link/speed status
489*4882a593Smuzhiyun * @hw: pointer to hardware structure
490*4882a593Smuzhiyun * @speed: pointer to link speed
491*4882a593Smuzhiyun * @link_up: true is link is up, false otherwise
492*4882a593Smuzhiyun * @link_up_wait_to_complete: bool used to wait for link up or not
493*4882a593Smuzhiyun *
494*4882a593Smuzhiyun * Reads the links register to determine if link is up and the current speed
495*4882a593Smuzhiyun **/
ixgbe_check_mac_link_82598(struct ixgbe_hw * hw,ixgbe_link_speed * speed,bool * link_up,bool link_up_wait_to_complete)496*4882a593Smuzhiyun static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
497*4882a593Smuzhiyun ixgbe_link_speed *speed, bool *link_up,
498*4882a593Smuzhiyun bool link_up_wait_to_complete)
499*4882a593Smuzhiyun {
500*4882a593Smuzhiyun u32 links_reg;
501*4882a593Smuzhiyun u32 i;
502*4882a593Smuzhiyun u16 link_reg, adapt_comp_reg;
503*4882a593Smuzhiyun
504*4882a593Smuzhiyun /*
505*4882a593Smuzhiyun * SERDES PHY requires us to read link status from register 0xC79F.
506*4882a593Smuzhiyun * Bit 0 set indicates link is up/ready; clear indicates link down.
507*4882a593Smuzhiyun * 0xC00C is read to check that the XAUI lanes are active. Bit 0
508*4882a593Smuzhiyun * clear indicates active; set indicates inactive.
509*4882a593Smuzhiyun */
510*4882a593Smuzhiyun if (hw->phy.type == ixgbe_phy_nl) {
511*4882a593Smuzhiyun hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
512*4882a593Smuzhiyun hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
513*4882a593Smuzhiyun hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD,
514*4882a593Smuzhiyun &adapt_comp_reg);
515*4882a593Smuzhiyun if (link_up_wait_to_complete) {
516*4882a593Smuzhiyun for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
517*4882a593Smuzhiyun if ((link_reg & 1) &&
518*4882a593Smuzhiyun ((adapt_comp_reg & 1) == 0)) {
519*4882a593Smuzhiyun *link_up = true;
520*4882a593Smuzhiyun break;
521*4882a593Smuzhiyun } else {
522*4882a593Smuzhiyun *link_up = false;
523*4882a593Smuzhiyun }
524*4882a593Smuzhiyun msleep(100);
525*4882a593Smuzhiyun hw->phy.ops.read_reg(hw, 0xC79F,
526*4882a593Smuzhiyun MDIO_MMD_PMAPMD,
527*4882a593Smuzhiyun &link_reg);
528*4882a593Smuzhiyun hw->phy.ops.read_reg(hw, 0xC00C,
529*4882a593Smuzhiyun MDIO_MMD_PMAPMD,
530*4882a593Smuzhiyun &adapt_comp_reg);
531*4882a593Smuzhiyun }
532*4882a593Smuzhiyun } else {
533*4882a593Smuzhiyun if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
534*4882a593Smuzhiyun *link_up = true;
535*4882a593Smuzhiyun else
536*4882a593Smuzhiyun *link_up = false;
537*4882a593Smuzhiyun }
538*4882a593Smuzhiyun
539*4882a593Smuzhiyun if (!*link_up)
540*4882a593Smuzhiyun return 0;
541*4882a593Smuzhiyun }
542*4882a593Smuzhiyun
543*4882a593Smuzhiyun links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
544*4882a593Smuzhiyun if (link_up_wait_to_complete) {
545*4882a593Smuzhiyun for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
546*4882a593Smuzhiyun if (links_reg & IXGBE_LINKS_UP) {
547*4882a593Smuzhiyun *link_up = true;
548*4882a593Smuzhiyun break;
549*4882a593Smuzhiyun } else {
550*4882a593Smuzhiyun *link_up = false;
551*4882a593Smuzhiyun }
552*4882a593Smuzhiyun msleep(100);
553*4882a593Smuzhiyun links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
554*4882a593Smuzhiyun }
555*4882a593Smuzhiyun } else {
556*4882a593Smuzhiyun if (links_reg & IXGBE_LINKS_UP)
557*4882a593Smuzhiyun *link_up = true;
558*4882a593Smuzhiyun else
559*4882a593Smuzhiyun *link_up = false;
560*4882a593Smuzhiyun }
561*4882a593Smuzhiyun
562*4882a593Smuzhiyun if (links_reg & IXGBE_LINKS_SPEED)
563*4882a593Smuzhiyun *speed = IXGBE_LINK_SPEED_10GB_FULL;
564*4882a593Smuzhiyun else
565*4882a593Smuzhiyun *speed = IXGBE_LINK_SPEED_1GB_FULL;
566*4882a593Smuzhiyun
567*4882a593Smuzhiyun if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && *link_up &&
568*4882a593Smuzhiyun (ixgbe_validate_link_ready(hw) != 0))
569*4882a593Smuzhiyun *link_up = false;
570*4882a593Smuzhiyun
571*4882a593Smuzhiyun return 0;
572*4882a593Smuzhiyun }
573*4882a593Smuzhiyun
574*4882a593Smuzhiyun /**
575*4882a593Smuzhiyun * ixgbe_setup_mac_link_82598 - Set MAC link speed
576*4882a593Smuzhiyun * @hw: pointer to hardware structure
577*4882a593Smuzhiyun * @speed: new link speed
578*4882a593Smuzhiyun * @autoneg_wait_to_complete: true when waiting for completion is needed
579*4882a593Smuzhiyun *
580*4882a593Smuzhiyun * Set the link speed in the AUTOC register and restarts link.
581*4882a593Smuzhiyun **/
ixgbe_setup_mac_link_82598(struct ixgbe_hw * hw,ixgbe_link_speed speed,bool autoneg_wait_to_complete)582*4882a593Smuzhiyun static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
583*4882a593Smuzhiyun ixgbe_link_speed speed,
584*4882a593Smuzhiyun bool autoneg_wait_to_complete)
585*4882a593Smuzhiyun {
586*4882a593Smuzhiyun bool autoneg = false;
587*4882a593Smuzhiyun ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
588*4882a593Smuzhiyun u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
589*4882a593Smuzhiyun u32 autoc = curr_autoc;
590*4882a593Smuzhiyun u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
591*4882a593Smuzhiyun
592*4882a593Smuzhiyun /* Check to see if speed passed in is supported. */
593*4882a593Smuzhiyun ixgbe_get_link_capabilities_82598(hw, &link_capabilities, &autoneg);
594*4882a593Smuzhiyun speed &= link_capabilities;
595*4882a593Smuzhiyun
596*4882a593Smuzhiyun if (speed == IXGBE_LINK_SPEED_UNKNOWN)
597*4882a593Smuzhiyun return IXGBE_ERR_LINK_SETUP;
598*4882a593Smuzhiyun
599*4882a593Smuzhiyun /* Set KX4/KX support according to speed requested */
600*4882a593Smuzhiyun else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
601*4882a593Smuzhiyun link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
602*4882a593Smuzhiyun autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
603*4882a593Smuzhiyun if (speed & IXGBE_LINK_SPEED_10GB_FULL)
604*4882a593Smuzhiyun autoc |= IXGBE_AUTOC_KX4_SUPP;
605*4882a593Smuzhiyun if (speed & IXGBE_LINK_SPEED_1GB_FULL)
606*4882a593Smuzhiyun autoc |= IXGBE_AUTOC_KX_SUPP;
607*4882a593Smuzhiyun if (autoc != curr_autoc)
608*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
609*4882a593Smuzhiyun }
610*4882a593Smuzhiyun
611*4882a593Smuzhiyun /* Setup and restart the link based on the new values in
612*4882a593Smuzhiyun * ixgbe_hw This will write the AUTOC register based on the new
613*4882a593Smuzhiyun * stored values
614*4882a593Smuzhiyun */
615*4882a593Smuzhiyun return ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
616*4882a593Smuzhiyun }
617*4882a593Smuzhiyun
618*4882a593Smuzhiyun
619*4882a593Smuzhiyun /**
620*4882a593Smuzhiyun * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
621*4882a593Smuzhiyun * @hw: pointer to hardware structure
622*4882a593Smuzhiyun * @speed: new link speed
623*4882a593Smuzhiyun * @autoneg_wait_to_complete: true if waiting is needed to complete
624*4882a593Smuzhiyun *
625*4882a593Smuzhiyun * Sets the link speed in the AUTOC register in the MAC and restarts link.
626*4882a593Smuzhiyun **/
ixgbe_setup_copper_link_82598(struct ixgbe_hw * hw,ixgbe_link_speed speed,bool autoneg_wait_to_complete)627*4882a593Smuzhiyun static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
628*4882a593Smuzhiyun ixgbe_link_speed speed,
629*4882a593Smuzhiyun bool autoneg_wait_to_complete)
630*4882a593Smuzhiyun {
631*4882a593Smuzhiyun s32 status;
632*4882a593Smuzhiyun
633*4882a593Smuzhiyun /* Setup the PHY according to input speed */
634*4882a593Smuzhiyun status = hw->phy.ops.setup_link_speed(hw, speed,
635*4882a593Smuzhiyun autoneg_wait_to_complete);
636*4882a593Smuzhiyun /* Set up MAC */
637*4882a593Smuzhiyun ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
638*4882a593Smuzhiyun
639*4882a593Smuzhiyun return status;
640*4882a593Smuzhiyun }
641*4882a593Smuzhiyun
642*4882a593Smuzhiyun /**
643*4882a593Smuzhiyun * ixgbe_reset_hw_82598 - Performs hardware reset
644*4882a593Smuzhiyun * @hw: pointer to hardware structure
645*4882a593Smuzhiyun *
646*4882a593Smuzhiyun * Resets the hardware by resetting the transmit and receive units, masks and
647*4882a593Smuzhiyun * clears all interrupts, performing a PHY reset, and performing a link (MAC)
648*4882a593Smuzhiyun * reset.
649*4882a593Smuzhiyun **/
ixgbe_reset_hw_82598(struct ixgbe_hw * hw)650*4882a593Smuzhiyun static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
651*4882a593Smuzhiyun {
652*4882a593Smuzhiyun s32 status;
653*4882a593Smuzhiyun s32 phy_status = 0;
654*4882a593Smuzhiyun u32 ctrl;
655*4882a593Smuzhiyun u32 gheccr;
656*4882a593Smuzhiyun u32 i;
657*4882a593Smuzhiyun u32 autoc;
658*4882a593Smuzhiyun u8 analog_val;
659*4882a593Smuzhiyun
660*4882a593Smuzhiyun /* Call adapter stop to disable tx/rx and clear interrupts */
661*4882a593Smuzhiyun status = hw->mac.ops.stop_adapter(hw);
662*4882a593Smuzhiyun if (status)
663*4882a593Smuzhiyun return status;
664*4882a593Smuzhiyun
665*4882a593Smuzhiyun /*
666*4882a593Smuzhiyun * Power up the Atlas Tx lanes if they are currently powered down.
667*4882a593Smuzhiyun * Atlas Tx lanes are powered down for MAC loopback tests, but
668*4882a593Smuzhiyun * they are not automatically restored on reset.
669*4882a593Smuzhiyun */
670*4882a593Smuzhiyun hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
671*4882a593Smuzhiyun if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
672*4882a593Smuzhiyun /* Enable Tx Atlas so packets can be transmitted again */
673*4882a593Smuzhiyun hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
674*4882a593Smuzhiyun &analog_val);
675*4882a593Smuzhiyun analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
676*4882a593Smuzhiyun hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
677*4882a593Smuzhiyun analog_val);
678*4882a593Smuzhiyun
679*4882a593Smuzhiyun hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
680*4882a593Smuzhiyun &analog_val);
681*4882a593Smuzhiyun analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
682*4882a593Smuzhiyun hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
683*4882a593Smuzhiyun analog_val);
684*4882a593Smuzhiyun
685*4882a593Smuzhiyun hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
686*4882a593Smuzhiyun &analog_val);
687*4882a593Smuzhiyun analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
688*4882a593Smuzhiyun hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
689*4882a593Smuzhiyun analog_val);
690*4882a593Smuzhiyun
691*4882a593Smuzhiyun hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
692*4882a593Smuzhiyun &analog_val);
693*4882a593Smuzhiyun analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
694*4882a593Smuzhiyun hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
695*4882a593Smuzhiyun analog_val);
696*4882a593Smuzhiyun }
697*4882a593Smuzhiyun
698*4882a593Smuzhiyun /* Reset PHY */
699*4882a593Smuzhiyun if (hw->phy.reset_disable == false) {
700*4882a593Smuzhiyun /* PHY ops must be identified and initialized prior to reset */
701*4882a593Smuzhiyun
702*4882a593Smuzhiyun /* Init PHY and function pointers, perform SFP setup */
703*4882a593Smuzhiyun phy_status = hw->phy.ops.init(hw);
704*4882a593Smuzhiyun if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
705*4882a593Smuzhiyun return phy_status;
706*4882a593Smuzhiyun if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
707*4882a593Smuzhiyun goto mac_reset_top;
708*4882a593Smuzhiyun
709*4882a593Smuzhiyun hw->phy.ops.reset(hw);
710*4882a593Smuzhiyun }
711*4882a593Smuzhiyun
712*4882a593Smuzhiyun mac_reset_top:
713*4882a593Smuzhiyun /*
714*4882a593Smuzhiyun * Issue global reset to the MAC. This needs to be a SW reset.
715*4882a593Smuzhiyun * If link reset is used, it might reset the MAC when mng is using it
716*4882a593Smuzhiyun */
717*4882a593Smuzhiyun ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
718*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
719*4882a593Smuzhiyun IXGBE_WRITE_FLUSH(hw);
720*4882a593Smuzhiyun usleep_range(1000, 1200);
721*4882a593Smuzhiyun
722*4882a593Smuzhiyun /* Poll for reset bit to self-clear indicating reset is complete */
723*4882a593Smuzhiyun for (i = 0; i < 10; i++) {
724*4882a593Smuzhiyun ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
725*4882a593Smuzhiyun if (!(ctrl & IXGBE_CTRL_RST))
726*4882a593Smuzhiyun break;
727*4882a593Smuzhiyun udelay(1);
728*4882a593Smuzhiyun }
729*4882a593Smuzhiyun if (ctrl & IXGBE_CTRL_RST) {
730*4882a593Smuzhiyun status = IXGBE_ERR_RESET_FAILED;
731*4882a593Smuzhiyun hw_dbg(hw, "Reset polling failed to complete.\n");
732*4882a593Smuzhiyun }
733*4882a593Smuzhiyun
734*4882a593Smuzhiyun msleep(50);
735*4882a593Smuzhiyun
736*4882a593Smuzhiyun /*
737*4882a593Smuzhiyun * Double resets are required for recovery from certain error
738*4882a593Smuzhiyun * conditions. Between resets, it is necessary to stall to allow time
739*4882a593Smuzhiyun * for any pending HW events to complete.
740*4882a593Smuzhiyun */
741*4882a593Smuzhiyun if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
742*4882a593Smuzhiyun hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
743*4882a593Smuzhiyun goto mac_reset_top;
744*4882a593Smuzhiyun }
745*4882a593Smuzhiyun
746*4882a593Smuzhiyun gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
747*4882a593Smuzhiyun gheccr &= ~(BIT(21) | BIT(18) | BIT(9) | BIT(6));
748*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
749*4882a593Smuzhiyun
750*4882a593Smuzhiyun /*
751*4882a593Smuzhiyun * Store the original AUTOC value if it has not been
752*4882a593Smuzhiyun * stored off yet. Otherwise restore the stored original
753*4882a593Smuzhiyun * AUTOC value since the reset operation sets back to deaults.
754*4882a593Smuzhiyun */
755*4882a593Smuzhiyun autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
756*4882a593Smuzhiyun if (hw->mac.orig_link_settings_stored == false) {
757*4882a593Smuzhiyun hw->mac.orig_autoc = autoc;
758*4882a593Smuzhiyun hw->mac.orig_link_settings_stored = true;
759*4882a593Smuzhiyun } else if (autoc != hw->mac.orig_autoc) {
760*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
761*4882a593Smuzhiyun }
762*4882a593Smuzhiyun
763*4882a593Smuzhiyun /* Store the permanent mac address */
764*4882a593Smuzhiyun hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
765*4882a593Smuzhiyun
766*4882a593Smuzhiyun /*
767*4882a593Smuzhiyun * Store MAC address from RAR0, clear receive address registers, and
768*4882a593Smuzhiyun * clear the multicast table
769*4882a593Smuzhiyun */
770*4882a593Smuzhiyun hw->mac.ops.init_rx_addrs(hw);
771*4882a593Smuzhiyun
772*4882a593Smuzhiyun if (phy_status)
773*4882a593Smuzhiyun status = phy_status;
774*4882a593Smuzhiyun
775*4882a593Smuzhiyun return status;
776*4882a593Smuzhiyun }
777*4882a593Smuzhiyun
778*4882a593Smuzhiyun /**
779*4882a593Smuzhiyun * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
780*4882a593Smuzhiyun * @hw: pointer to hardware struct
781*4882a593Smuzhiyun * @rar: receive address register index to associate with a VMDq index
782*4882a593Smuzhiyun * @vmdq: VMDq set index
783*4882a593Smuzhiyun **/
ixgbe_set_vmdq_82598(struct ixgbe_hw * hw,u32 rar,u32 vmdq)784*4882a593Smuzhiyun static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
785*4882a593Smuzhiyun {
786*4882a593Smuzhiyun u32 rar_high;
787*4882a593Smuzhiyun u32 rar_entries = hw->mac.num_rar_entries;
788*4882a593Smuzhiyun
789*4882a593Smuzhiyun /* Make sure we are using a valid rar index range */
790*4882a593Smuzhiyun if (rar >= rar_entries) {
791*4882a593Smuzhiyun hw_dbg(hw, "RAR index %d is out of range.\n", rar);
792*4882a593Smuzhiyun return IXGBE_ERR_INVALID_ARGUMENT;
793*4882a593Smuzhiyun }
794*4882a593Smuzhiyun
795*4882a593Smuzhiyun rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
796*4882a593Smuzhiyun rar_high &= ~IXGBE_RAH_VIND_MASK;
797*4882a593Smuzhiyun rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
798*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
799*4882a593Smuzhiyun return 0;
800*4882a593Smuzhiyun }
801*4882a593Smuzhiyun
802*4882a593Smuzhiyun /**
803*4882a593Smuzhiyun * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
804*4882a593Smuzhiyun * @hw: pointer to hardware struct
805*4882a593Smuzhiyun * @rar: receive address register index to associate with a VMDq index
806*4882a593Smuzhiyun * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
807*4882a593Smuzhiyun **/
ixgbe_clear_vmdq_82598(struct ixgbe_hw * hw,u32 rar,u32 vmdq)808*4882a593Smuzhiyun static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
809*4882a593Smuzhiyun {
810*4882a593Smuzhiyun u32 rar_high;
811*4882a593Smuzhiyun u32 rar_entries = hw->mac.num_rar_entries;
812*4882a593Smuzhiyun
813*4882a593Smuzhiyun
814*4882a593Smuzhiyun /* Make sure we are using a valid rar index range */
815*4882a593Smuzhiyun if (rar >= rar_entries) {
816*4882a593Smuzhiyun hw_dbg(hw, "RAR index %d is out of range.\n", rar);
817*4882a593Smuzhiyun return IXGBE_ERR_INVALID_ARGUMENT;
818*4882a593Smuzhiyun }
819*4882a593Smuzhiyun
820*4882a593Smuzhiyun rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
821*4882a593Smuzhiyun if (rar_high & IXGBE_RAH_VIND_MASK) {
822*4882a593Smuzhiyun rar_high &= ~IXGBE_RAH_VIND_MASK;
823*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
824*4882a593Smuzhiyun }
825*4882a593Smuzhiyun
826*4882a593Smuzhiyun return 0;
827*4882a593Smuzhiyun }
828*4882a593Smuzhiyun
829*4882a593Smuzhiyun /**
830*4882a593Smuzhiyun * ixgbe_set_vfta_82598 - Set VLAN filter table
831*4882a593Smuzhiyun * @hw: pointer to hardware structure
832*4882a593Smuzhiyun * @vlan: VLAN id to write to VLAN filter
833*4882a593Smuzhiyun * @vind: VMDq output index that maps queue to VLAN id in VFTA
834*4882a593Smuzhiyun * @vlan_on: boolean flag to turn on/off VLAN in VFTA
835*4882a593Smuzhiyun * @vlvf_bypass: boolean flag - unused
836*4882a593Smuzhiyun *
837*4882a593Smuzhiyun * Turn on/off specified VLAN in the VLAN filter table.
838*4882a593Smuzhiyun **/
ixgbe_set_vfta_82598(struct ixgbe_hw * hw,u32 vlan,u32 vind,bool vlan_on,bool vlvf_bypass)839*4882a593Smuzhiyun static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
840*4882a593Smuzhiyun bool vlan_on, bool vlvf_bypass)
841*4882a593Smuzhiyun {
842*4882a593Smuzhiyun u32 regindex;
843*4882a593Smuzhiyun u32 bitindex;
844*4882a593Smuzhiyun u32 bits;
845*4882a593Smuzhiyun u32 vftabyte;
846*4882a593Smuzhiyun
847*4882a593Smuzhiyun if (vlan > 4095)
848*4882a593Smuzhiyun return IXGBE_ERR_PARAM;
849*4882a593Smuzhiyun
850*4882a593Smuzhiyun /* Determine 32-bit word position in array */
851*4882a593Smuzhiyun regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
852*4882a593Smuzhiyun
853*4882a593Smuzhiyun /* Determine the location of the (VMD) queue index */
854*4882a593Smuzhiyun vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
855*4882a593Smuzhiyun bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
856*4882a593Smuzhiyun
857*4882a593Smuzhiyun /* Set the nibble for VMD queue index */
858*4882a593Smuzhiyun bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
859*4882a593Smuzhiyun bits &= (~(0x0F << bitindex));
860*4882a593Smuzhiyun bits |= (vind << bitindex);
861*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
862*4882a593Smuzhiyun
863*4882a593Smuzhiyun /* Determine the location of the bit for this VLAN id */
864*4882a593Smuzhiyun bitindex = vlan & 0x1F; /* lower five bits */
865*4882a593Smuzhiyun
866*4882a593Smuzhiyun bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
867*4882a593Smuzhiyun if (vlan_on)
868*4882a593Smuzhiyun /* Turn on this VLAN id */
869*4882a593Smuzhiyun bits |= BIT(bitindex);
870*4882a593Smuzhiyun else
871*4882a593Smuzhiyun /* Turn off this VLAN id */
872*4882a593Smuzhiyun bits &= ~BIT(bitindex);
873*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
874*4882a593Smuzhiyun
875*4882a593Smuzhiyun return 0;
876*4882a593Smuzhiyun }
877*4882a593Smuzhiyun
878*4882a593Smuzhiyun /**
879*4882a593Smuzhiyun * ixgbe_clear_vfta_82598 - Clear VLAN filter table
880*4882a593Smuzhiyun * @hw: pointer to hardware structure
881*4882a593Smuzhiyun *
882*4882a593Smuzhiyun * Clears the VLAN filer table, and the VMDq index associated with the filter
883*4882a593Smuzhiyun **/
ixgbe_clear_vfta_82598(struct ixgbe_hw * hw)884*4882a593Smuzhiyun static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
885*4882a593Smuzhiyun {
886*4882a593Smuzhiyun u32 offset;
887*4882a593Smuzhiyun u32 vlanbyte;
888*4882a593Smuzhiyun
889*4882a593Smuzhiyun for (offset = 0; offset < hw->mac.vft_size; offset++)
890*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
891*4882a593Smuzhiyun
892*4882a593Smuzhiyun for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
893*4882a593Smuzhiyun for (offset = 0; offset < hw->mac.vft_size; offset++)
894*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
895*4882a593Smuzhiyun 0);
896*4882a593Smuzhiyun
897*4882a593Smuzhiyun return 0;
898*4882a593Smuzhiyun }
899*4882a593Smuzhiyun
900*4882a593Smuzhiyun /**
901*4882a593Smuzhiyun * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
902*4882a593Smuzhiyun * @hw: pointer to hardware structure
903*4882a593Smuzhiyun * @reg: analog register to read
904*4882a593Smuzhiyun * @val: read value
905*4882a593Smuzhiyun *
906*4882a593Smuzhiyun * Performs read operation to Atlas analog register specified.
907*4882a593Smuzhiyun **/
ixgbe_read_analog_reg8_82598(struct ixgbe_hw * hw,u32 reg,u8 * val)908*4882a593Smuzhiyun static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
909*4882a593Smuzhiyun {
910*4882a593Smuzhiyun u32 atlas_ctl;
911*4882a593Smuzhiyun
912*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
913*4882a593Smuzhiyun IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
914*4882a593Smuzhiyun IXGBE_WRITE_FLUSH(hw);
915*4882a593Smuzhiyun udelay(10);
916*4882a593Smuzhiyun atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
917*4882a593Smuzhiyun *val = (u8)atlas_ctl;
918*4882a593Smuzhiyun
919*4882a593Smuzhiyun return 0;
920*4882a593Smuzhiyun }
921*4882a593Smuzhiyun
922*4882a593Smuzhiyun /**
923*4882a593Smuzhiyun * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
924*4882a593Smuzhiyun * @hw: pointer to hardware structure
925*4882a593Smuzhiyun * @reg: atlas register to write
926*4882a593Smuzhiyun * @val: value to write
927*4882a593Smuzhiyun *
928*4882a593Smuzhiyun * Performs write operation to Atlas analog register specified.
929*4882a593Smuzhiyun **/
ixgbe_write_analog_reg8_82598(struct ixgbe_hw * hw,u32 reg,u8 val)930*4882a593Smuzhiyun static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
931*4882a593Smuzhiyun {
932*4882a593Smuzhiyun u32 atlas_ctl;
933*4882a593Smuzhiyun
934*4882a593Smuzhiyun atlas_ctl = (reg << 8) | val;
935*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
936*4882a593Smuzhiyun IXGBE_WRITE_FLUSH(hw);
937*4882a593Smuzhiyun udelay(10);
938*4882a593Smuzhiyun
939*4882a593Smuzhiyun return 0;
940*4882a593Smuzhiyun }
941*4882a593Smuzhiyun
/**
 * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
 * @hw: pointer to hardware structure
 * @dev_addr: address to read from
 * @byte_offset: byte offset to read from dev_addr
 * @eeprom_data: value read
 *
 * Performs 8 byte read operation to SFP module's data over I2C interface.
 * Only supported for the NL PHY (ixgbe_phy_nl), whose MDIO registers drive
 * the SFP module's SDA/SCL lines. Returns 0 on success,
 * IXGBE_ERR_SWFW_SYNC if the SW/FW semaphore cannot be taken,
 * IXGBE_ERR_SFP_NOT_PRESENT if the EEPROM read does not report PASS, or
 * IXGBE_ERR_PHY for any other PHY type.
 **/
static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
				    u8 byte_offset, u8 *eeprom_data)
{
	s32 status = 0;
	u16 sfp_addr = 0;
	u16 sfp_data = 0;
	u16 sfp_stat = 0;
	u16 gssr;
	u32 i;

	/* Select the per-port PHY semaphore based on which LAN this is */
	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
		gssr = IXGBE_GSSR_PHY1_SM;
	else
		gssr = IXGBE_GSSR_PHY0_SM;

	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
		return IXGBE_ERR_SWFW_SYNC;

	if (hw->phy.type == ixgbe_phy_nl) {
		/*
		 * phy SDA/SCL registers are at addresses 0xC30A to
		 * 0xC30D. These registers are used to talk to the SFP+
		 * module's EEPROM through the SDA/SCL (I2C) interface.
		 */
		/* Address word: I2C device address in the high byte, byte
		 * offset in the low byte, plus the read-command flag.
		 */
		sfp_addr = (dev_addr << 8) + byte_offset;
		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
		hw->phy.ops.write_reg_mdi(hw,
					  IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
					  MDIO_MMD_PMAPMD,
					  sfp_addr);

		/* Poll status (up to 100 iterations of 10-20ms sleep) */
		for (i = 0; i < 100; i++) {
			hw->phy.ops.read_reg_mdi(hw,
						IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
						MDIO_MMD_PMAPMD,
						&sfp_stat);
			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
				break;
			usleep_range(10000, 20000);
		}

		/* Anything other than PASS (including a poll timeout still
		 * showing IN_PROGRESS) is treated as "module not present".
		 */
		if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
			hw_dbg(hw, "EEPROM read did not pass.\n");
			status = IXGBE_ERR_SFP_NOT_PRESENT;
			goto out;
		}

		/* Read data */
		hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
					 MDIO_MMD_PMAPMD, &sfp_data);

		/* The byte of interest is returned in the high byte */
		*eeprom_data = (u8)(sfp_data >> 8);
	} else {
		status = IXGBE_ERR_PHY;
	}

out:
	hw->mac.ops.release_swfw_sync(hw, gssr);
	return status;
}
1013*4882a593Smuzhiyun
/**
 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
 * @hw: pointer to hardware structure
 * @byte_offset: EEPROM byte offset to read
 * @eeprom_data: value read
 *
 * Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
 * Thin wrapper that targets the SFP module's EEPROM I2C device address.
 **/
static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
				       u8 *eeprom_data)
{
	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
					byte_offset, eeprom_data);
}
1028*4882a593Smuzhiyun
/**
 * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset at address 0xA2
 * @sff8472_data: value read
 *
 * Performs 8 byte read operation to SFP module's SFF-8472 data over I2C.
 * Thin wrapper that targets the module's diagnostics I2C device address.
 **/
static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
				   u8 *sff8472_data)
{
	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
					byte_offset, sff8472_data);
}
1043*4882a593Smuzhiyun
1044*4882a593Smuzhiyun /**
1045*4882a593Smuzhiyun * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1046*4882a593Smuzhiyun * port devices.
1047*4882a593Smuzhiyun * @hw: pointer to the HW structure
1048*4882a593Smuzhiyun *
1049*4882a593Smuzhiyun * Calls common function and corrects issue with some single port devices
1050*4882a593Smuzhiyun * that enable LAN1 but not LAN0.
1051*4882a593Smuzhiyun **/
ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw * hw)1052*4882a593Smuzhiyun static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1053*4882a593Smuzhiyun {
1054*4882a593Smuzhiyun struct ixgbe_bus_info *bus = &hw->bus;
1055*4882a593Smuzhiyun u16 pci_gen = 0;
1056*4882a593Smuzhiyun u16 pci_ctrl2 = 0;
1057*4882a593Smuzhiyun
1058*4882a593Smuzhiyun ixgbe_set_lan_id_multi_port_pcie(hw);
1059*4882a593Smuzhiyun
1060*4882a593Smuzhiyun /* check if LAN0 is disabled */
1061*4882a593Smuzhiyun hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1062*4882a593Smuzhiyun if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1063*4882a593Smuzhiyun
1064*4882a593Smuzhiyun hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1065*4882a593Smuzhiyun
1066*4882a593Smuzhiyun /* if LAN0 is completely disabled force function to 0 */
1067*4882a593Smuzhiyun if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1068*4882a593Smuzhiyun !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1069*4882a593Smuzhiyun !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1070*4882a593Smuzhiyun
1071*4882a593Smuzhiyun bus->func = 0;
1072*4882a593Smuzhiyun }
1073*4882a593Smuzhiyun }
1074*4882a593Smuzhiyun }
1075*4882a593Smuzhiyun
1076*4882a593Smuzhiyun /**
1077*4882a593Smuzhiyun * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
1078*4882a593Smuzhiyun * @hw: pointer to hardware structure
1079*4882a593Smuzhiyun * @num_pb: number of packet buffers to allocate
1080*4882a593Smuzhiyun * @headroom: reserve n KB of headroom
1081*4882a593Smuzhiyun * @strategy: packet buffer allocation strategy
1082*4882a593Smuzhiyun **/
ixgbe_set_rxpba_82598(struct ixgbe_hw * hw,int num_pb,u32 headroom,int strategy)1083*4882a593Smuzhiyun static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
1084*4882a593Smuzhiyun u32 headroom, int strategy)
1085*4882a593Smuzhiyun {
1086*4882a593Smuzhiyun u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
1087*4882a593Smuzhiyun u8 i = 0;
1088*4882a593Smuzhiyun
1089*4882a593Smuzhiyun if (!num_pb)
1090*4882a593Smuzhiyun return;
1091*4882a593Smuzhiyun
1092*4882a593Smuzhiyun /* Setup Rx packet buffer sizes */
1093*4882a593Smuzhiyun switch (strategy) {
1094*4882a593Smuzhiyun case PBA_STRATEGY_WEIGHTED:
1095*4882a593Smuzhiyun /* Setup the first four at 80KB */
1096*4882a593Smuzhiyun rxpktsize = IXGBE_RXPBSIZE_80KB;
1097*4882a593Smuzhiyun for (; i < 4; i++)
1098*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1099*4882a593Smuzhiyun /* Setup the last four at 48KB...don't re-init i */
1100*4882a593Smuzhiyun rxpktsize = IXGBE_RXPBSIZE_48KB;
1101*4882a593Smuzhiyun fallthrough;
1102*4882a593Smuzhiyun case PBA_STRATEGY_EQUAL:
1103*4882a593Smuzhiyun default:
1104*4882a593Smuzhiyun /* Divide the remaining Rx packet buffer evenly among the TCs */
1105*4882a593Smuzhiyun for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1106*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1107*4882a593Smuzhiyun break;
1108*4882a593Smuzhiyun }
1109*4882a593Smuzhiyun
1110*4882a593Smuzhiyun /* Setup Tx packet buffer sizes */
1111*4882a593Smuzhiyun for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1112*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
1113*4882a593Smuzhiyun }
1114*4882a593Smuzhiyun
/* MAC operations for the 82598: generic shared helpers plus the
 * 82598-specific reset, link, VMDq, VLAN-filter, and analog-register
 * routines defined in this file. Thermal sensor and firmware driver
 * version hooks are not supported on this MAC and are left NULL.
 */
static const struct ixgbe_mac_operations mac_ops_82598 = {
	.init_hw		= &ixgbe_init_hw_generic,
	.reset_hw		= &ixgbe_reset_hw_82598,
	.start_hw		= &ixgbe_start_hw_82598,
	.clear_hw_cntrs		= &ixgbe_clear_hw_cntrs_generic,
	.get_media_type		= &ixgbe_get_media_type_82598,
	.enable_rx_dma		= &ixgbe_enable_rx_dma_generic,
	.get_mac_addr		= &ixgbe_get_mac_addr_generic,
	.stop_adapter		= &ixgbe_stop_adapter_generic,
	.get_bus_info		= &ixgbe_get_bus_info_generic,
	.set_lan_id		= &ixgbe_set_lan_id_multi_port_pcie_82598,
	.read_analog_reg8	= &ixgbe_read_analog_reg8_82598,
	.write_analog_reg8	= &ixgbe_write_analog_reg8_82598,
	.setup_link		= &ixgbe_setup_mac_link_82598,
	.set_rxpba		= &ixgbe_set_rxpba_82598,
	.check_link		= &ixgbe_check_mac_link_82598,
	.get_link_capabilities	= &ixgbe_get_link_capabilities_82598,
	.led_on			= &ixgbe_led_on_generic,
	.led_off		= &ixgbe_led_off_generic,
	.init_led_link_act	= ixgbe_init_led_link_act_generic,
	.blink_led_start	= &ixgbe_blink_led_start_generic,
	.blink_led_stop		= &ixgbe_blink_led_stop_generic,
	.set_rar		= &ixgbe_set_rar_generic,
	.clear_rar		= &ixgbe_clear_rar_generic,
	.set_vmdq		= &ixgbe_set_vmdq_82598,
	.clear_vmdq		= &ixgbe_clear_vmdq_82598,
	.init_rx_addrs		= &ixgbe_init_rx_addrs_generic,
	.update_mc_addr_list	= &ixgbe_update_mc_addr_list_generic,
	.enable_mc		= &ixgbe_enable_mc_generic,
	.disable_mc		= &ixgbe_disable_mc_generic,
	.clear_vfta		= &ixgbe_clear_vfta_82598,
	.set_vfta		= &ixgbe_set_vfta_82598,
	.fc_enable		= &ixgbe_fc_enable_82598,
	.setup_fc		= ixgbe_setup_fc_generic,
	.fc_autoneg		= ixgbe_fc_autoneg,
	.set_fw_drv_ver         = NULL,
	.acquire_swfw_sync      = &ixgbe_acquire_swfw_sync,
	.release_swfw_sync      = &ixgbe_release_swfw_sync,
	.init_swfw_sync		= NULL,
	.get_thermal_sensor_data = NULL,
	.init_thermal_sensor_thresh = NULL,
	.prot_autoc_read	= &prot_autoc_read_generic,
	.prot_autoc_write	= &prot_autoc_write_generic,
	.enable_rx		= &ixgbe_enable_rx_generic,
	.disable_rx		= &ixgbe_disable_rx_generic,
};
1161*4882a593Smuzhiyun
/* EEPROM operations for the 82598: entirely generic helpers — reads go
 * through the EERD register, writes through the bit-bang interface.
 */
static const struct ixgbe_eeprom_operations eeprom_ops_82598 = {
	.init_params		= &ixgbe_init_eeprom_params_generic,
	.read			= &ixgbe_read_eerd_generic,
	.write			= &ixgbe_write_eeprom_generic,
	.write_buffer		= &ixgbe_write_eeprom_buffer_bit_bang_generic,
	.read_buffer		= &ixgbe_read_eerd_buffer_generic,
	.calc_checksum		= &ixgbe_calc_eeprom_checksum_generic,
	.validate_checksum	= &ixgbe_validate_eeprom_checksum_generic,
	.update_checksum	= &ixgbe_update_eeprom_checksum_generic,
};
1172*4882a593Smuzhiyun
/* PHY operations for the 82598: generic MDIO access plus the
 * 82598-specific I2C reads (SFP EEPROM / SFF-8472 diagnostics) that are
 * tunneled through the NL PHY's SDA/SCL registers.
 */
static const struct ixgbe_phy_operations phy_ops_82598 = {
	.identify		= &ixgbe_identify_phy_generic,
	.identify_sfp		= &ixgbe_identify_module_generic,
	.init			= &ixgbe_init_phy_ops_82598,
	.reset			= &ixgbe_reset_phy_generic,
	.read_reg		= &ixgbe_read_phy_reg_generic,
	.write_reg		= &ixgbe_write_phy_reg_generic,
	.read_reg_mdi		= &ixgbe_read_phy_reg_mdi,
	.write_reg_mdi		= &ixgbe_write_phy_reg_mdi,
	.setup_link		= &ixgbe_setup_phy_link_generic,
	.setup_link_speed	= &ixgbe_setup_phy_link_speed_generic,
	.read_i2c_sff8472	= &ixgbe_read_i2c_sff8472_82598,
	.read_i2c_eeprom	= &ixgbe_read_i2c_eeprom_82598,
	.check_overtemp		= &ixgbe_tn_check_overtemp,
};
1188*4882a593Smuzhiyun
/* Device descriptor exported to the ixgbe core: ties the 82598EB MAC type
 * to its ops tables and register value map.
 */
const struct ixgbe_info ixgbe_82598_info = {
	.mac			= ixgbe_mac_82598EB,
	.get_invariants		= &ixgbe_get_invariants_82598,
	.mac_ops		= &mac_ops_82598,
	.eeprom_ops		= &eeprom_ops_82598,
	.phy_ops		= &phy_ops_82598,
	.mvals			= ixgbe_mvals_8259X,
};
1197