1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /* Copyright (c) 2018 Intel Corporation */
3*4882a593Smuzhiyun
4*4882a593Smuzhiyun #include <linux/delay.h>
5*4882a593Smuzhiyun
6*4882a593Smuzhiyun #include "igc_hw.h"
7*4882a593Smuzhiyun #include "igc_i225.h"
8*4882a593Smuzhiyun #include "igc_mac.h"
9*4882a593Smuzhiyun #include "igc_base.h"
10*4882a593Smuzhiyun #include "igc.h"
11*4882a593Smuzhiyun
/**
 * igc_reset_hw_base - Reset hardware
 * @hw: pointer to the HW structure
 *
 * This resets the hardware into a known state. This is a
 * function pointer entry point called by the api module.
 *
 * Return: 0 on success, or the negative status of the auto-read-done
 * poll (also logged, not treated as fatal by callers with no EEPROM).
 */
static s32 igc_reset_hw_base(struct igc_hw *hw)
{
	s32 ret_val;
	u32 ctrl;

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igc_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg("PCI-E Master disable polling has failed\n");

	hw_dbg("Masking off all interrupts\n");
	wr32(IGC_IMC, 0xffffffff);

	/* Quiesce Rx/Tx before the global reset and flush the writes. */
	wr32(IGC_RCTL, 0);
	wr32(IGC_TCTL, IGC_TCTL_PSP);
	wrfl();

	/* Give outstanding DMA a chance to drain. */
	usleep_range(10000, 20000);

	ctrl = rd32(IGC_CTRL);

	hw_dbg("Issuing a global reset to MAC\n");
	wr32(IGC_CTRL, ctrl | IGC_CTRL_DEV_RST);

	ret_val = igc_get_auto_rd_done(hw);
	if (ret_val) {
		/* When auto config read does not complete, do not
		 * return with an error. This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		hw_dbg("Auto Read Done did not complete\n");
	}

	/* Clear any pending interrupt events. */
	wr32(IGC_IMC, 0xffffffff);
	rd32(IGC_ICR);

	return ret_val;
}
60*4882a593Smuzhiyun
61*4882a593Smuzhiyun /**
62*4882a593Smuzhiyun * igc_init_nvm_params_base - Init NVM func ptrs.
63*4882a593Smuzhiyun * @hw: pointer to the HW structure
64*4882a593Smuzhiyun */
igc_init_nvm_params_base(struct igc_hw * hw)65*4882a593Smuzhiyun static s32 igc_init_nvm_params_base(struct igc_hw *hw)
66*4882a593Smuzhiyun {
67*4882a593Smuzhiyun struct igc_nvm_info *nvm = &hw->nvm;
68*4882a593Smuzhiyun u32 eecd = rd32(IGC_EECD);
69*4882a593Smuzhiyun u16 size;
70*4882a593Smuzhiyun
71*4882a593Smuzhiyun size = (u16)((eecd & IGC_EECD_SIZE_EX_MASK) >>
72*4882a593Smuzhiyun IGC_EECD_SIZE_EX_SHIFT);
73*4882a593Smuzhiyun
74*4882a593Smuzhiyun /* Added to a constant, "size" becomes the left-shift value
75*4882a593Smuzhiyun * for setting word_size.
76*4882a593Smuzhiyun */
77*4882a593Smuzhiyun size += NVM_WORD_SIZE_BASE_SHIFT;
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun /* Just in case size is out of range, cap it to the largest
80*4882a593Smuzhiyun * EEPROM size supported
81*4882a593Smuzhiyun */
82*4882a593Smuzhiyun if (size > 15)
83*4882a593Smuzhiyun size = 15;
84*4882a593Smuzhiyun
85*4882a593Smuzhiyun nvm->type = igc_nvm_eeprom_spi;
86*4882a593Smuzhiyun nvm->word_size = BIT(size);
87*4882a593Smuzhiyun nvm->opcode_bits = 8;
88*4882a593Smuzhiyun nvm->delay_usec = 1;
89*4882a593Smuzhiyun
90*4882a593Smuzhiyun nvm->page_size = eecd & IGC_EECD_ADDR_BITS ? 32 : 8;
91*4882a593Smuzhiyun nvm->address_bits = eecd & IGC_EECD_ADDR_BITS ?
92*4882a593Smuzhiyun 16 : 8;
93*4882a593Smuzhiyun
94*4882a593Smuzhiyun if (nvm->word_size == BIT(15))
95*4882a593Smuzhiyun nvm->page_size = 128;
96*4882a593Smuzhiyun
97*4882a593Smuzhiyun return 0;
98*4882a593Smuzhiyun }
99*4882a593Smuzhiyun
100*4882a593Smuzhiyun /**
101*4882a593Smuzhiyun * igc_setup_copper_link_base - Configure copper link settings
102*4882a593Smuzhiyun * @hw: pointer to the HW structure
103*4882a593Smuzhiyun *
104*4882a593Smuzhiyun * Configures the link for auto-neg or forced speed and duplex. Then we check
105*4882a593Smuzhiyun * for link, once link is established calls to configure collision distance
106*4882a593Smuzhiyun * and flow control are called.
107*4882a593Smuzhiyun */
igc_setup_copper_link_base(struct igc_hw * hw)108*4882a593Smuzhiyun static s32 igc_setup_copper_link_base(struct igc_hw *hw)
109*4882a593Smuzhiyun {
110*4882a593Smuzhiyun s32 ret_val = 0;
111*4882a593Smuzhiyun u32 ctrl;
112*4882a593Smuzhiyun
113*4882a593Smuzhiyun ctrl = rd32(IGC_CTRL);
114*4882a593Smuzhiyun ctrl |= IGC_CTRL_SLU;
115*4882a593Smuzhiyun ctrl &= ~(IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX);
116*4882a593Smuzhiyun wr32(IGC_CTRL, ctrl);
117*4882a593Smuzhiyun
118*4882a593Smuzhiyun ret_val = igc_setup_copper_link(hw);
119*4882a593Smuzhiyun
120*4882a593Smuzhiyun return ret_val;
121*4882a593Smuzhiyun }
122*4882a593Smuzhiyun
123*4882a593Smuzhiyun /**
124*4882a593Smuzhiyun * igc_init_mac_params_base - Init MAC func ptrs.
125*4882a593Smuzhiyun * @hw: pointer to the HW structure
126*4882a593Smuzhiyun */
igc_init_mac_params_base(struct igc_hw * hw)127*4882a593Smuzhiyun static s32 igc_init_mac_params_base(struct igc_hw *hw)
128*4882a593Smuzhiyun {
129*4882a593Smuzhiyun struct igc_dev_spec_base *dev_spec = &hw->dev_spec._base;
130*4882a593Smuzhiyun struct igc_mac_info *mac = &hw->mac;
131*4882a593Smuzhiyun
132*4882a593Smuzhiyun /* Set mta register count */
133*4882a593Smuzhiyun mac->mta_reg_count = 128;
134*4882a593Smuzhiyun mac->rar_entry_count = IGC_RAR_ENTRIES;
135*4882a593Smuzhiyun
136*4882a593Smuzhiyun /* reset */
137*4882a593Smuzhiyun mac->ops.reset_hw = igc_reset_hw_base;
138*4882a593Smuzhiyun
139*4882a593Smuzhiyun mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225;
140*4882a593Smuzhiyun mac->ops.release_swfw_sync = igc_release_swfw_sync_i225;
141*4882a593Smuzhiyun
142*4882a593Smuzhiyun /* Allow a single clear of the SW semaphore on I225 */
143*4882a593Smuzhiyun if (mac->type == igc_i225)
144*4882a593Smuzhiyun dev_spec->clear_semaphore_once = true;
145*4882a593Smuzhiyun
146*4882a593Smuzhiyun /* physical interface link setup */
147*4882a593Smuzhiyun mac->ops.setup_physical_interface = igc_setup_copper_link_base;
148*4882a593Smuzhiyun
149*4882a593Smuzhiyun return 0;
150*4882a593Smuzhiyun }
151*4882a593Smuzhiyun
152*4882a593Smuzhiyun /**
153*4882a593Smuzhiyun * igc_init_phy_params_base - Init PHY func ptrs.
154*4882a593Smuzhiyun * @hw: pointer to the HW structure
155*4882a593Smuzhiyun */
igc_init_phy_params_base(struct igc_hw * hw)156*4882a593Smuzhiyun static s32 igc_init_phy_params_base(struct igc_hw *hw)
157*4882a593Smuzhiyun {
158*4882a593Smuzhiyun struct igc_phy_info *phy = &hw->phy;
159*4882a593Smuzhiyun s32 ret_val = 0;
160*4882a593Smuzhiyun
161*4882a593Smuzhiyun if (hw->phy.media_type != igc_media_type_copper) {
162*4882a593Smuzhiyun phy->type = igc_phy_none;
163*4882a593Smuzhiyun goto out;
164*4882a593Smuzhiyun }
165*4882a593Smuzhiyun
166*4882a593Smuzhiyun phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500;
167*4882a593Smuzhiyun phy->reset_delay_us = 100;
168*4882a593Smuzhiyun
169*4882a593Smuzhiyun /* set lan id */
170*4882a593Smuzhiyun hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >>
171*4882a593Smuzhiyun IGC_STATUS_FUNC_SHIFT;
172*4882a593Smuzhiyun
173*4882a593Smuzhiyun /* Make sure the PHY is in a good state. Several people have reported
174*4882a593Smuzhiyun * firmware leaving the PHY's page select register set to something
175*4882a593Smuzhiyun * other than the default of zero, which causes the PHY ID read to
176*4882a593Smuzhiyun * access something other than the intended register.
177*4882a593Smuzhiyun */
178*4882a593Smuzhiyun ret_val = hw->phy.ops.reset(hw);
179*4882a593Smuzhiyun if (ret_val) {
180*4882a593Smuzhiyun hw_dbg("Error resetting the PHY\n");
181*4882a593Smuzhiyun goto out;
182*4882a593Smuzhiyun }
183*4882a593Smuzhiyun
184*4882a593Smuzhiyun ret_val = igc_get_phy_id(hw);
185*4882a593Smuzhiyun if (ret_val)
186*4882a593Smuzhiyun return ret_val;
187*4882a593Smuzhiyun
188*4882a593Smuzhiyun igc_check_for_copper_link(hw);
189*4882a593Smuzhiyun
190*4882a593Smuzhiyun phy->type = igc_phy_i225;
191*4882a593Smuzhiyun
192*4882a593Smuzhiyun out:
193*4882a593Smuzhiyun return ret_val;
194*4882a593Smuzhiyun }
195*4882a593Smuzhiyun
igc_get_invariants_base(struct igc_hw * hw)196*4882a593Smuzhiyun static s32 igc_get_invariants_base(struct igc_hw *hw)
197*4882a593Smuzhiyun {
198*4882a593Smuzhiyun struct igc_mac_info *mac = &hw->mac;
199*4882a593Smuzhiyun s32 ret_val = 0;
200*4882a593Smuzhiyun
201*4882a593Smuzhiyun switch (hw->device_id) {
202*4882a593Smuzhiyun case IGC_DEV_ID_I225_LM:
203*4882a593Smuzhiyun case IGC_DEV_ID_I225_V:
204*4882a593Smuzhiyun case IGC_DEV_ID_I225_I:
205*4882a593Smuzhiyun case IGC_DEV_ID_I220_V:
206*4882a593Smuzhiyun case IGC_DEV_ID_I225_K:
207*4882a593Smuzhiyun case IGC_DEV_ID_I225_K2:
208*4882a593Smuzhiyun case IGC_DEV_ID_I225_LMVP:
209*4882a593Smuzhiyun case IGC_DEV_ID_I225_IT:
210*4882a593Smuzhiyun case IGC_DEV_ID_I226_LM:
211*4882a593Smuzhiyun case IGC_DEV_ID_I226_V:
212*4882a593Smuzhiyun case IGC_DEV_ID_I226_IT:
213*4882a593Smuzhiyun case IGC_DEV_ID_I221_V:
214*4882a593Smuzhiyun case IGC_DEV_ID_I226_BLANK_NVM:
215*4882a593Smuzhiyun case IGC_DEV_ID_I225_BLANK_NVM:
216*4882a593Smuzhiyun mac->type = igc_i225;
217*4882a593Smuzhiyun break;
218*4882a593Smuzhiyun default:
219*4882a593Smuzhiyun return -IGC_ERR_MAC_INIT;
220*4882a593Smuzhiyun }
221*4882a593Smuzhiyun
222*4882a593Smuzhiyun hw->phy.media_type = igc_media_type_copper;
223*4882a593Smuzhiyun
224*4882a593Smuzhiyun /* mac initialization and operations */
225*4882a593Smuzhiyun ret_val = igc_init_mac_params_base(hw);
226*4882a593Smuzhiyun if (ret_val)
227*4882a593Smuzhiyun goto out;
228*4882a593Smuzhiyun
229*4882a593Smuzhiyun /* NVM initialization */
230*4882a593Smuzhiyun ret_val = igc_init_nvm_params_base(hw);
231*4882a593Smuzhiyun switch (hw->mac.type) {
232*4882a593Smuzhiyun case igc_i225:
233*4882a593Smuzhiyun ret_val = igc_init_nvm_params_i225(hw);
234*4882a593Smuzhiyun break;
235*4882a593Smuzhiyun default:
236*4882a593Smuzhiyun break;
237*4882a593Smuzhiyun }
238*4882a593Smuzhiyun
239*4882a593Smuzhiyun /* setup PHY parameters */
240*4882a593Smuzhiyun ret_val = igc_init_phy_params_base(hw);
241*4882a593Smuzhiyun if (ret_val)
242*4882a593Smuzhiyun goto out;
243*4882a593Smuzhiyun
244*4882a593Smuzhiyun out:
245*4882a593Smuzhiyun return ret_val;
246*4882a593Smuzhiyun }
247*4882a593Smuzhiyun
248*4882a593Smuzhiyun /**
249*4882a593Smuzhiyun * igc_acquire_phy_base - Acquire rights to access PHY
250*4882a593Smuzhiyun * @hw: pointer to the HW structure
251*4882a593Smuzhiyun *
252*4882a593Smuzhiyun * Acquire access rights to the correct PHY. This is a
253*4882a593Smuzhiyun * function pointer entry point called by the api module.
254*4882a593Smuzhiyun */
igc_acquire_phy_base(struct igc_hw * hw)255*4882a593Smuzhiyun static s32 igc_acquire_phy_base(struct igc_hw *hw)
256*4882a593Smuzhiyun {
257*4882a593Smuzhiyun u16 mask = IGC_SWFW_PHY0_SM;
258*4882a593Smuzhiyun
259*4882a593Smuzhiyun return hw->mac.ops.acquire_swfw_sync(hw, mask);
260*4882a593Smuzhiyun }
261*4882a593Smuzhiyun
262*4882a593Smuzhiyun /**
263*4882a593Smuzhiyun * igc_release_phy_base - Release rights to access PHY
264*4882a593Smuzhiyun * @hw: pointer to the HW structure
265*4882a593Smuzhiyun *
266*4882a593Smuzhiyun * A wrapper to release access rights to the correct PHY. This is a
267*4882a593Smuzhiyun * function pointer entry point called by the api module.
268*4882a593Smuzhiyun */
igc_release_phy_base(struct igc_hw * hw)269*4882a593Smuzhiyun static void igc_release_phy_base(struct igc_hw *hw)
270*4882a593Smuzhiyun {
271*4882a593Smuzhiyun u16 mask = IGC_SWFW_PHY0_SM;
272*4882a593Smuzhiyun
273*4882a593Smuzhiyun hw->mac.ops.release_swfw_sync(hw, mask);
274*4882a593Smuzhiyun }
275*4882a593Smuzhiyun
276*4882a593Smuzhiyun /**
277*4882a593Smuzhiyun * igc_init_hw_base - Initialize hardware
278*4882a593Smuzhiyun * @hw: pointer to the HW structure
279*4882a593Smuzhiyun *
280*4882a593Smuzhiyun * This inits the hardware readying it for operation.
281*4882a593Smuzhiyun */
igc_init_hw_base(struct igc_hw * hw)282*4882a593Smuzhiyun static s32 igc_init_hw_base(struct igc_hw *hw)
283*4882a593Smuzhiyun {
284*4882a593Smuzhiyun struct igc_mac_info *mac = &hw->mac;
285*4882a593Smuzhiyun u16 i, rar_count = mac->rar_entry_count;
286*4882a593Smuzhiyun s32 ret_val = 0;
287*4882a593Smuzhiyun
288*4882a593Smuzhiyun /* Setup the receive address */
289*4882a593Smuzhiyun igc_init_rx_addrs(hw, rar_count);
290*4882a593Smuzhiyun
291*4882a593Smuzhiyun /* Zero out the Multicast HASH table */
292*4882a593Smuzhiyun hw_dbg("Zeroing the MTA\n");
293*4882a593Smuzhiyun for (i = 0; i < mac->mta_reg_count; i++)
294*4882a593Smuzhiyun array_wr32(IGC_MTA, i, 0);
295*4882a593Smuzhiyun
296*4882a593Smuzhiyun /* Zero out the Unicast HASH table */
297*4882a593Smuzhiyun hw_dbg("Zeroing the UTA\n");
298*4882a593Smuzhiyun for (i = 0; i < mac->uta_reg_count; i++)
299*4882a593Smuzhiyun array_wr32(IGC_UTA, i, 0);
300*4882a593Smuzhiyun
301*4882a593Smuzhiyun /* Setup link and flow control */
302*4882a593Smuzhiyun ret_val = igc_setup_link(hw);
303*4882a593Smuzhiyun
304*4882a593Smuzhiyun /* Clear all of the statistics registers (clear on read). It is
305*4882a593Smuzhiyun * important that we do this after we have tried to establish link
306*4882a593Smuzhiyun * because the symbol error count will increment wildly if there
307*4882a593Smuzhiyun * is no link.
308*4882a593Smuzhiyun */
309*4882a593Smuzhiyun igc_clear_hw_cntrs_base(hw);
310*4882a593Smuzhiyun
311*4882a593Smuzhiyun return ret_val;
312*4882a593Smuzhiyun }
313*4882a593Smuzhiyun
/**
 * igc_power_down_phy_copper_base - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * In the case of a PHY power down to save power, or to turn off link during a
 * driver unload, or wake on lan is not enabled, remove the link.
 */
void igc_power_down_phy_copper_base(struct igc_hw *hw)
{
	/* Keep the PHY up while the management interface needs it or a
	 * reset is blocked; otherwise power it down.
	 */
	if (igc_enable_mng_pass_thru(hw) || igc_check_reset_block(hw))
		return;

	igc_power_down_phy_copper(hw);
}
327*4882a593Smuzhiyun
/**
 * igc_rx_fifo_flush_base - Clean rx fifo after Rx enable
 * @hw: pointer to the HW structure
 *
 * After Rx enable, if manageability is enabled then there is likely some
 * bad data at the start of the fifo and possibly in the DMA fifo. This
 * function clears the fifos and flushes any packets that came in as rx was
 * being enabled.
 *
 * The ordering below is part of the workaround and must not change:
 * disable queues -> poll for quiesce -> reject-all RCTL flush -> restore.
 */
void igc_rx_fifo_flush_base(struct igc_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	/* disable IPv6 options as per hardware errata */
	rfctl = rd32(IGC_RFCTL);
	rfctl |= IGC_RFCTL_IPV6_EX_DIS;
	wr32(IGC_RFCTL, rfctl);

	/* Nothing more to do unless management traffic reception is on. */
	if (!(rd32(IGC_MANC) & IGC_MANC_RCV_TCO_EN))
		return;

	/* Disable all Rx queues, remembering their previous state so it
	 * can be restored afterwards.
	 */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = rd32(IGC_RXDCTL(i));
		wr32(IGC_RXDCTL(i),
		     rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		usleep_range(1000, 2000);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= rd32(IGC_RXDCTL(i));
		if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		hw_dbg("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected. Set enable and wait 2ms so that
	 * any packet that was coming in as RCTL.EN was set is flushed
	 */
	wr32(IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF);

	rlpml = rd32(IGC_RLPML);
	wr32(IGC_RLPML, 0);

	rctl = rd32(IGC_RCTL);
	temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP);
	temp_rctl |= IGC_RCTL_LPE;

	/* Two-step write: program the reject-all config, then enable Rx. */
	wr32(IGC_RCTL, temp_rctl);
	wr32(IGC_RCTL, temp_rctl | IGC_RCTL_EN);
	wrfl();
	usleep_range(2000, 3000);

	/* Enable Rx queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		wr32(IGC_RXDCTL(i), rxdctl[i]);
	wr32(IGC_RCTL, rctl);
	wrfl();

	wr32(IGC_RLPML, rlpml);
	wr32(IGC_RFCTL, rfctl);

	/* Flush receive errors generated by workaround */
	rd32(IGC_ROC);
	rd32(IGC_RNBC);
	rd32(IGC_MPC);
}
403*4882a593Smuzhiyun
/* Shared MAC operations table. The remaining function pointers
 * (reset_hw, swfw sync, setup_physical_interface) are filled in at
 * runtime by igc_init_mac_params_base().
 */
static struct igc_mac_operations igc_mac_ops_base = {
	.init_hw = igc_init_hw_base,
	.check_for_link = igc_check_for_copper_link,
	.rar_set = igc_rar_set,
	.read_mac_addr = igc_read_mac_addr,
	.get_speed_and_duplex = igc_get_speed_and_duplex_copper,
};
411*4882a593Smuzhiyun
/* PHY operations table: semaphore acquire/release wrappers plus the
 * GPY-family register access and reset routines.
 */
static const struct igc_phy_operations igc_phy_ops_base = {
	.acquire = igc_acquire_phy_base,
	.release = igc_release_phy_base,
	.reset = igc_phy_hw_reset,
	.read_reg = igc_read_phy_reg_gpy,
	.write_reg = igc_write_phy_reg_gpy,
};
419*4882a593Smuzhiyun
/* Device family descriptor exported to the rest of the driver; ties the
 * invariants routine to the MAC and PHY operation tables above.
 */
const struct igc_info igc_base_info = {
	.get_invariants = igc_get_invariants_base,
	.mac_ops = &igc_mac_ops_base,
	.phy_ops = &igc_phy_ops_base,
};
425