// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */

#include <linux/pci.h>
#include <linux/delay.h>

#include "igc_mac.h"
#include "igc_hw.h"

/**
 * igc_disable_pcie_master - Disables PCI-express master access
 * @hw: pointer to the HW structure
 *
 * Returns 0 if successful, else returns -IGC_ERR_MASTER_REQUESTS_PENDING
 * (-10) if the master disable bit has not caused the master requests to be
 * disabled.
 *
 * Disables PCI-Express master access and verifies there are no pending
 * requests.
 */
s32 igc_disable_pcie_master(struct igc_hw *hw)
{
        s32 timeout = MASTER_DISABLE_TIMEOUT;
        s32 ret_val = 0;
        u32 ctrl;

        ctrl = rd32(IGC_CTRL);
        ctrl |= IGC_CTRL_GIO_MASTER_DISABLE;
        wr32(IGC_CTRL, ctrl);

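        /* Poll the GIO Master Enable status bit; each iteration sleeps
         * roughly 2-3 ms, so the wait is bounded by MASTER_DISABLE_TIMEOUT
         * iterations.
         */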
        while (timeout) {
                if (!(rd32(IGC_STATUS) &
                      IGC_STATUS_GIO_MASTER_ENABLE))
                        break;
                usleep_range(2000, 3000);
                timeout--;
        }

        if (!timeout) {
                hw_dbg("Master requests are pending.\n");
                ret_val = -IGC_ERR_MASTER_REQUESTS_PENDING;
                goto out;
        }

out:
        return ret_val;
}

/**
 * igc_init_rx_addrs - Initialize receive addresses
 * @hw: pointer to the HW structure
 * @rar_count: number of receive address registers
 *
 * Setup the receive address registers by setting the base receive address
 * register to the device's MAC address and clearing all the other receive
 * address registers to 0.
 */
void igc_init_rx_addrs(struct igc_hw *hw, u16 rar_count)
{
        u8 mac_addr[ETH_ALEN] = {0};
        u32 i;

        /* Setup the receive address */
        hw_dbg("Programming MAC Address into RAR[0]\n");

        hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

        /* Zero out the other (rar_count - 1) receive addresses */
        hw_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
        for (i = 1; i < rar_count; i++)
                hw->mac.ops.rar_set(hw, mac_addr, i);
}

/**
 * igc_set_fc_watermarks - Set flow control high/low watermarks
 * @hw: pointer to the HW structure
 *
 * Sets the flow control high/low threshold (watermark) registers. If
 * flow control XON frame transmission is enabled, then set XON frame
 * transmission as well.
 */
static s32 igc_set_fc_watermarks(struct igc_hw *hw)
{
        u32 fcrtl = 0, fcrth = 0;

        /* Set the flow control receive threshold registers. Normally,
         * these registers will be set to a default threshold that may be
         * adjusted later by the driver's runtime code. However, if the
         * ability to transmit pause frames is not enabled, then these
         * registers will be set to 0.
         */
        if (hw->fc.current_mode & igc_fc_tx_pause) {
                /* We need to set up the Receive Threshold high and low water
                 * marks as well as (optionally) enabling the transmission of
                 * XON frames.
                 */
                fcrtl = hw->fc.low_water;
                if (hw->fc.send_xon)
                        fcrtl |= IGC_FCRTL_XONE;

                fcrth = hw->fc.high_water;
        }
        wr32(IGC_FCRTL, fcrtl);
        wr32(IGC_FCRTH, fcrth);

        return 0;
}

/**
 * igc_setup_link - Setup flow control and link settings
 * @hw: pointer to the HW structure
 *
 * Determines which flow control settings to use, then configures flow
 * control. Calls the appropriate media-specific link configuration
 * function. Assuming the adapter has a valid link partner, a valid link
 * should be established. Assumes the hardware has previously been reset
 * and the transmitter and receiver are not enabled.
 */
s32 igc_setup_link(struct igc_hw *hw)
{
        s32 ret_val = 0;

        /* In the case of the phy reset being blocked, we already have a link.
         * We do not need to set it up again.
         */
        if (igc_check_reset_block(hw))
                goto out;

        /* If requested flow control is set to default, set flow control
         * to both 'rx' and 'tx' pause frames.
         */
        if (hw->fc.requested_mode == igc_fc_default)
                hw->fc.requested_mode = igc_fc_full;

        /* We want to save off the original Flow Control configuration just
         * in case we get disconnected and then reconnected into a different
         * hub or switch with different Flow Control capabilities.
         */
        hw->fc.current_mode = hw->fc.requested_mode;

        hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);

        /* Call the necessary media_type subroutine to configure the link. */
        ret_val = hw->mac.ops.setup_physical_interface(hw);
        if (ret_val)
                goto out;

        /* Initialize the flow control address, type, and PAUSE timer
         * registers to their default values. This is done even if flow
         * control is disabled, because it does not hurt anything to
         * initialize these registers.
         */
        hw_dbg("Initializing the Flow Control address, type and timer regs\n");
        wr32(IGC_FCT, FLOW_CONTROL_TYPE);
        wr32(IGC_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
        wr32(IGC_FCAL, FLOW_CONTROL_ADDRESS_LOW);

        wr32(IGC_FCTTV, hw->fc.pause_time);

        ret_val = igc_set_fc_watermarks(hw);

out:
        return ret_val;
}

/**
 * igc_force_mac_fc - Force the MAC's flow control settings
 * @hw: pointer to the HW structure
 *
 * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
 * device control register to reflect the adapter settings. TFCE and RFCE
 * need to be explicitly set by software when a copper PHY is used because
 * autonegotiation is managed by the PHY rather than the MAC. Software must
 * also configure these bits when link is forced on a fiber connection.
 */
s32 igc_force_mac_fc(struct igc_hw *hw)
{
        s32 ret_val = 0;
        u32 ctrl;

        ctrl = rd32(IGC_CTRL);

        /* Because we didn't get link via the internal auto-negotiation
         * mechanism (we either forced link or we got link via PHY
         * auto-neg), we have to manually enable/disable transmit and
         * receive flow control.
         *
         * The "Case" statement below enables/disables flow control
         * according to the "hw->fc.current_mode" parameter.
         *
         * The possible values of the "fc" parameter are:
         *      0:  Flow control is completely disabled
         *      1:  Rx flow control is enabled (we can receive pause
         *          frames but not send pause frames).
         *      2:  Tx flow control is enabled (we can send pause frames
         *          but we do not receive pause frames).
         *      3:  Both Rx and Tx flow control (symmetric) is enabled.
         *      other:  No other values should be possible at this point.
         */
        hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);

        switch (hw->fc.current_mode) {
        case igc_fc_none:
                ctrl &= (~(IGC_CTRL_TFCE | IGC_CTRL_RFCE));
                break;
        case igc_fc_rx_pause:
                ctrl &= (~IGC_CTRL_TFCE);
                ctrl |= IGC_CTRL_RFCE;
                break;
        case igc_fc_tx_pause:
                ctrl &= (~IGC_CTRL_RFCE);
                ctrl |= IGC_CTRL_TFCE;
                break;
        case igc_fc_full:
                ctrl |= (IGC_CTRL_TFCE | IGC_CTRL_RFCE);
                break;
        default:
                hw_dbg("Flow control param set incorrectly\n");
                ret_val = -IGC_ERR_CONFIG;
                goto out;
        }

        wr32(IGC_CTRL, ctrl);

out:
        return ret_val;
}

/**
 * igc_clear_hw_cntrs_base - Clear base hardware counters
 * @hw: pointer to the HW structure
 *
 * Clears the base hardware counters by reading the counter registers.
 */
void igc_clear_hw_cntrs_base(struct igc_hw *hw)
{
        rd32(IGC_CRCERRS);
        rd32(IGC_MPC);
        rd32(IGC_SCC);
        rd32(IGC_ECOL);
        rd32(IGC_MCC);
        rd32(IGC_LATECOL);
        rd32(IGC_COLC);
        rd32(IGC_RERC);
        rd32(IGC_DC);
        rd32(IGC_RLEC);
        rd32(IGC_XONRXC);
        rd32(IGC_XONTXC);
        rd32(IGC_XOFFRXC);
        rd32(IGC_XOFFTXC);
        rd32(IGC_FCRUC);
        rd32(IGC_GPRC);
        rd32(IGC_BPRC);
        rd32(IGC_MPRC);
        rd32(IGC_GPTC);
        rd32(IGC_GORCL);
        rd32(IGC_GORCH);
        rd32(IGC_GOTCL);
        rd32(IGC_GOTCH);
        rd32(IGC_RNBC);
        rd32(IGC_RUC);
        rd32(IGC_RFC);
        rd32(IGC_ROC);
        rd32(IGC_RJC);
        rd32(IGC_TORL);
        rd32(IGC_TORH);
        rd32(IGC_TOTL);
        rd32(IGC_TOTH);
        rd32(IGC_TPR);
        rd32(IGC_TPT);
        rd32(IGC_MPTC);
        rd32(IGC_BPTC);

        rd32(IGC_PRC64);
        rd32(IGC_PRC127);
        rd32(IGC_PRC255);
        rd32(IGC_PRC511);
        rd32(IGC_PRC1023);
        rd32(IGC_PRC1522);
        rd32(IGC_PTC64);
        rd32(IGC_PTC127);
        rd32(IGC_PTC255);
        rd32(IGC_PTC511);
        rd32(IGC_PTC1023);
        rd32(IGC_PTC1522);

        rd32(IGC_ALGNERRC);
        rd32(IGC_RXERRC);
        rd32(IGC_TNCRS);
        rd32(IGC_HTDPMC);
        rd32(IGC_TSCTC);

        rd32(IGC_MGTPRC);
        rd32(IGC_MGTPDC);
        rd32(IGC_MGTPTC);

        rd32(IGC_IAC);

        rd32(IGC_RPTHC);
        rd32(IGC_TLPIC);
        rd32(IGC_RLPIC);
        rd32(IGC_HGPTC);
        rd32(IGC_RXDMTC);
        rd32(IGC_HGORCL);
        rd32(IGC_HGORCH);
        rd32(IGC_HGOTCL);
        rd32(IGC_HGOTCH);
        rd32(IGC_LENERRS);
}

/**
 * igc_rar_set - Set receive address register
 * @hw: pointer to the HW structure
 * @addr: pointer to the receive address
 * @index: receive address array register
 *
 * Sets the receive address array register at index to the address passed
 * in by addr.
 */
void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index)
{
        u32 rar_low, rar_high;

        /* HW expects these in little endian so we reverse the byte order
         * from network order (big endian) to little endian
         */
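        /* For illustration only: the address 00:1b:21:aa:bb:cc would yield
         * rar_low = 0xaa211b00 and rar_high = 0x0000ccbb (before the
         * Address Valid bit is set below).
         */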
        rar_low = ((u32)addr[0] |
                   ((u32)addr[1] << 8) |
                   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));

        rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));

        /* If MAC address zero, no need to set the AV bit */
        if (rar_low || rar_high)
                rar_high |= IGC_RAH_AV;

        /* Some bridges will combine consecutive 32-bit writes into
         * a single burst write, which will malfunction on some parts.
         * The flushes avoid this.
         */
        wr32(IGC_RAL(index), rar_low);
        wrfl();
        wr32(IGC_RAH(index), rar_high);
        wrfl();
}

/**
 * igc_check_for_copper_link - Check for link (Copper)
 * @hw: pointer to the HW structure
 *
 * Checks to see if the link status of the hardware has changed. If a
 * change in link status has been detected, then we read the PHY registers
 * to get the current speed/duplex if link exists.
 */
s32 igc_check_for_copper_link(struct igc_hw *hw)
{
        struct igc_mac_info *mac = &hw->mac;
        bool link = false;
        s32 ret_val;

        /* We only want to go out to the PHY registers to see if Auto-Neg
         * has completed and/or if our link status has changed. The
         * get_link_status flag is set upon receiving a Link Status
         * Change or Rx Sequence Error interrupt.
         */
        if (!mac->get_link_status) {
                ret_val = 0;
                goto out;
        }

        /* First we want to see if the MII Status Register reports
         * link. If so, then we want to get the current speed/duplex
         * of the PHY.
         */
        ret_val = igc_phy_has_link(hw, 1, 0, &link);
        if (ret_val)
                goto out;

        if (!link)
                goto out; /* No link detected */

        mac->get_link_status = false;

        /* Check if there was a downshift; this must be checked
         * immediately after link-up.
         */
        igc_check_downshift(hw);

        /* If we are forcing speed/duplex, then we simply return since
         * we have already determined whether we have link or not.
         */
        if (!mac->autoneg) {
                ret_val = -IGC_ERR_CONFIG;
                goto out;
        }

        /* Auto-Neg is enabled. Auto Speed Detection takes care
         * of MAC speed/duplex configuration. So we only need to
         * configure Collision Distance in the MAC.
         */
        igc_config_collision_dist(hw);

        /* Configure Flow Control now that Auto-Neg has completed.
         * First, we need to restore the desired flow control
         * settings because we may have had to re-autoneg with a
         * different link partner.
         */
        ret_val = igc_config_fc_after_link_up(hw);
        if (ret_val)
                hw_dbg("Error configuring flow control\n");

out:
        /* Now that we are aware of our link settings, we can set the LTR
         * thresholds.
         */
        ret_val = igc_set_ltr_i225(hw, link);

        return ret_val;
}

/**
 * igc_config_collision_dist - Configure collision distance
 * @hw: pointer to the HW structure
 *
 * Configures the collision distance to the default value and is used
 * during link setup. Currently no func pointer exists and all
 * implementations are handled in the generic version of this function.
 */
void igc_config_collision_dist(struct igc_hw *hw)
{
        u32 tctl;

        tctl = rd32(IGC_TCTL);

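        /* Replace the collision distance field with the default value */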
        tctl &= ~IGC_TCTL_COLD;
        tctl |= IGC_COLLISION_DISTANCE << IGC_COLD_SHIFT;

        wr32(IGC_TCTL, tctl);
        wrfl();
}

/**
 * igc_config_fc_after_link_up - Configures flow control after link
 * @hw: pointer to the HW structure
 *
 * Checks the status of auto-negotiation after link up to ensure that the
 * speed and duplex were not forced. If the link needed to be forced, then
 * flow control needs to be forced also. If auto-negotiation is enabled
 * and did not fail, then we configure flow control based on our link
 * partner.
 */
s32 igc_config_fc_after_link_up(struct igc_hw *hw)
{
        u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
        struct igc_mac_info *mac = &hw->mac;
        u16 speed, duplex;
        s32 ret_val = 0;

        /* Check for the case where we have fiber media and auto-neg failed
         * so we had to force link. In this case, we need to force the
         * configuration of the MAC to match the "fc" parameter.
         */
        if (mac->autoneg_failed)
                ret_val = igc_force_mac_fc(hw);

        if (ret_val) {
                hw_dbg("Error forcing flow control settings\n");
                goto out;
        }

        /* Check for the case where we have copper media and auto-neg is
         * enabled. In this case, we need to check and see if Auto-Neg
         * has completed, and if so, how the PHY and link partner have
         * flow control configured.
         */
        if (mac->autoneg) {
                /* Read the MII Status Register and check to see if AutoNeg
                 * has completed. We read this twice because this reg has
                 * some "sticky" (latched) bits.
                 */
                ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
                                               &mii_status_reg);
                if (ret_val)
                        goto out;
                ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
                                               &mii_status_reg);
                if (ret_val)
                        goto out;

                if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
                        hw_dbg("Copper PHY and Auto Neg has not completed.\n");
                        goto out;
                }

                /* The AutoNeg process has completed, so we now need to
                 * read both the Auto Negotiation Advertisement
                 * Register (Address 4) and the Auto Negotiation Base
                 * Page Ability Register (Address 5) to determine how
                 * flow control was negotiated.
                 */
                ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
                                               &mii_nway_adv_reg);
                if (ret_val)
                        goto out;
                ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
                                               &mii_nway_lp_ability_reg);
                if (ret_val)
                        goto out;
                /* Two bits in the Auto Negotiation Advertisement Register
                 * (Address 4) and two bits in the Auto Negotiation Base
                 * Page Ability Register (Address 5) determine flow control
                 * for both the PHY and the link partner. The following
                 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
                 * 1999, describes these PAUSE resolution bits and how flow
                 * control is determined based upon these settings.
                 * NOTE: DC = Don't Care
                 *
                 *   LOCAL DEVICE  |   LINK PARTNER
                 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
                 *-------|---------|-------|---------|--------------------
                 *   0   |    0    |  DC   |   DC    | igc_fc_none
                 *   0   |    1    |   0   |   DC    | igc_fc_none
                 *   0   |    1    |   1   |    0    | igc_fc_none
                 *   0   |    1    |   1   |    1    | igc_fc_tx_pause
                 *   1   |    0    |   0   |   DC    | igc_fc_none
                 *   1   |   DC    |   1   |   DC    | igc_fc_full
                 *   1   |    1    |   0   |    0    | igc_fc_none
                 *   1   |    1    |   0   |    1    | igc_fc_rx_pause
                 *
                 * Are both PAUSE bits set to 1? If so, this implies
                 * Symmetric Flow Control is enabled at both ends. The
                 * ASM_DIR bits are irrelevant per the spec.
                 *
                 * For Symmetric Flow Control:
                 *
                 *   LOCAL DEVICE  |   LINK PARTNER
                 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
                 *-------|---------|-------|---------|--------------------
                 *   1   |   DC    |   1   |   DC    | igc_fc_full
                 *
                 */
                if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
                    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
                        /* Now we need to check if the user selected Rx-only
                         * pause frames. In this case, we had to advertise
                         * FULL flow control because we could not advertise
                         * Rx ONLY. Hence, we must now check to see if we
                         * need to turn OFF the TRANSMISSION of PAUSE frames.
                         */
                        if (hw->fc.requested_mode == igc_fc_full) {
                                hw->fc.current_mode = igc_fc_full;
                                hw_dbg("Flow Control = FULL.\n");
                        } else {
                                hw->fc.current_mode = igc_fc_rx_pause;
                                hw_dbg("Flow Control = RX PAUSE frames only.\n");
                        }
                }

                /* For receiving PAUSE frames ONLY.
                 *
                 *   LOCAL DEVICE  |   LINK PARTNER
                 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
                 *-------|---------|-------|---------|--------------------
                 *   0   |    1    |   1   |    1    | igc_fc_tx_pause
                 */
                else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
                         (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
                         (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
                         (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
                        hw->fc.current_mode = igc_fc_tx_pause;
                        hw_dbg("Flow Control = TX PAUSE frames only.\n");
                }
                /* For transmitting PAUSE frames ONLY.
                 *
                 *   LOCAL DEVICE  |   LINK PARTNER
                 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
                 *-------|---------|-------|---------|--------------------
                 *   1   |    1    |   0   |    1    | igc_fc_rx_pause
                 */
                else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
                         (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
                         !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
                         (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
                        hw->fc.current_mode = igc_fc_rx_pause;
                        hw_dbg("Flow Control = RX PAUSE frames only.\n");
                }
                /* Per the IEEE spec, at this point flow control should be
                 * disabled. However, we want to consider that we could
                 * be connected to a legacy switch that doesn't advertise
                 * desired flow control, but can be forced on the link
                 * partner. So if we advertised no flow control, that is
                 * what we will resolve to. If we advertised some kind of
                 * receive capability (Rx Pause Only or Full Flow Control)
                 * and the link partner advertised none, we will configure
                 * ourselves to enable Rx Flow Control only. We can do
                 * this safely for two reasons: If the link partner really
                 * didn't want flow control enabled, and we enable Rx, no
                 * harm done since we won't be receiving any PAUSE frames
                 * anyway. If the intent on the link partner was to have
                 * flow control enabled, then by us enabling RX only, we
                 * can at least receive pause frames and process them.
                 * This is a good idea because in most cases, since we are
                 * predominantly a server NIC, more times than not we will
                 * be asked to delay transmission of packets than asking
                 * our link partner to pause transmission of frames.
                 */
                else if ((hw->fc.requested_mode == igc_fc_none) ||
                         (hw->fc.requested_mode == igc_fc_tx_pause) ||
                         (hw->fc.strict_ieee)) {
                        hw->fc.current_mode = igc_fc_none;
                        hw_dbg("Flow Control = NONE.\n");
                } else {
                        hw->fc.current_mode = igc_fc_rx_pause;
                        hw_dbg("Flow Control = RX PAUSE frames only.\n");
                }

                /* Now we need to do one last check... If we auto-
                 * negotiated to HALF DUPLEX, flow control should not be
                 * enabled per IEEE 802.3 spec.
                 */
                ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
                if (ret_val) {
                        hw_dbg("Error getting link speed and duplex\n");
                        goto out;
                }

                if (duplex == HALF_DUPLEX)
                        hw->fc.current_mode = igc_fc_none;

                /* Now we call a subroutine to actually force the MAC
                 * controller to use the correct flow control settings.
                 */
                ret_val = igc_force_mac_fc(hw);
                if (ret_val) {
                        hw_dbg("Error forcing flow control settings\n");
                        goto out;
                }
        }

out:
        return ret_val;
}

/**
 * igc_get_auto_rd_done - Check for auto read completion
 * @hw: pointer to the HW structure
 *
 * Check EEPROM for Auto Read done bit.
 */
s32 igc_get_auto_rd_done(struct igc_hw *hw)
{
        s32 ret_val = 0;
        s32 i = 0;

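        /* Poll EECD until the hardware indicates that the NVM auto-read
         * has completed, sleeping 1-2 ms between reads.
         */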
        while (i < AUTO_READ_DONE_TIMEOUT) {
                if (rd32(IGC_EECD) & IGC_EECD_AUTO_RD)
                        break;
                usleep_range(1000, 2000);
                i++;
        }

        if (i == AUTO_READ_DONE_TIMEOUT) {
                hw_dbg("Auto read by HW from NVM has not completed.\n");
                ret_val = -IGC_ERR_RESET;
                goto out;
        }

out:
        return ret_val;
}

/**
 * igc_get_speed_and_duplex_copper - Retrieve current speed/duplex
 * @hw: pointer to the HW structure
 * @speed: stores the current speed
 * @duplex: stores the current duplex
 *
 * Read the status register for the current speed/duplex and store the current
 * speed and duplex for copper connections.
 */
s32 igc_get_speed_and_duplex_copper(struct igc_hw *hw, u16 *speed,
                                    u16 *duplex)
{
        u32 status;

        status = rd32(IGC_STATUS);
        if (status & IGC_STATUS_SPEED_1000) {
                /* For I225, STATUS will indicate 1G speed in both 1 Gbps
                 * and 2.5 Gbps link modes. An additional bit is used
                 * to differentiate between 1 Gbps and 2.5 Gbps.
                 */
                if (hw->mac.type == igc_i225 &&
                    (status & IGC_STATUS_SPEED_2500)) {
                        *speed = SPEED_2500;
                        hw_dbg("2500 Mbs, ");
                } else {
                        *speed = SPEED_1000;
                        hw_dbg("1000 Mbs, ");
                }
        } else if (status & IGC_STATUS_SPEED_100) {
                *speed = SPEED_100;
                hw_dbg("100 Mbs, ");
        } else {
                *speed = SPEED_10;
                hw_dbg("10 Mbs, ");
        }

        if (status & IGC_STATUS_FD) {
                *duplex = FULL_DUPLEX;
                hw_dbg("Full Duplex\n");
        } else {
                *duplex = HALF_DUPLEX;
                hw_dbg("Half Duplex\n");
        }

        return 0;
}

/**
 * igc_put_hw_semaphore - Release hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Release hardware semaphore used to access the PHY or NVM
 */
void igc_put_hw_semaphore(struct igc_hw *hw)
{
        u32 swsm;

        swsm = rd32(IGC_SWSM);

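        /* Clear both semaphore bits (SMBI and SWESMBI) to release the lock */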
        swsm &= ~(IGC_SWSM_SMBI | IGC_SWSM_SWESMBI);

        wr32(IGC_SWSM, swsm);
}

/**
 * igc_enable_mng_pass_thru - Enable processing of ARPs
 * @hw: pointer to the HW structure
 *
 * Verifies that the hardware needs to leave the interface enabled so that
 * frames can be directed to and from the management interface.
 */
bool igc_enable_mng_pass_thru(struct igc_hw *hw)
{
        bool ret_val = false;
        u32 fwsm, factps;
        u32 manc;

        if (!hw->mac.asf_firmware_present)
                goto out;

        manc = rd32(IGC_MANC);

        if (!(manc & IGC_MANC_RCV_TCO_EN))
                goto out;

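        /* With a valid ARC (management) subsystem, pass-through requires the
         * manageability clock to be running (not gated) and the firmware to
         * be in pass-through mode; otherwise fall back to the SMBus/ASF
         * check below.
         */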
        if (hw->mac.arc_subsystem_valid) {
                fwsm = rd32(IGC_FWSM);
                factps = rd32(IGC_FACTPS);

                if (!(factps & IGC_FACTPS_MNGCG) &&
                    ((fwsm & IGC_FWSM_MODE_MASK) ==
                     (igc_mng_mode_pt << IGC_FWSM_MODE_SHIFT))) {
                        ret_val = true;
                        goto out;
                }
        } else {
                if ((manc & IGC_MANC_SMBUS_EN) &&
                    !(manc & IGC_MANC_ASF_EN)) {
                        ret_val = true;
                        goto out;
                }
        }

out:
        return ret_val;
}

/**
 * igc_hash_mc_addr - Generate a multicast hash value
 * @hw: pointer to the HW structure
 * @mc_addr: pointer to a multicast address
 *
 * Generates a multicast address hash value which is used to determine
 * the multicast filter table array address and new table value. See
 * igc_mta_set()
 **/
static u32 igc_hash_mc_addr(struct igc_hw *hw, u8 *mc_addr)
{
        u32 hash_value, hash_mask;
        u8 bit_shift = 0;

        /* Register count multiplied by bits per register */
        hash_mask = (hw->mac.mta_reg_count * 32) - 1;

        /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
         * where 0xFF would still fall within the hash mask.
         */
        while (hash_mask >> bit_shift != 0xFF)
                bit_shift++;

        /* The portion of the address that is used for the hash table
         * is determined by the mc_filter_type setting.
         * The algorithm is such that there is a total of 8 bits of shifting.
         * The bit_shift for a mc_filter_type of 0 represents the number of
         * left-shifts where the MSB of mc_addr[5] would still fall within
         * the hash_mask. Case 0 does this exactly. Since there are a total
         * of 8 bits of shifting, then mc_addr[4] will shift right the
         * remaining number of bits. Thus 8 - bit_shift. The rest of the
         * cases are a variation of this algorithm...essentially raising the
         * number of bits to shift mc_addr[5] left, while still keeping the
         * 8-bit shifting total.
         *
         * For example, given the following Destination MAC Address and an
         * MTA register count of 128 (thus a 4096-bit vector and 0xFFF mask),
         * we can see that the bit_shift for case 0 is 4. These are the hash
         * values resulting from each mc_filter_type...
         * [0] [1] [2] [3] [4] [5]
         * 01  AA  00  12  34  56
         * LSB                 MSB
         *
         * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
         * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
         * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
         * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
         */
        switch (hw->mac.mc_filter_type) {
        default:
        case 0:
                break;
        case 1:
                bit_shift += 1;
                break;
        case 2:
                bit_shift += 2;
                break;
        case 3:
                bit_shift += 4;
                break;
        }

        hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
                                   (((u16)mc_addr[5]) << bit_shift)));

        return hash_value;
}

/**
 * igc_update_mc_addr_list - Update Multicast addresses
 * @hw: pointer to the HW structure
 * @mc_addr_list: array of multicast addresses to program
 * @mc_addr_count: number of multicast addresses to program
 *
 * Updates entire Multicast Table Array.
 * The caller must have a packed mc_addr_list of multicast addresses.
 **/
void igc_update_mc_addr_list(struct igc_hw *hw,
                             u8 *mc_addr_list, u32 mc_addr_count)
{
        u32 hash_value, hash_bit, hash_reg;
        int i;

        /* clear mta_shadow */
        memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));

        /* update mta_shadow from mc_addr_list */
        for (i = 0; (u32)i < mc_addr_count; i++) {
                hash_value = igc_hash_mc_addr(hw, mc_addr_list);

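                /* The upper bits of the hash select one of the 32-bit MTA
                 * registers; the low 5 bits select the bit within it.
                 */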
                hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
                hash_bit = hash_value & 0x1F;

                hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
                mc_addr_list += ETH_ALEN;
        }

        /* replace the entire MTA table */
        for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
                array_wr32(IGC_MTA, i, hw->mac.mta_shadow[i]);
        wrfl();
}