// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include "ixgbe_x540.h"
#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"

static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed);
static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *);
static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *);
static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *);
static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *);

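/**
 * ixgbe_get_invariants_X550_x - Set invariants for X550EM_x devices
 * @hw: pointer to hardware structure
 *
 * Starts from the X540 invariants, drops PHY power control on non-copper
 * media and points the link interface at the external CS4227.
 */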
static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	struct ixgbe_link_info *link = &hw->link;

	/* Start with X540 invariants, since so similar */
	ixgbe_get_invariants_X540(hw);

	if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
		phy->ops.set_phy_power = NULL;

	link->addr = IXGBE_CS4227;

	return 0;
}

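/**
 * ixgbe_get_invariants_X550_x_fw - Set invariants for FW-managed X550EM_x
 * @hw: pointer to hardware structure
 *
 * Starts from the X540 invariants and clears the PHY power-control op.
 */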
static s32 ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw)
{
	struct ixgbe_phy_info *phy = &hw->phy;

	/* Start with X540 invariants, since so similar */
	ixgbe_get_invariants_X540(hw);

	phy->ops.set_phy_power = NULL;

	return 0;
}

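/**
 * ixgbe_get_invariants_X550_a - Set invariants for X550EM_a devices
 * @hw: pointer to hardware structure
 *
 * Starts from the X540 invariants and drops PHY power control on
 * non-copper media.
 */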
static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;

	/* Start with X540 invariants, since so similar */
	ixgbe_get_invariants_X540(hw);

	if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
		phy->ops.set_phy_power = NULL;

	return 0;
}

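/**
 * ixgbe_get_invariants_X550_a_fw - Set invariants for FW-managed X550EM_a
 * @hw: pointer to hardware structure
 *
 * Starts from the X540 invariants and clears the PHY power-control op.
 */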
static s32 ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw)
{
	struct ixgbe_phy_info *phy = &hw->phy;

	/* Start with X540 invariants, since so similar */
	ixgbe_get_invariants_X540(hw);

	phy->ops.set_phy_power = NULL;

	return 0;
}

/**
 * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
 * @hw: pointer to hardware structure
 **/
static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw)
{
	u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);

	if (hw->bus.lan_id) {
		esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1);
		esdp |= IXGBE_ESDP_SDP1_DIR;
	}
	esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR);
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_read_cs4227 - Read CS4227 register
 * @hw: pointer to hardware structure
 * @reg: register number to read
 * @value: pointer to receive value read
 *
 * Returns status code
 */
static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value)
{
	return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value);
}

/**
 * ixgbe_write_cs4227 - Write CS4227 register
 * @hw: pointer to hardware structure
 * @reg: register number to write
 * @value: value to write to register
 *
 * Returns status code
 */
static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value)
{
	return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value);
}

/**
 * ixgbe_read_pe - Read register from port expander
 * @hw: pointer to hardware structure
 * @reg: register number to read
 * @value: pointer to receive read value
 *
 * Returns status code
 */
static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value)
{
	s32 status;

	status = ixgbe_read_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE, value);
	if (status)
		hw_err(hw, "port expander access failed with %d\n", status);
	return status;
}

/**
 * ixgbe_write_pe - Write register to port expander
 * @hw: pointer to hardware structure
 * @reg: register number to write
 * @value: value to write
 *
 * Returns status code
 */
static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value)
{
	s32 status;

	status = ixgbe_write_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE,
						       value);
	if (status)
		hw_err(hw, "port expander access failed with %d\n", status);
	return status;
}

/**
 * ixgbe_reset_cs4227 - Reset CS4227 using port expander
 * @hw: pointer to hardware structure
 *
 * This function assumes that the caller has acquired the proper semaphore.
 * Returns error code
 */
static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw)
{
	s32 status;
	u32 retry;
	u16 value;
	u8 reg;

	/* Trigger hard reset. */
	status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
	if (status)
		return status;
	reg |= IXGBE_PE_BIT1;
	status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
	if (status)
		return status;

	status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, &reg);
	if (status)
		return status;
	reg &= ~IXGBE_PE_BIT1;
	status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg);
	if (status)
		return status;

	status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
	if (status)
		return status;
	reg &= ~IXGBE_PE_BIT1;
	status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
	if (status)
		return status;

	usleep_range(IXGBE_CS4227_RESET_HOLD, IXGBE_CS4227_RESET_HOLD + 100);

	status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, &reg);
	if (status)
		return status;
	reg |= IXGBE_PE_BIT1;
	status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg);
	if (status)
		return status;

	/* Wait for the reset to complete. */
	msleep(IXGBE_CS4227_RESET_DELAY);
	for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
		status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS,
					   &value);
		if (!status && value == IXGBE_CS4227_EEPROM_LOAD_OK)
			break;
		msleep(IXGBE_CS4227_CHECK_DELAY);
	}
	if (retry == IXGBE_CS4227_RETRIES) {
		hw_err(hw, "CS4227 reset did not complete\n");
		return IXGBE_ERR_PHY;
	}

	status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value);
	if (status || !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) {
		hw_err(hw, "CS4227 EEPROM did not load successfully\n");
		return IXGBE_ERR_PHY;
	}

	return 0;
}

/**
 * ixgbe_check_cs4227 - Check CS4227 and reset as needed
 * @hw: pointer to hardware structure
 */
static void ixgbe_check_cs4227(struct ixgbe_hw *hw)
{
	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	s32 status;
	u16 value;
	u8 retry;

	for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) {
		status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
		if (status) {
			hw_err(hw, "semaphore failed with %d\n", status);
			msleep(IXGBE_CS4227_CHECK_DELAY);
			continue;
		}

		/* Get status of reset flow. */
		status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value);
		if (!status && value == IXGBE_CS4227_RESET_COMPLETE)
			goto out;

		if (status || value != IXGBE_CS4227_RESET_PENDING)
			break;

		/* Reset is pending. Wait and check again. */
		hw->mac.ops.release_swfw_sync(hw, swfw_mask);
		msleep(IXGBE_CS4227_CHECK_DELAY);
	}
	/* If still pending, assume other instance failed. */
	if (retry == IXGBE_CS4227_RETRIES) {
		status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
		if (status) {
			hw_err(hw, "semaphore failed with %d\n", status);
			return;
		}
	}

	/* Reset the CS4227. */
	status = ixgbe_reset_cs4227(hw);
	if (status) {
		hw_err(hw, "CS4227 reset failed: %d", status);
		goto out;
	}

	/* Reset takes so long, temporarily release semaphore in case the
	 * other driver instance is waiting for the reset indication.
	 */
	ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
			   IXGBE_CS4227_RESET_PENDING);
	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
	usleep_range(10000, 12000);
	status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
	if (status) {
		hw_err(hw, "semaphore failed with %d", status);
		return;
	}

	/* Record completion for next time. */
	status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH,
				    IXGBE_CS4227_RESET_COMPLETE);

out:
	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
	msleep(hw->eeprom.semaphore_delay);
}

/**
 * ixgbe_identify_phy_x550em - Get PHY type based on device id
 * @hw: pointer to hardware structure
 *
 * Returns error code
 */
static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
{
	switch (hw->device_id) {
	case IXGBE_DEV_ID_X550EM_A_SFP:
		if (hw->bus.lan_id)
			hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
		else
			hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
		return ixgbe_identify_module_generic(hw);
	case IXGBE_DEV_ID_X550EM_X_SFP:
		/* set up for CS4227 usage */
		hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
		ixgbe_setup_mux_ctl(hw);
		ixgbe_check_cs4227(hw);
		fallthrough;
	case IXGBE_DEV_ID_X550EM_A_SFP_N:
		return ixgbe_identify_module_generic(hw);
	case IXGBE_DEV_ID_X550EM_X_KX4:
		hw->phy.type = ixgbe_phy_x550em_kx4;
		break;
	case IXGBE_DEV_ID_X550EM_X_XFI:
		hw->phy.type = ixgbe_phy_x550em_xfi;
		break;
	case IXGBE_DEV_ID_X550EM_X_KR:
	case IXGBE_DEV_ID_X550EM_A_KR:
	case IXGBE_DEV_ID_X550EM_A_KR_L:
		hw->phy.type = ixgbe_phy_x550em_kr;
		break;
	case IXGBE_DEV_ID_X550EM_A_10G_T:
		if (hw->bus.lan_id)
			hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
		else
			hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
		fallthrough;
	case IXGBE_DEV_ID_X550EM_X_10G_T:
		return ixgbe_identify_phy_generic(hw);
	case IXGBE_DEV_ID_X550EM_X_1G_T:
		hw->phy.type = ixgbe_phy_ext_1g_t;
		break;
	case IXGBE_DEV_ID_X550EM_A_1G_T:
	case IXGBE_DEV_ID_X550EM_A_1G_T_L:
		hw->phy.type = ixgbe_phy_fw;
		hw->phy.ops.read_reg = NULL;
		hw->phy.ops.write_reg = NULL;
		if (hw->bus.lan_id)
			hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
		else
			hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
		break;
	default:
		break;
	}
	return 0;
}

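/**
 * ixgbe_read_phy_reg_x550em - Stub PHY register read
 * @hw: pointer to hardware structure
 * @reg_addr: 32 bit address of PHY register to read
 * @device_type: device type
 * @phy_data: pointer to location to receive read value
 *
 * Direct PHY register access is not implemented here; always returns
 * IXGBE_NOT_IMPLEMENTED.
 */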
static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
				     u32 device_type, u16 *phy_data)
{
	return IXGBE_NOT_IMPLEMENTED;
}

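/**
 * ixgbe_write_phy_reg_x550em - Stub PHY register write
 * @hw: pointer to hardware structure
 * @reg_addr: 32 bit address of PHY register to write
 * @device_type: device type
 * @phy_data: data to write to the register
 *
 * Direct PHY register access is not implemented here; always returns
 * IXGBE_NOT_IMPLEMENTED.
 */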
static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
				      u32 device_type, u16 phy_data)
{
	return IXGBE_NOT_IMPLEMENTED;
}

/**
 * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation
 * @hw: pointer to the hardware structure
 * @addr: I2C bus address to read from
 * @reg: I2C device register to read from
 * @val: pointer to location to receive read value
 *
 * Returns an error code on error.
 **/
static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr,
					   u16 reg, u16 *val)
{
	return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true);
}

/**
 * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation
 * @hw: pointer to the hardware structure
 * @addr: I2C bus address to read from
 * @reg: I2C device register to read from
 * @val: pointer to location to receive read value
 *
 * Returns an error code on error.
 **/
static s32
ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr,
					 u16 reg, u16 *val)
{
	return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false);
}

/**
 * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation
 * @hw: pointer to the hardware structure
 * @addr: I2C bus address to write to
 * @reg: I2C device register to write to
 * @val: value to write
 *
 * Returns an error code on error.
 **/
static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw,
					    u8 addr, u16 reg, u16 val)
{
	return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true);
}

/**
 * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation
 * @hw: pointer to the hardware structure
 * @addr: I2C bus address to write to
 * @reg: I2C device register to write to
 * @val: value to write
 *
 * Returns an error code on error.
 **/
static s32
ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
					  u8 addr, u16 reg, u16 val)
{
	return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false);
}

/**
 * ixgbe_fw_phy_activity - Perform an activity on a PHY
 * @hw: pointer to hardware structure
 * @activity: activity to perform
 * @data: Pointer to 4 32-bit words of data
 */
s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
			  u32 (*data)[FW_PHY_ACT_DATA_COUNT])
{
	union {
		struct ixgbe_hic_phy_activity_req cmd;
		struct ixgbe_hic_phy_activity_resp rsp;
	} hic;
	u16 retries = FW_PHY_ACT_RETRIES;
	s32 rc;
	u32 i;

	do {
		memset(&hic, 0, sizeof(hic));
		hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD;
		hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN;
		hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
		hic.cmd.port_number = hw->bus.lan_id;
		hic.cmd.activity_id = cpu_to_le16(activity);
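		/* Data words are exchanged with the firmware in big-endian order. */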
		for (i = 0; i < ARRAY_SIZE(hic.cmd.data); ++i)
			hic.cmd.data[i] = cpu_to_be32((*data)[i]);

		rc = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd),
						  IXGBE_HI_COMMAND_TIMEOUT,
						  true);
		if (rc)
			return rc;
		if (hic.rsp.hdr.cmd_or_resp.ret_status ==
		    FW_CEM_RESP_STATUS_SUCCESS) {
			for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
				(*data)[i] = be32_to_cpu(hic.rsp.data[i]);
			return 0;
		}
		usleep_range(20, 30);
		--retries;
	} while (retries > 0);

	return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}

static const struct {
	u16 fw_speed;
	ixgbe_link_speed phy_speed;
} ixgbe_fw_map[] = {
	{ FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL },
	{ FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL },
	{ FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL },
	{ FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL },
	{ FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL },
	{ FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL },
};

/**
 * ixgbe_get_phy_id_fw - Get the phy ID via firmware command
 * @hw: pointer to hardware structure
 *
 * Returns error code
 */
static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
{
	u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
	u16 phy_speeds;
	u16 phy_id_lo;
	s32 rc;
	u16 i;

	if (hw->phy.id)
		return 0;

	rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
	if (rc)
		return rc;

	hw->phy.speeds_supported = 0;
	phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
	for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) {
		if (phy_speeds & ixgbe_fw_map[i].fw_speed)
			hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed;
	}

	hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK;
	phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK;
	hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
	hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
	if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
		return IXGBE_ERR_PHY_ADDR_INVALID;

	hw->phy.autoneg_advertised = hw->phy.speeds_supported;
	hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
				       IXGBE_LINK_SPEED_1GB_FULL;
	hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
	return 0;
}

/**
 * ixgbe_identify_phy_fw - Get PHY type based on firmware command
 * @hw: pointer to hardware structure
 *
 * Returns error code
 */
static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
{
	if (hw->bus.lan_id)
		hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
	else
		hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;

	hw->phy.type = ixgbe_phy_fw;
	hw->phy.ops.read_reg = NULL;
	hw->phy.ops.write_reg = NULL;
	return ixgbe_get_phy_id_fw(hw);
}

/**
 * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY
 * @hw: pointer to hardware structure
 *
 * Returns error code
 */
static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
{
	u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };

	setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
	return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup);
}

/**
 * ixgbe_setup_fw_link - Setup firmware-controlled PHYs
 * @hw: pointer to hardware structure
 */
static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
{
	u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
	s32 rc;
	u16 i;

	if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
		return 0;

	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
		hw_err(hw, "rx_pause not valid in strict IEEE mode\n");
		return IXGBE_ERR_INVALID_LINK_SETTINGS;
	}

	switch (hw->fc.requested_mode) {
	case ixgbe_fc_full:
		setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
			    FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
		break;
	case ixgbe_fc_rx_pause:
		setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX <<
			    FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
		break;
	case ixgbe_fc_tx_pause:
		setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX <<
			    FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
		break;
	default:
		break;
	}

	for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) {
		if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
			setup[0] |= ixgbe_fw_map[i].fw_speed;
	}
	setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;

	if (hw->phy.eee_speeds_advertised)
		setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE;

	rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
	if (rc)
		return rc;
	if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
		return IXGBE_ERR_OVERTEMP;
	return 0;
}

/**
 * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs
 * @hw: pointer to hardware structure
 *
 * Called at init time to set up flow control.
 */
static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
{
	if (hw->fc.requested_mode == ixgbe_fc_default)
		hw->fc.requested_mode = ixgbe_fc_full;

	return ixgbe_setup_fw_link(hw);
}

/**
 * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
 * @hw: pointer to hardware structure
 *
 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
 * ixgbe_hw struct in order to set up EEPROM access.
 **/
static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	u32 eec;
	u16 eeprom_size;

	if (eeprom->type == ixgbe_eeprom_uninitialized) {
		eeprom->semaphore_delay = 10;
		eeprom->type = ixgbe_flash;

		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
		eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
				    IXGBE_EEC_SIZE_SHIFT);
		eeprom->word_size = BIT(eeprom_size +
					IXGBE_EEPROM_WORD_SIZE_SHIFT);

		hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
		       eeprom->type, eeprom->word_size);
	}

	return 0;
}

/**
 * ixgbe_iosf_wait - Wait for IOSF command completion
 * @hw: pointer to hardware structure
 * @ctrl: pointer to location to receive final IOSF control value
 *
 * Return: failing status on timeout
 *
 * Note: ctrl can be NULL if the IOSF control register value is not needed
 */
static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
{
	u32 i, command;

	/* Check every 10 usec to see if the address cycle completed.
	 * The SB IOSF BUSY bit will clear when the operation is
	 * complete.
	 */
	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
		command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
		if (!(command & IXGBE_SB_IOSF_CTRL_BUSY))
			break;
		udelay(10);
	}
	if (ctrl)
		*ctrl = command;
	if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
		hw_dbg(hw, "IOSF wait timed out\n");
		return IXGBE_ERR_PHY;
	}

	return 0;
}

/**
 * ixgbe_read_iosf_sb_reg_x550 - Read a value from the specified register
 * of the IOSF device
 * @hw: pointer to hardware structure
 * @reg_addr: 32 bit PHY register to read
 * @device_type: 3 bit device type
 * @data: pointer to location to receive the read value
 **/
static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
				       u32 device_type, u32 *data)
{
	u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
	u32 command, error;
	s32 ret;

	ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
	if (ret)
		return ret;

	ret = ixgbe_iosf_wait(hw, NULL);
	if (ret)
		goto out;

	command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
		   (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));

	/* Write IOSF control register */
	IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);

	ret = ixgbe_iosf_wait(hw, &command);

	if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
		error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
			IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
		hw_dbg(hw, "Failed to read, error %x\n", error);
		return IXGBE_ERR_PHY;
	}

	if (!ret)
		*data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);

out:
	hw->mac.ops.release_swfw_sync(hw, gssr);
	return ret;
}

/**
 * ixgbe_get_phy_token - Get the token for shared PHY access
 * @hw: Pointer to hardware structure
 */
static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
{
	struct ixgbe_hic_phy_token_req token_cmd;
	s32 status;

	token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
	token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
	token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
	token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
	token_cmd.port_number = hw->bus.lan_id;
	token_cmd.command_type = FW_PHY_TOKEN_REQ;
	token_cmd.pad = 0;
	status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd),
					      IXGBE_HI_COMMAND_TIMEOUT,
					      true);
	if (status)
		return status;
	if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
		return 0;
	if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY)
		return IXGBE_ERR_FW_RESP_INVALID;

	return IXGBE_ERR_TOKEN_RETRY;
}

/**
 * ixgbe_put_phy_token - Put the token for shared PHY access
 * @hw: Pointer to hardware structure
 */
static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
{
	struct ixgbe_hic_phy_token_req token_cmd;
	s32 status;

	token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
	token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
	token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
	token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
	token_cmd.port_number = hw->bus.lan_id;
	token_cmd.command_type = FW_PHY_TOKEN_REL;
	token_cmd.pad = 0;
	status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd),
					      IXGBE_HI_COMMAND_TIMEOUT,
					      true);
	if (status)
		return status;
	if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
		return 0;
	return IXGBE_ERR_FW_RESP_INVALID;
}

/**
 * ixgbe_write_iosf_sb_reg_x550a - Write to IOSF PHY register
 * @hw: pointer to hardware structure
 * @reg_addr: 32 bit PHY register to write
 * @device_type: 3 bit device type
 * @data: Data to write to the register
 **/
static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
					 __always_unused u32 device_type,
					 u32 data)
{
	struct ixgbe_hic_internal_phy_req write_cmd;

	memset(&write_cmd, 0, sizeof(write_cmd));
	write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
	write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
	write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
	write_cmd.port_number = hw->bus.lan_id;
	write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
	write_cmd.address = cpu_to_be16(reg_addr);
	write_cmd.write_data = cpu_to_be32(data);

	return ixgbe_host_interface_command(hw, &write_cmd, sizeof(write_cmd),
					    IXGBE_HI_COMMAND_TIMEOUT, false);
}

/**
 * ixgbe_read_iosf_sb_reg_x550a - Read from IOSF PHY register
 * @hw: pointer to hardware structure
 * @reg_addr: 32 bit PHY register to read
 * @device_type: 3 bit device type
 * @data: Pointer to read data from the register
 **/
static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
					__always_unused u32 device_type,
					u32 *data)
{
	union {
		struct ixgbe_hic_internal_phy_req cmd;
		struct ixgbe_hic_internal_phy_resp rsp;
	} hic;
	s32 status;

	memset(&hic, 0, sizeof(hic));
	hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
	hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
	hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
	hic.cmd.port_number = hw->bus.lan_id;
	hic.cmd.command_type = FW_INT_PHY_REQ_READ;
	hic.cmd.address = cpu_to_be16(reg_addr);

	status = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd),
					      IXGBE_HI_COMMAND_TIMEOUT, true);

	/* Extract the register value from the response. */
	*data = be32_to_cpu(hic.rsp.read_data);

	return status;
}

/**
 * ixgbe_read_ee_hostif_buffer_X550 - Read EEPROM word(s) using hostif
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @words: number of words
 * @data: word(s) read from the EEPROM
 *
 * Reads 16 bit word(s) from the EEPROM using the hostif.
 **/
static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
					    u16 offset, u16 words, u16 *data)
{
	const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
	struct ixgbe_hic_read_shadow_ram buffer;
	u32 current_word = 0;
	u16 words_to_read;
	s32 status;
	u32 i;

	/* Take semaphore for the entire operation. */
	status = hw->mac.ops.acquire_swfw_sync(hw, mask);
	if (status) {
		hw_dbg(hw, "EEPROM read buffer - semaphore failed\n");
		return status;
	}

	while (words) {
		if (words > FW_MAX_READ_BUFFER_SIZE / 2)
			words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
		else
			words_to_read = words;

		buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
		buffer.hdr.req.buf_lenh = 0;
		buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
		buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;

		/* convert offset from words to bytes */
		buffer.address = (__force u32)cpu_to_be32((offset +
							   current_word) * 2);
		buffer.length = (__force u16)cpu_to_be16(words_to_read * 2);
		buffer.pad2 = 0;
		buffer.pad3 = 0;

		status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
					    IXGBE_HI_COMMAND_TIMEOUT);
		if (status) {
			hw_dbg(hw, "Host interface command failed\n");
			goto out;
		}

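		/* Each 32-bit FLEX_MNG register read returns two 16-bit EEPROM words. */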
		for (i = 0; i < words_to_read; i++) {
			u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) +
				  2 * i;
			u32 value = IXGBE_READ_REG(hw, reg);

			data[current_word] = (u16)(value & 0xffff);
			current_word++;
			i++;
			if (i < words_to_read) {
				value >>= 16;
				data[current_word] = (u16)(value & 0xffff);
				current_word++;
			}
		}
		words -= words_to_read;
	}

out:
	hw->mac.ops.release_swfw_sync(hw, mask);
	return status;
}

/**
 * ixgbe_checksum_ptr_x550 - Checksum one pointer region
 * @hw: pointer to hardware structure
 * @ptr: pointer offset in eeprom
 * @size: size of section pointed by ptr, if 0 first word will be used as size
 * @csum: address of checksum to update
 * @buffer: pointer to buffer containing EEPROM image, or NULL to read it
 * @buffer_size: size of @buffer in words
 *
 * Returns error status for any failure
 **/
static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr,
				   u16 size, u16 *csum, u16 *buffer,
				   u32 buffer_size)
{
	u16 buf[256];
	s32 status;
	u16 length, bufsz, i, start;
	u16 *local_buffer;

	bufsz = ARRAY_SIZE(buf);

	/* Read a chunk at the pointer location */
	if (!buffer) {
		status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf);
		if (status) {
			hw_dbg(hw, "Failed to read EEPROM image\n");
			return status;
		}
		local_buffer = buf;
	} else {
		if (buffer_size < ptr)
			return IXGBE_ERR_PARAM;
		local_buffer = &buffer[ptr];
	}

	if (size) {
		start = 0;
		length = size;
	} else {
		start = 1;
		length = local_buffer[0];

		/* Skip pointer section if length is invalid. */
		if (length == 0xFFFF || length == 0 ||
		    (ptr + length) >= hw->eeprom.word_size)
			return 0;
	}

	if (buffer && ((u32)start + (u32)length > buffer_size))
		return IXGBE_ERR_PARAM;

	for (i = start; length; i++, length--) {
		if (i == bufsz && !buffer) {
			ptr += bufsz;
			i = 0;
			if (length < bufsz)
				bufsz = length;

			/* Read a chunk at the pointer location */
			status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr,
								  bufsz, buf);
			if (status) {
				hw_dbg(hw, "Failed to read EEPROM image\n");
				return status;
			}
		}
		*csum += local_buffer[i];
	}
	return 0;
}

/**
 * ixgbe_calc_checksum_X550 - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @buffer: pointer to buffer containing EEPROM contents, or NULL to read them
 * @buffer_size: size of buffer
 *
 * Returns a negative error code on error, or the 16-bit checksum
 **/
static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer,
				    u32 buffer_size)
{
	u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1];
	u16 *local_buffer;
	s32 status;
	u16 checksum = 0;
	u16 pointer, i, size;

	hw->eeprom.ops.init_params(hw);

	if (!buffer) {
		/* Read pointer area */
		status = ixgbe_read_ee_hostif_buffer_X550(hw, 0,
						IXGBE_EEPROM_LAST_WORD + 1,
						eeprom_ptrs);
		if (status) {
			hw_dbg(hw, "Failed to read EEPROM image\n");
			return status;
		}
		local_buffer = eeprom_ptrs;
	} else {
		if (buffer_size < IXGBE_EEPROM_LAST_WORD)
			return IXGBE_ERR_PARAM;
		local_buffer = buffer;
	}

	/* For X550 hardware include 0x0-0x41 in the checksum, skip the
	 * checksum word itself
	 */
	for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++)
		if (i != IXGBE_EEPROM_CHECKSUM)
			checksum += local_buffer[i];

	/* Include all data from pointers 0x3, 0x6-0xE. This excludes the
	 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
	 */
	for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) {
		if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
			continue;

		pointer = local_buffer[i];

		/* Skip pointer section if the pointer is invalid. */
		if (pointer == 0xFFFF || pointer == 0 ||
		    pointer >= hw->eeprom.word_size)
			continue;

		switch (i) {
		case IXGBE_PCIE_GENERAL_PTR:
			size = IXGBE_IXGBE_PCIE_GENERAL_SIZE;
			break;
		case IXGBE_PCIE_CONFIG0_PTR:
		case IXGBE_PCIE_CONFIG1_PTR:
			size = IXGBE_PCIE_CONFIG_SIZE;
			break;
		default:
			size = 0;
			break;
		}

		status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum,
						 buffer, buffer_size);
		if (status)
			return status;
	}

	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	return (s32)checksum;
}

/**
 * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 *
 * Returns a negative error code on error, or the 16-bit checksum
 **/
static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
{
	return ixgbe_calc_checksum_X550(hw, NULL, 0);
}

/**
 * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: word read from the EEPROM
 *
 * Reads a 16 bit word from the EEPROM using the hostif.
 **/
static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
	struct ixgbe_hic_read_shadow_ram buffer;
	s32 status;

	buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
	buffer.hdr.req.buf_lenh = 0;
	buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;

	/* convert offset from words to bytes */
	buffer.address = (__force u32)cpu_to_be32(offset * 2);
	/* one word */
	buffer.length = (__force u16)cpu_to_be16(sizeof(u16));

	status = hw->mac.ops.acquire_swfw_sync(hw, mask);
	if (status)
		return status;

	status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
				    IXGBE_HI_COMMAND_TIMEOUT);
	if (!status) {
		*data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
						  FW_NVM_DATA_OFFSET);
	}

	hw->mac.ops.release_swfw_sync(hw, mask);
	return status;
}

1112*4882a593Smuzhiyun /** ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum
1113*4882a593Smuzhiyun * @hw: pointer to hardware structure
1114*4882a593Smuzhiyun * @checksum_val: calculated checksum
1115*4882a593Smuzhiyun *
1116*4882a593Smuzhiyun * Performs checksum calculation and validates the EEPROM checksum. If the
1117*4882a593Smuzhiyun * caller does not need checksum_val, the value can be NULL.
1118*4882a593Smuzhiyun **/
ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw * hw,u16 * checksum_val)1119*4882a593Smuzhiyun static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw,
1120*4882a593Smuzhiyun u16 *checksum_val)
1121*4882a593Smuzhiyun {
1122*4882a593Smuzhiyun s32 status;
1123*4882a593Smuzhiyun u16 checksum;
1124*4882a593Smuzhiyun u16 read_checksum = 0;
1125*4882a593Smuzhiyun
1126*4882a593Smuzhiyun /* Read the first word from the EEPROM. If this times out or fails, do
1127*4882a593Smuzhiyun * not continue or we could be in for a very long wait while every
1128*4882a593Smuzhiyun * EEPROM read fails
1129*4882a593Smuzhiyun */
1130*4882a593Smuzhiyun status = hw->eeprom.ops.read(hw, 0, &checksum);
1131*4882a593Smuzhiyun if (status) {
1132*4882a593Smuzhiyun hw_dbg(hw, "EEPROM read failed\n");
1133*4882a593Smuzhiyun return status;
1134*4882a593Smuzhiyun }
1135*4882a593Smuzhiyun
1136*4882a593Smuzhiyun status = hw->eeprom.ops.calc_checksum(hw);
1137*4882a593Smuzhiyun if (status < 0)
1138*4882a593Smuzhiyun return status;
1139*4882a593Smuzhiyun
1140*4882a593Smuzhiyun checksum = (u16)(status & 0xffff);
1141*4882a593Smuzhiyun
1142*4882a593Smuzhiyun status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
1143*4882a593Smuzhiyun &read_checksum);
1144*4882a593Smuzhiyun if (status)
1145*4882a593Smuzhiyun return status;
1146*4882a593Smuzhiyun
1147*4882a593Smuzhiyun /* Verify read checksum from EEPROM is the same as
1148*4882a593Smuzhiyun * calculated checksum
1149*4882a593Smuzhiyun */
1150*4882a593Smuzhiyun if (read_checksum != checksum) {
1151*4882a593Smuzhiyun status = IXGBE_ERR_EEPROM_CHECKSUM;
1152*4882a593Smuzhiyun hw_dbg(hw, "Invalid EEPROM checksum");
1153*4882a593Smuzhiyun }
1154*4882a593Smuzhiyun
1155*4882a593Smuzhiyun /* If the user cares, return the calculated checksum */
1156*4882a593Smuzhiyun if (checksum_val)
1157*4882a593Smuzhiyun *checksum_val = checksum;
1158*4882a593Smuzhiyun
1159*4882a593Smuzhiyun return status;
1160*4882a593Smuzhiyun }
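
/* Illustrative sketch, not upstream code: a typical probe-time use of
 * ixgbe_validate_eeprom_checksum_X550() above, passing NULL because the
 * caller only cares about pass/fail. The helper name is an assumption.
 */
static s32 __maybe_unused ixgbe_example_check_nvm(struct ixgbe_hw *hw)
{
	s32 status;

	status = ixgbe_validate_eeprom_checksum_X550(hw, NULL);
	if (status == IXGBE_ERR_EEPROM_CHECKSUM)
		hw_dbg(hw, "NVM checksum mismatch\n");

	return status;
}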
1161*4882a593Smuzhiyun
1162*4882a593Smuzhiyun /** ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
1163*4882a593Smuzhiyun * @hw: pointer to hardware structure
1164*4882a593Smuzhiyun * @offset: offset of word in the EEPROM to write
1165*4882a593Smuzhiyun * @data: word to write to the EEPROM
1166*4882a593Smuzhiyun *
1167*4882a593Smuzhiyun * Write a 16 bit word to the EEPROM using the hostif.
1168*4882a593Smuzhiyun **/
1169*4882a593Smuzhiyun static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
1170*4882a593Smuzhiyun u16 data)
1171*4882a593Smuzhiyun {
1172*4882a593Smuzhiyun s32 status;
1173*4882a593Smuzhiyun struct ixgbe_hic_write_shadow_ram buffer;
1174*4882a593Smuzhiyun
1175*4882a593Smuzhiyun buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD;
1176*4882a593Smuzhiyun buffer.hdr.req.buf_lenh = 0;
1177*4882a593Smuzhiyun buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN;
1178*4882a593Smuzhiyun buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
1179*4882a593Smuzhiyun
1180*4882a593Smuzhiyun /* one word */
1181*4882a593Smuzhiyun buffer.length = cpu_to_be16(sizeof(u16));
1182*4882a593Smuzhiyun buffer.data = data;
1183*4882a593Smuzhiyun buffer.address = cpu_to_be32(offset * 2);
1184*4882a593Smuzhiyun
1185*4882a593Smuzhiyun status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
1186*4882a593Smuzhiyun IXGBE_HI_COMMAND_TIMEOUT, false);
1187*4882a593Smuzhiyun return status;
1188*4882a593Smuzhiyun }
1189*4882a593Smuzhiyun
1190*4882a593Smuzhiyun /** ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
1191*4882a593Smuzhiyun * @hw: pointer to hardware structure
1192*4882a593Smuzhiyun * @offset: offset of word in the EEPROM to write
1193*4882a593Smuzhiyun * @data: word to write to the EEPROM
1194*4882a593Smuzhiyun *
1195*4882a593Smuzhiyun * Write a 16 bit word to the EEPROM using the hostif.
1196*4882a593Smuzhiyun **/
1197*4882a593Smuzhiyun static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data)
1198*4882a593Smuzhiyun {
1199*4882a593Smuzhiyun s32 status = 0;
1200*4882a593Smuzhiyun
1201*4882a593Smuzhiyun if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
1202*4882a593Smuzhiyun status = ixgbe_write_ee_hostif_data_X550(hw, offset, data);
1203*4882a593Smuzhiyun hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1204*4882a593Smuzhiyun } else {
1205*4882a593Smuzhiyun hw_dbg(hw, "write ee hostif failed to get semaphore");
1206*4882a593Smuzhiyun status = IXGBE_ERR_SWFW_SYNC;
1207*4882a593Smuzhiyun }
1208*4882a593Smuzhiyun
1209*4882a593Smuzhiyun return status;
1210*4882a593Smuzhiyun }
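
/* Illustrative sketch, not upstream code: a read-modify-write of one shadow
 * RAM word built on the two hostif helpers above. The helper name, offset and
 * mask parameters are assumptions for the example; a real caller must also
 * refresh the checksum afterwards (see ixgbe_update_eeprom_checksum_X550()
 * further below).
 */
static s32 __maybe_unused ixgbe_example_set_ee_bits(struct ixgbe_hw *hw,
						    u16 offset, u16 mask)
{
	u16 word = 0;
	s32 status;

	status = ixgbe_read_ee_hostif_X550(hw, offset, &word);
	if (status)
		return status;

	if ((word & mask) == mask)
		return 0;	/* bits already set, nothing to do */

	return ixgbe_write_ee_hostif_X550(hw, offset, word | mask);
}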
1211*4882a593Smuzhiyun
1212*4882a593Smuzhiyun /** ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device
1213*4882a593Smuzhiyun * @hw: pointer to hardware structure
1214*4882a593Smuzhiyun *
1215*4882a593Smuzhiyun * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash.
1216*4882a593Smuzhiyun **/
1217*4882a593Smuzhiyun static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
1218*4882a593Smuzhiyun {
1219*4882a593Smuzhiyun s32 status = 0;
1220*4882a593Smuzhiyun union ixgbe_hic_hdr2 buffer;
1221*4882a593Smuzhiyun
1222*4882a593Smuzhiyun buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD;
1223*4882a593Smuzhiyun buffer.req.buf_lenh = 0;
1224*4882a593Smuzhiyun buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
1225*4882a593Smuzhiyun buffer.req.checksum = FW_DEFAULT_CHECKSUM;
1226*4882a593Smuzhiyun
1227*4882a593Smuzhiyun status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
1228*4882a593Smuzhiyun IXGBE_HI_COMMAND_TIMEOUT, false);
1229*4882a593Smuzhiyun return status;
1230*4882a593Smuzhiyun }
1231*4882a593Smuzhiyun
1232*4882a593Smuzhiyun /**
1233*4882a593Smuzhiyun * ixgbe_get_bus_info_X550em - Set PCI bus info
1234*4882a593Smuzhiyun * @hw: pointer to hardware structure
1235*4882a593Smuzhiyun *
1236*4882a593Smuzhiyun * Sets bus link width and speed to unknown because X550em is
1237*4882a593Smuzhiyun * not a PCI device.
1238*4882a593Smuzhiyun **/
1239*4882a593Smuzhiyun static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
1240*4882a593Smuzhiyun {
1241*4882a593Smuzhiyun hw->bus.type = ixgbe_bus_type_internal;
1242*4882a593Smuzhiyun hw->bus.width = ixgbe_bus_width_unknown;
1243*4882a593Smuzhiyun hw->bus.speed = ixgbe_bus_speed_unknown;
1244*4882a593Smuzhiyun
1245*4882a593Smuzhiyun hw->mac.ops.set_lan_id(hw);
1246*4882a593Smuzhiyun
1247*4882a593Smuzhiyun return 0;
1248*4882a593Smuzhiyun }
1249*4882a593Smuzhiyun
1250*4882a593Smuzhiyun /**
1251*4882a593Smuzhiyun * ixgbe_fw_recovery_mode - Check FW NVM recovery mode
1252*4882a593Smuzhiyun * @hw: pointer to hardware structure
1253*4882a593Smuzhiyun *
1254*4882a593Smuzhiyun * Returns true if in FW NVM recovery mode.
1255*4882a593Smuzhiyun */
1256*4882a593Smuzhiyun static bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw)
1257*4882a593Smuzhiyun {
1258*4882a593Smuzhiyun u32 fwsm;
1259*4882a593Smuzhiyun
1260*4882a593Smuzhiyun fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
1261*4882a593Smuzhiyun return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE);
1262*4882a593Smuzhiyun }
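
/* Illustrative sketch, not upstream code: the usual guard built on
 * ixgbe_fw_recovery_mode_X550() above - skip NVM/flash work while the
 * firmware reports recovery mode. The helper name is an assumption.
 */
static bool __maybe_unused ixgbe_example_nvm_update_allowed(struct ixgbe_hw *hw)
{
	if (ixgbe_fw_recovery_mode_X550(hw)) {
		hw_dbg(hw, "FW in recovery mode, skipping NVM update\n");
		return false;
	}

	return true;
}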
1263*4882a593Smuzhiyun
1264*4882a593Smuzhiyun /** ixgbe_disable_rx_x550 - Disable RX unit
1265*4882a593Smuzhiyun * @hw: pointer to hardware structure
1266*4882a593Smuzhiyun * Disables the Rx DMA unit for x550
1267*4882a593Smuzhiyun **/
1268*4882a593Smuzhiyun static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
1269*4882a593Smuzhiyun {
1270*4882a593Smuzhiyun u32 rxctrl, pfdtxgswc;
1271*4882a593Smuzhiyun s32 status;
1272*4882a593Smuzhiyun struct ixgbe_hic_disable_rxen fw_cmd;
1273*4882a593Smuzhiyun
1274*4882a593Smuzhiyun rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1275*4882a593Smuzhiyun if (rxctrl & IXGBE_RXCTRL_RXEN) {
1276*4882a593Smuzhiyun pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
1277*4882a593Smuzhiyun if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
1278*4882a593Smuzhiyun pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
1279*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
1280*4882a593Smuzhiyun hw->mac.set_lben = true;
1281*4882a593Smuzhiyun } else {
1282*4882a593Smuzhiyun hw->mac.set_lben = false;
1283*4882a593Smuzhiyun }
1284*4882a593Smuzhiyun
1285*4882a593Smuzhiyun fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
1286*4882a593Smuzhiyun fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
1287*4882a593Smuzhiyun fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
1288*4882a593Smuzhiyun fw_cmd.port_number = hw->bus.lan_id;
1289*4882a593Smuzhiyun
1290*4882a593Smuzhiyun status = ixgbe_host_interface_command(hw, &fw_cmd,
1291*4882a593Smuzhiyun sizeof(struct ixgbe_hic_disable_rxen),
1292*4882a593Smuzhiyun IXGBE_HI_COMMAND_TIMEOUT, true);
1293*4882a593Smuzhiyun
1294*4882a593Smuzhiyun /* If we fail - disable RX using register write */
1295*4882a593Smuzhiyun if (status) {
1296*4882a593Smuzhiyun rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1297*4882a593Smuzhiyun if (rxctrl & IXGBE_RXCTRL_RXEN) {
1298*4882a593Smuzhiyun rxctrl &= ~IXGBE_RXCTRL_RXEN;
1299*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
1300*4882a593Smuzhiyun }
1301*4882a593Smuzhiyun }
1302*4882a593Smuzhiyun }
1303*4882a593Smuzhiyun }
1304*4882a593Smuzhiyun
1305*4882a593Smuzhiyun /** ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
1306*4882a593Smuzhiyun * @hw: pointer to hardware structure
1307*4882a593Smuzhiyun *
1308*4882a593Smuzhiyun * After writing EEPROM to shadow RAM using EEWR register, software calculates
1309*4882a593Smuzhiyun * checksum and updates the EEPROM and instructs the hardware to update
1310*4882a593Smuzhiyun * the flash.
1311*4882a593Smuzhiyun **/
1312*4882a593Smuzhiyun static s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw)
1313*4882a593Smuzhiyun {
1314*4882a593Smuzhiyun s32 status;
1315*4882a593Smuzhiyun u16 checksum = 0;
1316*4882a593Smuzhiyun
1317*4882a593Smuzhiyun /* Read the first word from the EEPROM. If this times out or fails, do
1318*4882a593Smuzhiyun * not continue or we could be in for a very long wait while every
1319*4882a593Smuzhiyun * EEPROM read fails
1320*4882a593Smuzhiyun */
1321*4882a593Smuzhiyun status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum);
1322*4882a593Smuzhiyun if (status) {
1323*4882a593Smuzhiyun hw_dbg(hw, "EEPROM read failed\n");
1324*4882a593Smuzhiyun return status;
1325*4882a593Smuzhiyun }
1326*4882a593Smuzhiyun
1327*4882a593Smuzhiyun status = ixgbe_calc_eeprom_checksum_X550(hw);
1328*4882a593Smuzhiyun if (status < 0)
1329*4882a593Smuzhiyun return status;
1330*4882a593Smuzhiyun
1331*4882a593Smuzhiyun checksum = (u16)(status & 0xffff);
1332*4882a593Smuzhiyun
1333*4882a593Smuzhiyun status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM,
1334*4882a593Smuzhiyun checksum);
1335*4882a593Smuzhiyun if (status)
1336*4882a593Smuzhiyun return status;
1337*4882a593Smuzhiyun
1338*4882a593Smuzhiyun status = ixgbe_update_flash_X550(hw);
1339*4882a593Smuzhiyun
1340*4882a593Smuzhiyun return status;
1341*4882a593Smuzhiyun }
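
/* Illustrative sketch, not upstream code: the complete "update one word"
 * sequence - write the word to shadow RAM, then recompute and store the
 * checksum, which also triggers the flash update. The helper name and
 * parameters are assumptions for the example.
 */
static s32 __maybe_unused ixgbe_example_update_ee_word(struct ixgbe_hw *hw,
						       u16 offset, u16 data)
{
	s32 status;

	status = ixgbe_write_ee_hostif_X550(hw, offset, data);
	if (status)
		return status;

	/* Keep the stored checksum consistent with the new contents. */
	return ixgbe_update_eeprom_checksum_X550(hw);
}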
1342*4882a593Smuzhiyun
1343*4882a593Smuzhiyun /** ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif
1344*4882a593Smuzhiyun * @hw: pointer to hardware structure
1345*4882a593Smuzhiyun * @offset: offset of word in the EEPROM to write
1346*4882a593Smuzhiyun * @words: number of words
1347*4882a593Smuzhiyun * @data: word(s) to write to the EEPROM
1348*4882a593Smuzhiyun *
1349*4882a593Smuzhiyun * Write one or more 16 bit words to the EEPROM using the hostif.
1351*4882a593Smuzhiyun **/
1352*4882a593Smuzhiyun static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
1353*4882a593Smuzhiyun u16 offset, u16 words,
1354*4882a593Smuzhiyun u16 *data)
1355*4882a593Smuzhiyun {
1356*4882a593Smuzhiyun s32 status = 0;
1357*4882a593Smuzhiyun u32 i = 0;
1358*4882a593Smuzhiyun
1359*4882a593Smuzhiyun /* Take semaphore for the entire operation. */
1360*4882a593Smuzhiyun status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1361*4882a593Smuzhiyun if (status) {
1362*4882a593Smuzhiyun hw_dbg(hw, "EEPROM write buffer - semaphore failed\n");
1363*4882a593Smuzhiyun return status;
1364*4882a593Smuzhiyun }
1365*4882a593Smuzhiyun
1366*4882a593Smuzhiyun for (i = 0; i < words; i++) {
1367*4882a593Smuzhiyun status = ixgbe_write_ee_hostif_data_X550(hw, offset + i,
1368*4882a593Smuzhiyun data[i]);
1369*4882a593Smuzhiyun if (status) {
1370*4882a593Smuzhiyun hw_dbg(hw, "Eeprom buffered write failed\n");
1371*4882a593Smuzhiyun break;
1372*4882a593Smuzhiyun }
1373*4882a593Smuzhiyun }
1374*4882a593Smuzhiyun
1375*4882a593Smuzhiyun hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1376*4882a593Smuzhiyun
1377*4882a593Smuzhiyun return status;
1378*4882a593Smuzhiyun }
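
/* Illustrative sketch, not upstream code: writing a small block of words with
 * the buffered helper above and committing the result once at the end. The
 * helper name and parameters are assumptions for the example.
 */
static s32 __maybe_unused ixgbe_example_write_ee_block(struct ixgbe_hw *hw,
						       u16 offset, u16 words,
						       u16 *data)
{
	s32 status;

	status = ixgbe_write_ee_hostif_buffer_X550(hw, offset, words, data);
	if (status)
		return status;

	/* One checksum/flash update covers the whole block. */
	return ixgbe_update_eeprom_checksum_X550(hw);
}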
1379*4882a593Smuzhiyun
1380*4882a593Smuzhiyun /** ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the
1381*4882a593Smuzhiyun * IOSF device
1382*4882a593Smuzhiyun *
1383*4882a593Smuzhiyun * @hw: pointer to hardware structure
1384*4882a593Smuzhiyun * @reg_addr: 32 bit PHY register to write
1385*4882a593Smuzhiyun * @device_type: 3 bit device type
1386*4882a593Smuzhiyun * @data: Data to write to the register
1387*4882a593Smuzhiyun **/
1388*4882a593Smuzhiyun static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
1389*4882a593Smuzhiyun u32 device_type, u32 data)
1390*4882a593Smuzhiyun {
1391*4882a593Smuzhiyun u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
1392*4882a593Smuzhiyun u32 command, error;
1393*4882a593Smuzhiyun s32 ret;
1394*4882a593Smuzhiyun
1395*4882a593Smuzhiyun ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
1396*4882a593Smuzhiyun if (ret)
1397*4882a593Smuzhiyun return ret;
1398*4882a593Smuzhiyun
1399*4882a593Smuzhiyun ret = ixgbe_iosf_wait(hw, NULL);
1400*4882a593Smuzhiyun if (ret)
1401*4882a593Smuzhiyun goto out;
1402*4882a593Smuzhiyun
1403*4882a593Smuzhiyun command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
1404*4882a593Smuzhiyun (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
1405*4882a593Smuzhiyun
1406*4882a593Smuzhiyun /* Write IOSF control register */
1407*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
1408*4882a593Smuzhiyun
1409*4882a593Smuzhiyun /* Write IOSF data register */
1410*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
1411*4882a593Smuzhiyun
1412*4882a593Smuzhiyun ret = ixgbe_iosf_wait(hw, &command);
1413*4882a593Smuzhiyun
1414*4882a593Smuzhiyun if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
1415*4882a593Smuzhiyun error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
1416*4882a593Smuzhiyun IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT;
1417*4882a593Smuzhiyun hw_dbg(hw, "Failed to write, error %x\n", error);
1418*4882a593Smuzhiyun ret = IXGBE_ERR_PHY;
1419*4882a593Smuzhiyun }
1420*4882a593Smuzhiyun
1421*4882a593Smuzhiyun out:
1422*4882a593Smuzhiyun hw->mac.ops.release_swfw_sync(hw, gssr);
1423*4882a593Smuzhiyun return ret;
1424*4882a593Smuzhiyun }
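
/* Illustrative sketch, not upstream code: the read-modify-write pattern used
 * throughout this file for KR PHY registers behind the IOSF sideband. The
 * helper name is an assumption; ixgbe_read_iosf_sb_reg_x550() is defined
 * earlier in this file.
 */
static s32 __maybe_unused ixgbe_example_krm_set_bits(struct ixgbe_hw *hw,
						     u32 reg_addr, u32 mask)
{
	u32 reg_val;
	s32 status;

	status = ixgbe_read_iosf_sb_reg_x550(hw, reg_addr,
					     IXGBE_SB_IOSF_TARGET_KR_PHY,
					     &reg_val);
	if (status)
		return status;

	reg_val |= mask;
	return ixgbe_write_iosf_sb_reg_x550(hw, reg_addr,
					    IXGBE_SB_IOSF_TARGET_KR_PHY,
					    reg_val);
}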
1425*4882a593Smuzhiyun
1426*4882a593Smuzhiyun /**
1427*4882a593Smuzhiyun * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration
1428*4882a593Smuzhiyun * @hw: pointer to hardware structure
1429*4882a593Smuzhiyun *
1430*4882a593Smuzhiyun * iXFI configuration needed for ixgbe_mac_X550EM_x devices.
1431*4882a593Smuzhiyun **/
1432*4882a593Smuzhiyun static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
1433*4882a593Smuzhiyun {
1434*4882a593Smuzhiyun s32 status;
1435*4882a593Smuzhiyun u32 reg_val;
1436*4882a593Smuzhiyun
1437*4882a593Smuzhiyun /* Disable training protocol FSM. */
1438*4882a593Smuzhiyun status = ixgbe_read_iosf_sb_reg_x550(hw,
1439*4882a593Smuzhiyun IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
1440*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
1441*4882a593Smuzhiyun if (status)
1442*4882a593Smuzhiyun return status;
1443*4882a593Smuzhiyun
1444*4882a593Smuzhiyun reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
1445*4882a593Smuzhiyun status = ixgbe_write_iosf_sb_reg_x550(hw,
1446*4882a593Smuzhiyun IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
1447*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1448*4882a593Smuzhiyun if (status)
1449*4882a593Smuzhiyun return status;
1450*4882a593Smuzhiyun
1451*4882a593Smuzhiyun /* Disable Flex from training TXFFE. */
1452*4882a593Smuzhiyun status = ixgbe_read_iosf_sb_reg_x550(hw,
1453*4882a593Smuzhiyun IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
1454*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
1455*4882a593Smuzhiyun if (status)
1456*4882a593Smuzhiyun return status;
1457*4882a593Smuzhiyun
1458*4882a593Smuzhiyun reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
1459*4882a593Smuzhiyun reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
1460*4882a593Smuzhiyun reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
1461*4882a593Smuzhiyun status = ixgbe_write_iosf_sb_reg_x550(hw,
1462*4882a593Smuzhiyun IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
1463*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1464*4882a593Smuzhiyun if (status)
1465*4882a593Smuzhiyun return status;
1466*4882a593Smuzhiyun
1467*4882a593Smuzhiyun status = ixgbe_read_iosf_sb_reg_x550(hw,
1468*4882a593Smuzhiyun IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
1469*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
1470*4882a593Smuzhiyun if (status)
1471*4882a593Smuzhiyun return status;
1472*4882a593Smuzhiyun
1473*4882a593Smuzhiyun reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
1474*4882a593Smuzhiyun reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
1475*4882a593Smuzhiyun reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
1476*4882a593Smuzhiyun status = ixgbe_write_iosf_sb_reg_x550(hw,
1477*4882a593Smuzhiyun IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
1478*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1479*4882a593Smuzhiyun if (status)
1480*4882a593Smuzhiyun return status;
1481*4882a593Smuzhiyun
1482*4882a593Smuzhiyun /* Enable override for coefficients. */
1483*4882a593Smuzhiyun status = ixgbe_read_iosf_sb_reg_x550(hw,
1484*4882a593Smuzhiyun IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
1485*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
1486*4882a593Smuzhiyun if (status)
1487*4882a593Smuzhiyun return status;
1488*4882a593Smuzhiyun
1489*4882a593Smuzhiyun reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN;
1490*4882a593Smuzhiyun reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
1491*4882a593Smuzhiyun reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
1492*4882a593Smuzhiyun reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
1493*4882a593Smuzhiyun status = ixgbe_write_iosf_sb_reg_x550(hw,
1494*4882a593Smuzhiyun IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
1495*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1496*4882a593Smuzhiyun return status;
1497*4882a593Smuzhiyun }
1498*4882a593Smuzhiyun
1499*4882a593Smuzhiyun /**
1500*4882a593Smuzhiyun * ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the
1501*4882a593Smuzhiyun * internal PHY
1502*4882a593Smuzhiyun * @hw: pointer to hardware structure
1503*4882a593Smuzhiyun **/
1504*4882a593Smuzhiyun static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
1505*4882a593Smuzhiyun {
1506*4882a593Smuzhiyun s32 status;
1507*4882a593Smuzhiyun u32 link_ctrl;
1508*4882a593Smuzhiyun
1509*4882a593Smuzhiyun /* Restart auto-negotiation. */
1510*4882a593Smuzhiyun status = hw->mac.ops.read_iosf_sb_reg(hw,
1511*4882a593Smuzhiyun IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1512*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl);
1513*4882a593Smuzhiyun
1514*4882a593Smuzhiyun if (status) {
1515*4882a593Smuzhiyun hw_dbg(hw, "Auto-negotiation did not complete\n");
1516*4882a593Smuzhiyun return status;
1517*4882a593Smuzhiyun }
1518*4882a593Smuzhiyun
1519*4882a593Smuzhiyun link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
1520*4882a593Smuzhiyun status = hw->mac.ops.write_iosf_sb_reg(hw,
1521*4882a593Smuzhiyun IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1522*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl);
1523*4882a593Smuzhiyun
1524*4882a593Smuzhiyun if (hw->mac.type == ixgbe_mac_x550em_a) {
1525*4882a593Smuzhiyun u32 flx_mask_st20;
1526*4882a593Smuzhiyun
1527*4882a593Smuzhiyun /* Indicate to FW that AN restart has been asserted */
1528*4882a593Smuzhiyun status = hw->mac.ops.read_iosf_sb_reg(hw,
1529*4882a593Smuzhiyun IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1530*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20);
1531*4882a593Smuzhiyun
1532*4882a593Smuzhiyun if (status) {
1533*4882a593Smuzhiyun hw_dbg(hw, "Auto-negotiation did not complete\n");
1534*4882a593Smuzhiyun return status;
1535*4882a593Smuzhiyun }
1536*4882a593Smuzhiyun
1537*4882a593Smuzhiyun flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART;
1538*4882a593Smuzhiyun status = hw->mac.ops.write_iosf_sb_reg(hw,
1539*4882a593Smuzhiyun IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1540*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20);
1541*4882a593Smuzhiyun }
1542*4882a593Smuzhiyun
1543*4882a593Smuzhiyun return status;
1544*4882a593Smuzhiyun }
1545*4882a593Smuzhiyun
1546*4882a593Smuzhiyun /** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
1547*4882a593Smuzhiyun * @hw: pointer to hardware structure
1548*4882a593Smuzhiyun * @speed: the link speed to force
1549*4882a593Smuzhiyun *
1550*4882a593Smuzhiyun * Configures the integrated KR PHY to use iXFI mode. Used to connect an
1551*4882a593Smuzhiyun * internal and external PHY at a specific speed, without autonegotiation.
1552*4882a593Smuzhiyun **/
1553*4882a593Smuzhiyun static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
1554*4882a593Smuzhiyun {
1555*4882a593Smuzhiyun struct ixgbe_mac_info *mac = &hw->mac;
1556*4882a593Smuzhiyun s32 status;
1557*4882a593Smuzhiyun u32 reg_val;
1558*4882a593Smuzhiyun
1559*4882a593Smuzhiyun /* iXFI is only supported with X552 */
1560*4882a593Smuzhiyun if (mac->type != ixgbe_mac_X550EM_x)
1561*4882a593Smuzhiyun return IXGBE_ERR_LINK_SETUP;
1562*4882a593Smuzhiyun
1563*4882a593Smuzhiyun /* Disable AN and force speed to 10G Serial. */
1564*4882a593Smuzhiyun status = ixgbe_read_iosf_sb_reg_x550(hw,
1565*4882a593Smuzhiyun IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1566*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
1567*4882a593Smuzhiyun if (status)
1568*4882a593Smuzhiyun return status;
1569*4882a593Smuzhiyun
1570*4882a593Smuzhiyun reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1571*4882a593Smuzhiyun reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1572*4882a593Smuzhiyun
1573*4882a593Smuzhiyun /* Select forced link speed for internal PHY. */
1574*4882a593Smuzhiyun switch (*speed) {
1575*4882a593Smuzhiyun case IXGBE_LINK_SPEED_10GB_FULL:
1576*4882a593Smuzhiyun reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
1577*4882a593Smuzhiyun break;
1578*4882a593Smuzhiyun case IXGBE_LINK_SPEED_1GB_FULL:
1579*4882a593Smuzhiyun reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1580*4882a593Smuzhiyun break;
1581*4882a593Smuzhiyun default:
1582*4882a593Smuzhiyun /* Other link speeds are not supported by internal KR PHY. */
1583*4882a593Smuzhiyun return IXGBE_ERR_LINK_SETUP;
1584*4882a593Smuzhiyun }
1585*4882a593Smuzhiyun
1586*4882a593Smuzhiyun status = ixgbe_write_iosf_sb_reg_x550(hw,
1587*4882a593Smuzhiyun IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1588*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1589*4882a593Smuzhiyun if (status)
1590*4882a593Smuzhiyun return status;
1591*4882a593Smuzhiyun
1592*4882a593Smuzhiyun /* Additional configuration needed for x550em_x */
1593*4882a593Smuzhiyun if (hw->mac.type == ixgbe_mac_X550EM_x) {
1594*4882a593Smuzhiyun status = ixgbe_setup_ixfi_x550em_x(hw);
1595*4882a593Smuzhiyun if (status)
1596*4882a593Smuzhiyun return status;
1597*4882a593Smuzhiyun }
1598*4882a593Smuzhiyun
1599*4882a593Smuzhiyun /* Toggle port SW reset by AN reset. */
1600*4882a593Smuzhiyun status = ixgbe_restart_an_internal_phy_x550em(hw);
1601*4882a593Smuzhiyun
1602*4882a593Smuzhiyun return status;
1603*4882a593Smuzhiyun }
1604*4882a593Smuzhiyun
1605*4882a593Smuzhiyun /**
1606*4882a593Smuzhiyun * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported
1607*4882a593Smuzhiyun * @hw: pointer to hardware structure
1608*4882a593Smuzhiyun * @linear: true if SFP module is linear
1609*4882a593Smuzhiyun */
1610*4882a593Smuzhiyun static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
1611*4882a593Smuzhiyun {
1612*4882a593Smuzhiyun switch (hw->phy.sfp_type) {
1613*4882a593Smuzhiyun case ixgbe_sfp_type_not_present:
1614*4882a593Smuzhiyun return IXGBE_ERR_SFP_NOT_PRESENT;
1615*4882a593Smuzhiyun case ixgbe_sfp_type_da_cu_core0:
1616*4882a593Smuzhiyun case ixgbe_sfp_type_da_cu_core1:
1617*4882a593Smuzhiyun *linear = true;
1618*4882a593Smuzhiyun break;
1619*4882a593Smuzhiyun case ixgbe_sfp_type_srlr_core0:
1620*4882a593Smuzhiyun case ixgbe_sfp_type_srlr_core1:
1621*4882a593Smuzhiyun case ixgbe_sfp_type_da_act_lmt_core0:
1622*4882a593Smuzhiyun case ixgbe_sfp_type_da_act_lmt_core1:
1623*4882a593Smuzhiyun case ixgbe_sfp_type_1g_sx_core0:
1624*4882a593Smuzhiyun case ixgbe_sfp_type_1g_sx_core1:
1625*4882a593Smuzhiyun case ixgbe_sfp_type_1g_lx_core0:
1626*4882a593Smuzhiyun case ixgbe_sfp_type_1g_lx_core1:
1627*4882a593Smuzhiyun *linear = false;
1628*4882a593Smuzhiyun break;
1629*4882a593Smuzhiyun case ixgbe_sfp_type_unknown:
1630*4882a593Smuzhiyun case ixgbe_sfp_type_1g_cu_core0:
1631*4882a593Smuzhiyun case ixgbe_sfp_type_1g_cu_core1:
1632*4882a593Smuzhiyun default:
1633*4882a593Smuzhiyun return IXGBE_ERR_SFP_NOT_SUPPORTED;
1634*4882a593Smuzhiyun }
1635*4882a593Smuzhiyun
1636*4882a593Smuzhiyun return 0;
1637*4882a593Smuzhiyun }
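
/* Illustrative sketch, not upstream code: how the linear/limiting answer from
 * ixgbe_supported_sfp_modules_X550em() above maps to a CS4227 EDC mode value,
 * mirroring the SFP setup paths below. The helper name and the 0 return on
 * error (meaning "leave the EDC mode untouched") are assumptions.
 */
static u16 __maybe_unused ixgbe_example_edc_mode(struct ixgbe_hw *hw)
{
	bool linear = false;

	if (ixgbe_supported_sfp_modules_X550em(hw, &linear))
		return 0;

	/* Linear (DA copper) modules use CX1 EDC, optical modules use SR EDC. */
	return linear ? (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1
		      : (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
}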
1638*4882a593Smuzhiyun
1639*4882a593Smuzhiyun /**
1640*4882a593Smuzhiyun * ixgbe_setup_mac_link_sfp_x550em - Configure the KR PHY for SFP.
1641*4882a593Smuzhiyun * @hw: pointer to hardware structure
1642*4882a593Smuzhiyun * @speed: the link speed to force
1643*4882a593Smuzhiyun * @autoneg_wait_to_complete: unused
1644*4882a593Smuzhiyun *
1645*4882a593Smuzhiyun * Configures the extern PHY and the integrated KR PHY for SFP support.
1646*4882a593Smuzhiyun */
1647*4882a593Smuzhiyun static s32
1648*4882a593Smuzhiyun ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
1649*4882a593Smuzhiyun ixgbe_link_speed speed,
1650*4882a593Smuzhiyun __always_unused bool autoneg_wait_to_complete)
1651*4882a593Smuzhiyun {
1652*4882a593Smuzhiyun s32 status;
1653*4882a593Smuzhiyun u16 reg_slice, reg_val;
1654*4882a593Smuzhiyun bool setup_linear = false;
1655*4882a593Smuzhiyun
1656*4882a593Smuzhiyun /* Check if SFP module is supported and linear */
1657*4882a593Smuzhiyun status = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
1658*4882a593Smuzhiyun
1659*4882a593Smuzhiyun /* If no SFP module present, then return success. Return success since
1660*4882a593Smuzhiyun * there is no reason to configure CS4227 and SFP not present error is
1661*4882a593Smuzhiyun * not accepted in the setup MAC link flow.
1662*4882a593Smuzhiyun */
1663*4882a593Smuzhiyun if (status == IXGBE_ERR_SFP_NOT_PRESENT)
1664*4882a593Smuzhiyun return 0;
1665*4882a593Smuzhiyun
1666*4882a593Smuzhiyun if (status)
1667*4882a593Smuzhiyun return status;
1668*4882a593Smuzhiyun
1669*4882a593Smuzhiyun /* Configure internal PHY for KR/KX. */
1670*4882a593Smuzhiyun ixgbe_setup_kr_speed_x550em(hw, speed);
1671*4882a593Smuzhiyun
1672*4882a593Smuzhiyun /* Configure CS4227 LINE side to proper mode. */
1673*4882a593Smuzhiyun reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12);
1674*4882a593Smuzhiyun if (setup_linear)
1675*4882a593Smuzhiyun reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
1676*4882a593Smuzhiyun else
1677*4882a593Smuzhiyun reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
1678*4882a593Smuzhiyun
1679*4882a593Smuzhiyun status = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
1680*4882a593Smuzhiyun reg_val);
1681*4882a593Smuzhiyun
1682*4882a593Smuzhiyun return status;
1683*4882a593Smuzhiyun }
1684*4882a593Smuzhiyun
1685*4882a593Smuzhiyun /**
1686*4882a593Smuzhiyun * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode
1687*4882a593Smuzhiyun * @hw: pointer to hardware structure
1688*4882a593Smuzhiyun * @speed: the link speed to force
1689*4882a593Smuzhiyun *
1690*4882a593Smuzhiyun * Configures the integrated PHY for native SFI mode. Used to connect the
1691*4882a593Smuzhiyun * internal PHY directly to an SFP cage, without autonegotiation.
1692*4882a593Smuzhiyun **/
1693*4882a593Smuzhiyun static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
1694*4882a593Smuzhiyun {
1695*4882a593Smuzhiyun struct ixgbe_mac_info *mac = &hw->mac;
1696*4882a593Smuzhiyun s32 status;
1697*4882a593Smuzhiyun u32 reg_val;
1698*4882a593Smuzhiyun
1699*4882a593Smuzhiyun /* Disable all AN and force speed to 10G Serial. */
1700*4882a593Smuzhiyun status = mac->ops.read_iosf_sb_reg(hw,
1701*4882a593Smuzhiyun IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1702*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
1703*4882a593Smuzhiyun if (status)
1704*4882a593Smuzhiyun return status;
1705*4882a593Smuzhiyun
1706*4882a593Smuzhiyun reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
1707*4882a593Smuzhiyun reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
1708*4882a593Smuzhiyun reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
1709*4882a593Smuzhiyun reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
1710*4882a593Smuzhiyun
1711*4882a593Smuzhiyun /* Select forced link speed for internal PHY. */
1712*4882a593Smuzhiyun switch (*speed) {
1713*4882a593Smuzhiyun case IXGBE_LINK_SPEED_10GB_FULL:
1714*4882a593Smuzhiyun reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G;
1715*4882a593Smuzhiyun break;
1716*4882a593Smuzhiyun case IXGBE_LINK_SPEED_1GB_FULL:
1717*4882a593Smuzhiyun reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
1718*4882a593Smuzhiyun break;
1719*4882a593Smuzhiyun default:
1720*4882a593Smuzhiyun /* Other link speeds are not supported by internal PHY. */
1721*4882a593Smuzhiyun return IXGBE_ERR_LINK_SETUP;
1722*4882a593Smuzhiyun }
1723*4882a593Smuzhiyun
1724*4882a593Smuzhiyun status = mac->ops.write_iosf_sb_reg(hw,
1725*4882a593Smuzhiyun IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1726*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
1727*4882a593Smuzhiyun
1728*4882a593Smuzhiyun /* Toggle port SW reset by AN reset. */
1729*4882a593Smuzhiyun status = ixgbe_restart_an_internal_phy_x550em(hw);
1730*4882a593Smuzhiyun
1731*4882a593Smuzhiyun return status;
1732*4882a593Smuzhiyun }
1733*4882a593Smuzhiyun
1734*4882a593Smuzhiyun /**
1735*4882a593Smuzhiyun * ixgbe_setup_mac_link_sfp_n - Setup internal PHY for native SFP
1736*4882a593Smuzhiyun * @hw: pointer to hardware structure
1737*4882a593Smuzhiyun * @speed: link speed
1738*4882a593Smuzhiyun * @autoneg_wait_to_complete: unused
1739*4882a593Smuzhiyun *
1740*4882a593Smuzhiyun * Configure the integrated PHY for native SFP support.
1741*4882a593Smuzhiyun */
1742*4882a593Smuzhiyun static s32
1743*4882a593Smuzhiyun ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1744*4882a593Smuzhiyun __always_unused bool autoneg_wait_to_complete)
1745*4882a593Smuzhiyun {
1746*4882a593Smuzhiyun bool setup_linear = false;
1747*4882a593Smuzhiyun u32 reg_phy_int;
1748*4882a593Smuzhiyun s32 ret_val;
1749*4882a593Smuzhiyun
1750*4882a593Smuzhiyun /* Check if SFP module is supported and linear */
1751*4882a593Smuzhiyun ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
1752*4882a593Smuzhiyun
1753*4882a593Smuzhiyun /* If no SFP module present, then return success. Return success since
1754*4882a593Smuzhiyun * SFP not present error is not accepted in the setup MAC link flow.
1755*4882a593Smuzhiyun */
1756*4882a593Smuzhiyun if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
1757*4882a593Smuzhiyun return 0;
1758*4882a593Smuzhiyun
1759*4882a593Smuzhiyun if (ret_val)
1760*4882a593Smuzhiyun return ret_val;
1761*4882a593Smuzhiyun
1762*4882a593Smuzhiyun /* Configure internal PHY for native SFI based on module type */
1763*4882a593Smuzhiyun ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
1764*4882a593Smuzhiyun IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1765*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
1766*4882a593Smuzhiyun if (ret_val)
1767*4882a593Smuzhiyun return ret_val;
1768*4882a593Smuzhiyun
1769*4882a593Smuzhiyun reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA;
1770*4882a593Smuzhiyun if (!setup_linear)
1771*4882a593Smuzhiyun reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR;
1772*4882a593Smuzhiyun
1773*4882a593Smuzhiyun ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
1774*4882a593Smuzhiyun IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1775*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
1776*4882a593Smuzhiyun if (ret_val)
1777*4882a593Smuzhiyun return ret_val;
1778*4882a593Smuzhiyun
1779*4882a593Smuzhiyun /* Setup SFI internal link. */
1780*4882a593Smuzhiyun return ixgbe_setup_sfi_x550a(hw, &speed);
1781*4882a593Smuzhiyun }
1782*4882a593Smuzhiyun
1783*4882a593Smuzhiyun /**
1784*4882a593Smuzhiyun * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
1785*4882a593Smuzhiyun * @hw: pointer to hardware structure
1786*4882a593Smuzhiyun * @speed: link speed
1787*4882a593Smuzhiyun * @autoneg_wait_to_complete: unused
1788*4882a593Smuzhiyun *
1789*4882a593Smuzhiyun * Configure the integrated PHY for SFP support.
1790*4882a593Smuzhiyun */
1791*4882a593Smuzhiyun static s32
1792*4882a593Smuzhiyun ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1793*4882a593Smuzhiyun __always_unused bool autoneg_wait_to_complete)
1794*4882a593Smuzhiyun {
1795*4882a593Smuzhiyun u32 reg_slice, slice_offset;
1796*4882a593Smuzhiyun bool setup_linear = false;
1797*4882a593Smuzhiyun u16 reg_phy_ext;
1798*4882a593Smuzhiyun s32 ret_val;
1799*4882a593Smuzhiyun
1800*4882a593Smuzhiyun /* Check if SFP module is supported and linear */
1801*4882a593Smuzhiyun ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
1802*4882a593Smuzhiyun
1803*4882a593Smuzhiyun /* If no SFP module present, then return success. Return success since
1804*4882a593Smuzhiyun * SFP not present error is not accepted in the setup MAC link flow.
1805*4882a593Smuzhiyun */
1806*4882a593Smuzhiyun if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT)
1807*4882a593Smuzhiyun return 0;
1808*4882a593Smuzhiyun
1809*4882a593Smuzhiyun if (ret_val)
1810*4882a593Smuzhiyun return ret_val;
1811*4882a593Smuzhiyun
1812*4882a593Smuzhiyun /* Configure internal PHY for KR/KX. */
1813*4882a593Smuzhiyun ixgbe_setup_kr_speed_x550em(hw, speed);
1814*4882a593Smuzhiyun
1815*4882a593Smuzhiyun if (hw->phy.mdio.prtad == MDIO_PRTAD_NONE)
1816*4882a593Smuzhiyun return IXGBE_ERR_PHY_ADDR_INVALID;
1817*4882a593Smuzhiyun
1818*4882a593Smuzhiyun /* Get external PHY SKU id */
1819*4882a593Smuzhiyun ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
1820*4882a593Smuzhiyun IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
1821*4882a593Smuzhiyun if (ret_val)
1822*4882a593Smuzhiyun return ret_val;
1823*4882a593Smuzhiyun
1824*4882a593Smuzhiyun /* When configuring quad port CS4223, the MAC instance is part
1825*4882a593Smuzhiyun * of the slice offset.
1826*4882a593Smuzhiyun */
1827*4882a593Smuzhiyun if (reg_phy_ext == IXGBE_CS4223_SKU_ID)
1828*4882a593Smuzhiyun slice_offset = (hw->bus.lan_id +
1829*4882a593Smuzhiyun (hw->bus.instance_id << 1)) << 12;
1830*4882a593Smuzhiyun else
1831*4882a593Smuzhiyun slice_offset = hw->bus.lan_id << 12;
1832*4882a593Smuzhiyun
1833*4882a593Smuzhiyun /* Configure CS4227/CS4223 LINE side to proper mode. */
1834*4882a593Smuzhiyun reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
1835*4882a593Smuzhiyun
1836*4882a593Smuzhiyun ret_val = hw->phy.ops.read_reg(hw, reg_slice,
1837*4882a593Smuzhiyun IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
1838*4882a593Smuzhiyun if (ret_val)
1839*4882a593Smuzhiyun return ret_val;
1840*4882a593Smuzhiyun
1841*4882a593Smuzhiyun reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) |
1842*4882a593Smuzhiyun (IXGBE_CS4227_EDC_MODE_SR << 1));
1843*4882a593Smuzhiyun
1844*4882a593Smuzhiyun if (setup_linear)
1845*4882a593Smuzhiyun reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1;
1846*4882a593Smuzhiyun else
1847*4882a593Smuzhiyun reg_phy_ext |= (IXGBE_CS4227_EDC_MODE_SR << 1) | 1;
1848*4882a593Smuzhiyun
1849*4882a593Smuzhiyun ret_val = hw->phy.ops.write_reg(hw, reg_slice,
1850*4882a593Smuzhiyun IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
1851*4882a593Smuzhiyun if (ret_val)
1852*4882a593Smuzhiyun return ret_val;
1853*4882a593Smuzhiyun
1854*4882a593Smuzhiyun /* Flush previous write with a read */
1855*4882a593Smuzhiyun return hw->phy.ops.read_reg(hw, reg_slice,
1856*4882a593Smuzhiyun IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
1857*4882a593Smuzhiyun }
1858*4882a593Smuzhiyun
1859*4882a593Smuzhiyun /**
1860*4882a593Smuzhiyun * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
1861*4882a593Smuzhiyun * @hw: pointer to hardware structure
1862*4882a593Smuzhiyun * @speed: new link speed
1863*4882a593Smuzhiyun * @autoneg_wait: true when waiting for completion is needed
1864*4882a593Smuzhiyun *
1865*4882a593Smuzhiyun * Setup internal/external PHY link speed based on link speed, then set
1866*4882a593Smuzhiyun * external PHY auto advertised link speed.
1867*4882a593Smuzhiyun *
1868*4882a593Smuzhiyun * Returns error status for any failure
1869*4882a593Smuzhiyun **/
1870*4882a593Smuzhiyun static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
1871*4882a593Smuzhiyun ixgbe_link_speed speed,
1872*4882a593Smuzhiyun bool autoneg_wait)
1873*4882a593Smuzhiyun {
1874*4882a593Smuzhiyun s32 status;
1875*4882a593Smuzhiyun ixgbe_link_speed force_speed;
1876*4882a593Smuzhiyun
1877*4882a593Smuzhiyun /* Set up internal/external PHY link speed to iXFI (10G), unless only
1878*4882a593Smuzhiyun * 1G is auto advertised, in which case set up a KX link.
1879*4882a593Smuzhiyun */
1880*4882a593Smuzhiyun if (speed & IXGBE_LINK_SPEED_10GB_FULL)
1881*4882a593Smuzhiyun force_speed = IXGBE_LINK_SPEED_10GB_FULL;
1882*4882a593Smuzhiyun else
1883*4882a593Smuzhiyun force_speed = IXGBE_LINK_SPEED_1GB_FULL;
1884*4882a593Smuzhiyun
1885*4882a593Smuzhiyun /* If X552 and internal link mode is XFI, then setup XFI internal link.
1886*4882a593Smuzhiyun */
1887*4882a593Smuzhiyun if (hw->mac.type == ixgbe_mac_X550EM_x &&
1888*4882a593Smuzhiyun !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
1889*4882a593Smuzhiyun status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
1890*4882a593Smuzhiyun
1891*4882a593Smuzhiyun if (status)
1892*4882a593Smuzhiyun return status;
1893*4882a593Smuzhiyun }
1894*4882a593Smuzhiyun
1895*4882a593Smuzhiyun return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1896*4882a593Smuzhiyun }
1897*4882a593Smuzhiyun
1898*4882a593Smuzhiyun /** ixgbe_check_link_t_X550em - Determine link and speed status
1899*4882a593Smuzhiyun * @hw: pointer to hardware structure
1900*4882a593Smuzhiyun * @speed: pointer to link speed
1901*4882a593Smuzhiyun * @link_up: true when link is up
1902*4882a593Smuzhiyun * @link_up_wait_to_complete: bool used to wait for link up or not
1903*4882a593Smuzhiyun *
1904*4882a593Smuzhiyun * Check that both the MAC and X557 external PHY have link.
1905*4882a593Smuzhiyun **/
1906*4882a593Smuzhiyun static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
1907*4882a593Smuzhiyun ixgbe_link_speed *speed,
1908*4882a593Smuzhiyun bool *link_up,
1909*4882a593Smuzhiyun bool link_up_wait_to_complete)
1910*4882a593Smuzhiyun {
1911*4882a593Smuzhiyun u32 status;
1912*4882a593Smuzhiyun u16 i, autoneg_status;
1913*4882a593Smuzhiyun
1914*4882a593Smuzhiyun if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
1915*4882a593Smuzhiyun return IXGBE_ERR_CONFIG;
1916*4882a593Smuzhiyun
1917*4882a593Smuzhiyun status = ixgbe_check_mac_link_generic(hw, speed, link_up,
1918*4882a593Smuzhiyun link_up_wait_to_complete);
1919*4882a593Smuzhiyun
1920*4882a593Smuzhiyun /* If check link fails or MAC link is not up, then return */
1921*4882a593Smuzhiyun if (status || !(*link_up))
1922*4882a593Smuzhiyun return status;
1923*4882a593Smuzhiyun
1924*4882a593Smuzhiyun /* MAC link is up, so check external PHY link.
1925*4882a593Smuzhiyun * Link status is latching low, and can only be used to detect link
1926*4882a593Smuzhiyun * drop, and not the current status of the link without performing
1927*4882a593Smuzhiyun * back-to-back reads.
1928*4882a593Smuzhiyun */
1929*4882a593Smuzhiyun for (i = 0; i < 2; i++) {
1930*4882a593Smuzhiyun status = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN,
1931*4882a593Smuzhiyun &autoneg_status);
1932*4882a593Smuzhiyun
1933*4882a593Smuzhiyun if (status)
1934*4882a593Smuzhiyun return status;
1935*4882a593Smuzhiyun }
1936*4882a593Smuzhiyun
1937*4882a593Smuzhiyun /* If external PHY link is not up, then indicate link not up */
1938*4882a593Smuzhiyun if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
1939*4882a593Smuzhiyun *link_up = false;
1940*4882a593Smuzhiyun
1941*4882a593Smuzhiyun return 0;
1942*4882a593Smuzhiyun }
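
/* Illustrative sketch, not upstream code: the latching-low read pattern used
 * above for the external PHY - the link bit in MDIO_STAT1 latches low, so it
 * is read twice and only the second value reflects the current link state.
 * The helper name is an assumption.
 */
static s32 __maybe_unused ixgbe_example_ext_phy_link(struct ixgbe_hw *hw,
						     bool *link_up)
{
	u16 an_status = 0;
	s32 status;
	int i;

	for (i = 0; i < 2; i++) {
		status = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN,
					      &an_status);
		if (status)
			return status;
	}

	*link_up = !!(an_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
	return 0;
}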
1943*4882a593Smuzhiyun
1944*4882a593Smuzhiyun /**
1945*4882a593Smuzhiyun * ixgbe_setup_sgmii - Set up link for sgmii
1946*4882a593Smuzhiyun * @hw: pointer to hardware structure
1947*4882a593Smuzhiyun * @speed: unused
1948*4882a593Smuzhiyun * @autoneg_wait_to_complete: unused
1949*4882a593Smuzhiyun */
1950*4882a593Smuzhiyun static s32
1951*4882a593Smuzhiyun ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
1952*4882a593Smuzhiyun __always_unused bool autoneg_wait_to_complete)
1953*4882a593Smuzhiyun {
1954*4882a593Smuzhiyun struct ixgbe_mac_info *mac = &hw->mac;
1955*4882a593Smuzhiyun u32 lval, sval, flx_val;
1956*4882a593Smuzhiyun s32 rc;
1957*4882a593Smuzhiyun
1958*4882a593Smuzhiyun rc = mac->ops.read_iosf_sb_reg(hw,
1959*4882a593Smuzhiyun IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1960*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
1961*4882a593Smuzhiyun if (rc)
1962*4882a593Smuzhiyun return rc;
1963*4882a593Smuzhiyun
1964*4882a593Smuzhiyun lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
1965*4882a593Smuzhiyun lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
1966*4882a593Smuzhiyun lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
1967*4882a593Smuzhiyun lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
1968*4882a593Smuzhiyun lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
1969*4882a593Smuzhiyun rc = mac->ops.write_iosf_sb_reg(hw,
1970*4882a593Smuzhiyun IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
1971*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
1972*4882a593Smuzhiyun if (rc)
1973*4882a593Smuzhiyun return rc;
1974*4882a593Smuzhiyun
1975*4882a593Smuzhiyun rc = mac->ops.read_iosf_sb_reg(hw,
1976*4882a593Smuzhiyun IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1977*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
1978*4882a593Smuzhiyun if (rc)
1979*4882a593Smuzhiyun return rc;
1980*4882a593Smuzhiyun
1981*4882a593Smuzhiyun sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
1982*4882a593Smuzhiyun sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
1983*4882a593Smuzhiyun rc = mac->ops.write_iosf_sb_reg(hw,
1984*4882a593Smuzhiyun IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
1985*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
1986*4882a593Smuzhiyun if (rc)
1987*4882a593Smuzhiyun return rc;
1988*4882a593Smuzhiyun
1989*4882a593Smuzhiyun rc = mac->ops.read_iosf_sb_reg(hw,
1990*4882a593Smuzhiyun IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1991*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1992*4882a593Smuzhiyun if (rc)
1993*4882a593Smuzhiyun return rc;
1994*4882a593Smuzhiyun
1995*4882a593Smuzhiyun rc = mac->ops.read_iosf_sb_reg(hw,
1996*4882a593Smuzhiyun IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
1997*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
1998*4882a593Smuzhiyun if (rc)
1999*4882a593Smuzhiyun return rc;
2000*4882a593Smuzhiyun
2001*4882a593Smuzhiyun flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2002*4882a593Smuzhiyun flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
2003*4882a593Smuzhiyun flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2004*4882a593Smuzhiyun flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2005*4882a593Smuzhiyun flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2006*4882a593Smuzhiyun
2007*4882a593Smuzhiyun rc = mac->ops.write_iosf_sb_reg(hw,
2008*4882a593Smuzhiyun IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2009*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
2010*4882a593Smuzhiyun if (rc)
2011*4882a593Smuzhiyun return rc;
2012*4882a593Smuzhiyun
2013*4882a593Smuzhiyun rc = ixgbe_restart_an_internal_phy_x550em(hw);
2014*4882a593Smuzhiyun return rc;
2015*4882a593Smuzhiyun }
2016*4882a593Smuzhiyun
2017*4882a593Smuzhiyun /**
2018*4882a593Smuzhiyun * ixgbe_setup_sgmii_fw - Set up link for sgmii with firmware-controlled PHYs
2019*4882a593Smuzhiyun * @hw: pointer to hardware structure
2020*4882a593Smuzhiyun * @speed: the link speed to force
2021*4882a593Smuzhiyun * @autoneg_wait: true when waiting for completion is needed
2022*4882a593Smuzhiyun */
2023*4882a593Smuzhiyun static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
2024*4882a593Smuzhiyun bool autoneg_wait)
2025*4882a593Smuzhiyun {
2026*4882a593Smuzhiyun struct ixgbe_mac_info *mac = &hw->mac;
2027*4882a593Smuzhiyun u32 lval, sval, flx_val;
2028*4882a593Smuzhiyun s32 rc;
2029*4882a593Smuzhiyun
2030*4882a593Smuzhiyun rc = mac->ops.read_iosf_sb_reg(hw,
2031*4882a593Smuzhiyun IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2032*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
2033*4882a593Smuzhiyun if (rc)
2034*4882a593Smuzhiyun return rc;
2035*4882a593Smuzhiyun
2036*4882a593Smuzhiyun lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
2037*4882a593Smuzhiyun lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
2038*4882a593Smuzhiyun lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
2039*4882a593Smuzhiyun lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
2040*4882a593Smuzhiyun lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
2041*4882a593Smuzhiyun rc = mac->ops.write_iosf_sb_reg(hw,
2042*4882a593Smuzhiyun IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2043*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
2044*4882a593Smuzhiyun if (rc)
2045*4882a593Smuzhiyun return rc;
2046*4882a593Smuzhiyun
2047*4882a593Smuzhiyun rc = mac->ops.read_iosf_sb_reg(hw,
2048*4882a593Smuzhiyun IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
2049*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
2050*4882a593Smuzhiyun if (rc)
2051*4882a593Smuzhiyun return rc;
2052*4882a593Smuzhiyun
2053*4882a593Smuzhiyun sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
2054*4882a593Smuzhiyun sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
2055*4882a593Smuzhiyun rc = mac->ops.write_iosf_sb_reg(hw,
2056*4882a593Smuzhiyun IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
2057*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
2058*4882a593Smuzhiyun if (rc)
2059*4882a593Smuzhiyun return rc;
2060*4882a593Smuzhiyun
2061*4882a593Smuzhiyun rc = mac->ops.write_iosf_sb_reg(hw,
2062*4882a593Smuzhiyun IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2063*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
2064*4882a593Smuzhiyun if (rc)
2065*4882a593Smuzhiyun return rc;
2066*4882a593Smuzhiyun
2067*4882a593Smuzhiyun rc = mac->ops.read_iosf_sb_reg(hw,
2068*4882a593Smuzhiyun IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2069*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
2070*4882a593Smuzhiyun if (rc)
2071*4882a593Smuzhiyun return rc;
2072*4882a593Smuzhiyun
2073*4882a593Smuzhiyun flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2074*4882a593Smuzhiyun flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
2075*4882a593Smuzhiyun flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2076*4882a593Smuzhiyun flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2077*4882a593Smuzhiyun flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2078*4882a593Smuzhiyun
2079*4882a593Smuzhiyun rc = mac->ops.write_iosf_sb_reg(hw,
2080*4882a593Smuzhiyun IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2081*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
2082*4882a593Smuzhiyun if (rc)
2083*4882a593Smuzhiyun return rc;
2084*4882a593Smuzhiyun
2085*4882a593Smuzhiyun ixgbe_restart_an_internal_phy_x550em(hw);
2086*4882a593Smuzhiyun
2087*4882a593Smuzhiyun return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
2088*4882a593Smuzhiyun }
2089*4882a593Smuzhiyun
2090*4882a593Smuzhiyun /**
2091*4882a593Smuzhiyun * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37
2092*4882a593Smuzhiyun * @hw: pointer to hardware structure
2093*4882a593Smuzhiyun *
2094*4882a593Smuzhiyun * Enable flow control according to IEEE clause 37.
2095*4882a593Smuzhiyun */
2096*4882a593Smuzhiyun static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
2097*4882a593Smuzhiyun {
2098*4882a593Smuzhiyun s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
2099*4882a593Smuzhiyun u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
2100*4882a593Smuzhiyun ixgbe_link_speed speed;
2101*4882a593Smuzhiyun bool link_up;
2102*4882a593Smuzhiyun
2103*4882a593Smuzhiyun /* AN should have completed when the cable was plugged in.
2104*4882a593Smuzhiyun * Look for reasons to bail out. Bail out if:
2105*4882a593Smuzhiyun * - FC autoneg is disabled, or if
2106*4882a593Smuzhiyun * - link is not up.
2107*4882a593Smuzhiyun */
2108*4882a593Smuzhiyun if (hw->fc.disable_fc_autoneg)
2109*4882a593Smuzhiyun goto out;
2110*4882a593Smuzhiyun
2111*4882a593Smuzhiyun hw->mac.ops.check_link(hw, &speed, &link_up, false);
2112*4882a593Smuzhiyun if (!link_up)
2113*4882a593Smuzhiyun goto out;
2114*4882a593Smuzhiyun
2115*4882a593Smuzhiyun /* Check if auto-negotiation has completed */
2116*4882a593Smuzhiyun status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
2117*4882a593Smuzhiyun if (status || !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
2118*4882a593Smuzhiyun status = IXGBE_ERR_FC_NOT_NEGOTIATED;
2119*4882a593Smuzhiyun goto out;
2120*4882a593Smuzhiyun }
2121*4882a593Smuzhiyun
2122*4882a593Smuzhiyun /* Negotiate the flow control */
2123*4882a593Smuzhiyun status = ixgbe_negotiate_fc(hw, info[0], info[0],
2124*4882a593Smuzhiyun FW_PHY_ACT_GET_LINK_INFO_FC_RX,
2125*4882a593Smuzhiyun FW_PHY_ACT_GET_LINK_INFO_FC_TX,
2126*4882a593Smuzhiyun FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX,
2127*4882a593Smuzhiyun FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX);
2128*4882a593Smuzhiyun
2129*4882a593Smuzhiyun out:
2130*4882a593Smuzhiyun if (!status) {
2131*4882a593Smuzhiyun hw->fc.fc_was_autonegged = true;
2132*4882a593Smuzhiyun } else {
2133*4882a593Smuzhiyun hw->fc.fc_was_autonegged = false;
2134*4882a593Smuzhiyun hw->fc.current_mode = hw->fc.requested_mode;
2135*4882a593Smuzhiyun }
2136*4882a593Smuzhiyun }
2137*4882a593Smuzhiyun
2138*4882a593Smuzhiyun /** ixgbe_init_mac_link_ops_X550em_a - Init mac link function pointers
2139*4882a593Smuzhiyun * @hw: pointer to hardware structure
2140*4882a593Smuzhiyun **/
2141*4882a593Smuzhiyun static void ixgbe_init_mac_link_ops_X550em_a(struct ixgbe_hw *hw)
2142*4882a593Smuzhiyun {
2143*4882a593Smuzhiyun struct ixgbe_mac_info *mac = &hw->mac;
2144*4882a593Smuzhiyun
2145*4882a593Smuzhiyun switch (mac->ops.get_media_type(hw)) {
2146*4882a593Smuzhiyun case ixgbe_media_type_fiber:
2147*4882a593Smuzhiyun mac->ops.setup_fc = NULL;
2148*4882a593Smuzhiyun mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a;
2149*4882a593Smuzhiyun break;
2150*4882a593Smuzhiyun case ixgbe_media_type_copper:
2151*4882a593Smuzhiyun if (hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T &&
2152*4882a593Smuzhiyun hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T_L) {
2153*4882a593Smuzhiyun mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
2154*4882a593Smuzhiyun break;
2155*4882a593Smuzhiyun }
2156*4882a593Smuzhiyun mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
2157*4882a593Smuzhiyun mac->ops.setup_fc = ixgbe_fc_autoneg_fw;
2158*4882a593Smuzhiyun mac->ops.setup_link = ixgbe_setup_sgmii_fw;
2159*4882a593Smuzhiyun mac->ops.check_link = ixgbe_check_mac_link_generic;
2160*4882a593Smuzhiyun break;
2161*4882a593Smuzhiyun case ixgbe_media_type_backplane:
2162*4882a593Smuzhiyun mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a;
2163*4882a593Smuzhiyun mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a;
2164*4882a593Smuzhiyun break;
2165*4882a593Smuzhiyun default:
2166*4882a593Smuzhiyun break;
2167*4882a593Smuzhiyun }
2168*4882a593Smuzhiyun }
2169*4882a593Smuzhiyun
2170*4882a593Smuzhiyun /** ixgbe_init_mac_link_ops_X550em - init mac link function pointers
2171*4882a593Smuzhiyun * @hw: pointer to hardware structure
2172*4882a593Smuzhiyun **/
2173*4882a593Smuzhiyun static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
2174*4882a593Smuzhiyun {
2175*4882a593Smuzhiyun struct ixgbe_mac_info *mac = &hw->mac;
2176*4882a593Smuzhiyun
2177*4882a593Smuzhiyun mac->ops.setup_fc = ixgbe_setup_fc_x550em;
2178*4882a593Smuzhiyun
2179*4882a593Smuzhiyun switch (mac->ops.get_media_type(hw)) {
2180*4882a593Smuzhiyun case ixgbe_media_type_fiber:
2181*4882a593Smuzhiyun /* CS4227 does not support autoneg, so disable the laser control
2182*4882a593Smuzhiyun * functions for SFP+ fiber
2183*4882a593Smuzhiyun */
2184*4882a593Smuzhiyun mac->ops.disable_tx_laser = NULL;
2185*4882a593Smuzhiyun mac->ops.enable_tx_laser = NULL;
2186*4882a593Smuzhiyun mac->ops.flap_tx_laser = NULL;
2187*4882a593Smuzhiyun mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
2188*4882a593Smuzhiyun switch (hw->device_id) {
2189*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_A_SFP_N:
2190*4882a593Smuzhiyun mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_n;
2191*4882a593Smuzhiyun break;
2192*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_A_SFP:
2193*4882a593Smuzhiyun mac->ops.setup_mac_link =
2194*4882a593Smuzhiyun ixgbe_setup_mac_link_sfp_x550a;
2195*4882a593Smuzhiyun break;
2196*4882a593Smuzhiyun default:
2197*4882a593Smuzhiyun mac->ops.setup_mac_link =
2198*4882a593Smuzhiyun ixgbe_setup_mac_link_sfp_x550em;
2199*4882a593Smuzhiyun break;
2200*4882a593Smuzhiyun }
2201*4882a593Smuzhiyun mac->ops.set_rate_select_speed =
2202*4882a593Smuzhiyun ixgbe_set_soft_rate_select_speed;
2203*4882a593Smuzhiyun break;
2204*4882a593Smuzhiyun case ixgbe_media_type_copper:
2205*4882a593Smuzhiyun if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T)
2206*4882a593Smuzhiyun break;
2207*4882a593Smuzhiyun mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
2208*4882a593Smuzhiyun mac->ops.setup_fc = ixgbe_setup_fc_generic;
2209*4882a593Smuzhiyun mac->ops.check_link = ixgbe_check_link_t_X550em;
2210*4882a593Smuzhiyun break;
2211*4882a593Smuzhiyun case ixgbe_media_type_backplane:
2212*4882a593Smuzhiyun if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
2213*4882a593Smuzhiyun hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
2214*4882a593Smuzhiyun mac->ops.setup_link = ixgbe_setup_sgmii;
2215*4882a593Smuzhiyun break;
2216*4882a593Smuzhiyun default:
2217*4882a593Smuzhiyun break;
2218*4882a593Smuzhiyun }
2219*4882a593Smuzhiyun
2220*4882a593Smuzhiyun /* Additional modification for X550em_a devices */
2221*4882a593Smuzhiyun if (hw->mac.type == ixgbe_mac_x550em_a)
2222*4882a593Smuzhiyun ixgbe_init_mac_link_ops_X550em_a(hw);
2223*4882a593Smuzhiyun }
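
/* Illustrative sketch, not upstream code: once ixgbe_init_mac_link_ops_X550em()
 * above has installed the media-specific handlers, callers go through the
 * function pointers rather than the helpers directly. The helper name and the
 * chosen speed are assumptions for the example.
 */
static s32 __maybe_unused ixgbe_example_bring_up_link(struct ixgbe_hw *hw)
{
	ixgbe_init_mac_link_ops_X550em(hw);

	if (!hw->mac.ops.setup_link)
		return IXGBE_ERR_CONFIG;

	/* Ask for 10G full duplex without waiting for autoneg completion. */
	return hw->mac.ops.setup_link(hw, IXGBE_LINK_SPEED_10GB_FULL, false);
}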
2224*4882a593Smuzhiyun
2225*4882a593Smuzhiyun /** ixgbe_setup_sfp_modules_X550em - Setup SFP module
2226*4882a593Smuzhiyun * @hw: pointer to hardware structure
2227*4882a593Smuzhiyun */
2228*4882a593Smuzhiyun static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
2229*4882a593Smuzhiyun {
2230*4882a593Smuzhiyun s32 status;
2231*4882a593Smuzhiyun bool linear;
2232*4882a593Smuzhiyun
2233*4882a593Smuzhiyun /* Check if SFP module is supported */
2234*4882a593Smuzhiyun status = ixgbe_supported_sfp_modules_X550em(hw, &linear);
2235*4882a593Smuzhiyun if (status)
2236*4882a593Smuzhiyun return status;
2237*4882a593Smuzhiyun
2238*4882a593Smuzhiyun ixgbe_init_mac_link_ops_X550em(hw);
2239*4882a593Smuzhiyun hw->phy.ops.reset = NULL;
2240*4882a593Smuzhiyun
2241*4882a593Smuzhiyun return 0;
2242*4882a593Smuzhiyun }
2243*4882a593Smuzhiyun
2244*4882a593Smuzhiyun /** ixgbe_get_link_capabilities_X550em - Determines link capabilities
2245*4882a593Smuzhiyun * @hw: pointer to hardware structure
2246*4882a593Smuzhiyun * @speed: pointer to link speed
2247*4882a593Smuzhiyun * @autoneg: true when autoneg or autotry is enabled
2248*4882a593Smuzhiyun **/
2249*4882a593Smuzhiyun static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
2250*4882a593Smuzhiyun ixgbe_link_speed *speed,
2251*4882a593Smuzhiyun bool *autoneg)
2252*4882a593Smuzhiyun {
2253*4882a593Smuzhiyun if (hw->phy.type == ixgbe_phy_fw) {
2254*4882a593Smuzhiyun *autoneg = true;
2255*4882a593Smuzhiyun *speed = hw->phy.speeds_supported;
2256*4882a593Smuzhiyun return 0;
2257*4882a593Smuzhiyun }
2258*4882a593Smuzhiyun
2259*4882a593Smuzhiyun /* SFP */
2260*4882a593Smuzhiyun if (hw->phy.media_type == ixgbe_media_type_fiber) {
2261*4882a593Smuzhiyun /* CS4227 SFP must not enable auto-negotiation */
2262*4882a593Smuzhiyun *autoneg = false;
2263*4882a593Smuzhiyun
2264*4882a593Smuzhiyun if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
2265*4882a593Smuzhiyun hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
2266*4882a593Smuzhiyun hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
2267*4882a593Smuzhiyun hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
2268*4882a593Smuzhiyun *speed = IXGBE_LINK_SPEED_1GB_FULL;
2269*4882a593Smuzhiyun return 0;
2270*4882a593Smuzhiyun }
2271*4882a593Smuzhiyun
2272*4882a593Smuzhiyun /* Link capabilities are based on SFP */
2273*4882a593Smuzhiyun if (hw->phy.multispeed_fiber)
2274*4882a593Smuzhiyun *speed = IXGBE_LINK_SPEED_10GB_FULL |
2275*4882a593Smuzhiyun IXGBE_LINK_SPEED_1GB_FULL;
2276*4882a593Smuzhiyun else
2277*4882a593Smuzhiyun *speed = IXGBE_LINK_SPEED_10GB_FULL;
2278*4882a593Smuzhiyun } else {
2279*4882a593Smuzhiyun switch (hw->phy.type) {
2280*4882a593Smuzhiyun case ixgbe_phy_x550em_kx4:
2281*4882a593Smuzhiyun *speed = IXGBE_LINK_SPEED_1GB_FULL |
2282*4882a593Smuzhiyun IXGBE_LINK_SPEED_2_5GB_FULL |
2283*4882a593Smuzhiyun IXGBE_LINK_SPEED_10GB_FULL;
2284*4882a593Smuzhiyun break;
2285*4882a593Smuzhiyun case ixgbe_phy_x550em_xfi:
2286*4882a593Smuzhiyun *speed = IXGBE_LINK_SPEED_1GB_FULL |
2287*4882a593Smuzhiyun IXGBE_LINK_SPEED_10GB_FULL;
2288*4882a593Smuzhiyun break;
2289*4882a593Smuzhiyun case ixgbe_phy_ext_1g_t:
2290*4882a593Smuzhiyun case ixgbe_phy_sgmii:
2291*4882a593Smuzhiyun *speed = IXGBE_LINK_SPEED_1GB_FULL;
2292*4882a593Smuzhiyun break;
2293*4882a593Smuzhiyun case ixgbe_phy_x550em_kr:
2294*4882a593Smuzhiyun if (hw->mac.type == ixgbe_mac_x550em_a) {
2295*4882a593Smuzhiyun /* check different backplane modes */
2296*4882a593Smuzhiyun if (hw->phy.nw_mng_if_sel &
2297*4882a593Smuzhiyun IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
2298*4882a593Smuzhiyun *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
2299*4882a593Smuzhiyun break;
2300*4882a593Smuzhiyun } else if (hw->device_id ==
2301*4882a593Smuzhiyun IXGBE_DEV_ID_X550EM_A_KR_L) {
2302*4882a593Smuzhiyun *speed = IXGBE_LINK_SPEED_1GB_FULL;
2303*4882a593Smuzhiyun break;
2304*4882a593Smuzhiyun }
2305*4882a593Smuzhiyun }
2306*4882a593Smuzhiyun fallthrough;
2307*4882a593Smuzhiyun default:
2308*4882a593Smuzhiyun *speed = IXGBE_LINK_SPEED_10GB_FULL |
2309*4882a593Smuzhiyun IXGBE_LINK_SPEED_1GB_FULL;
2310*4882a593Smuzhiyun break;
2311*4882a593Smuzhiyun }
2312*4882a593Smuzhiyun *autoneg = true;
2313*4882a593Smuzhiyun }
2314*4882a593Smuzhiyun return 0;
2315*4882a593Smuzhiyun }
2316*4882a593Smuzhiyun
2317*4882a593Smuzhiyun /**
2318*4882a593Smuzhiyun * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause
2319*4882a593Smuzhiyun * @hw: pointer to hardware structure
2320*4882a593Smuzhiyun * @lsc: pointer to boolean flag which indicates whether external Base T
2321*4882a593Smuzhiyun * PHY interrupt is lsc
2322*4882a593Smuzhiyun *
2323*4882a593Smuzhiyun * Determine if external Base T PHY interrupt cause is high temperature
2324*4882a593Smuzhiyun * failure alarm or link status change.
2325*4882a593Smuzhiyun *
2326*4882a593Smuzhiyun * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
2327*4882a593Smuzhiyun * failure alarm, else return PHY access status.
2328*4882a593Smuzhiyun **/
2329*4882a593Smuzhiyun static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc)
2330*4882a593Smuzhiyun {
2331*4882a593Smuzhiyun u32 status;
2332*4882a593Smuzhiyun u16 reg;
2333*4882a593Smuzhiyun
2334*4882a593Smuzhiyun *lsc = false;
2335*4882a593Smuzhiyun
2336*4882a593Smuzhiyun /* Vendor alarm triggered */
2337*4882a593Smuzhiyun status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2338*4882a593Smuzhiyun MDIO_MMD_VEND1,
2339*4882a593Smuzhiyun &reg);
2340*4882a593Smuzhiyun
2341*4882a593Smuzhiyun if (status || !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN))
2342*4882a593Smuzhiyun return status;
2343*4882a593Smuzhiyun
2344*4882a593Smuzhiyun /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */
2345*4882a593Smuzhiyun status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG,
2346*4882a593Smuzhiyun MDIO_MMD_VEND1,
2347*4882a593Smuzhiyun &reg);
2348*4882a593Smuzhiyun
2349*4882a593Smuzhiyun if (status || !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2350*4882a593Smuzhiyun IXGBE_MDIO_GLOBAL_ALARM_1_INT)))
2351*4882a593Smuzhiyun return status;
2352*4882a593Smuzhiyun
2353*4882a593Smuzhiyun /* Global alarm triggered */
2354*4882a593Smuzhiyun status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1,
2355*4882a593Smuzhiyun MDIO_MMD_VEND1,
2356*4882a593Smuzhiyun &reg);
2357*4882a593Smuzhiyun
2358*4882a593Smuzhiyun if (status)
2359*4882a593Smuzhiyun return status;
2360*4882a593Smuzhiyun
2361*4882a593Smuzhiyun /* If high temperature failure, then return over temp error and exit */
2362*4882a593Smuzhiyun if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) {
2363*4882a593Smuzhiyun /* power down the PHY in case the PHY FW didn't already */
2364*4882a593Smuzhiyun ixgbe_set_copper_phy_power(hw, false);
2365*4882a593Smuzhiyun return IXGBE_ERR_OVERTEMP;
2366*4882a593Smuzhiyun }
2367*4882a593Smuzhiyun if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) {
2368*4882a593Smuzhiyun /* device fault alarm triggered */
2369*4882a593Smuzhiyun status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG,
2370*4882a593Smuzhiyun MDIO_MMD_VEND1,
2371*4882a593Smuzhiyun &reg);
2372*4882a593Smuzhiyun if (status)
2373*4882a593Smuzhiyun return status;
2374*4882a593Smuzhiyun
2375*4882a593Smuzhiyun /* if device fault was due to high temp alarm handle and exit */
2376*4882a593Smuzhiyun if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) {
2377*4882a593Smuzhiyun /* power down the PHY in case the PHY FW didn't */
2378*4882a593Smuzhiyun ixgbe_set_copper_phy_power(hw, false);
2379*4882a593Smuzhiyun return IXGBE_ERR_OVERTEMP;
2380*4882a593Smuzhiyun }
2381*4882a593Smuzhiyun }
2382*4882a593Smuzhiyun
2383*4882a593Smuzhiyun /* Vendor alarm 2 triggered */
2384*4882a593Smuzhiyun status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG,
2385*4882a593Smuzhiyun MDIO_MMD_AN, &reg);
2386*4882a593Smuzhiyun
2387*4882a593Smuzhiyun if (status || !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT))
2388*4882a593Smuzhiyun return status;
2389*4882a593Smuzhiyun
2390*4882a593Smuzhiyun /* link connect/disconnect event occurred */
2391*4882a593Smuzhiyun status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2,
2392*4882a593Smuzhiyun MDIO_MMD_AN, &reg);
2393*4882a593Smuzhiyun
2394*4882a593Smuzhiyun if (status)
2395*4882a593Smuzhiyun return status;
2396*4882a593Smuzhiyun
2397*4882a593Smuzhiyun /* Indicate LSC */
2398*4882a593Smuzhiyun if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC)
2399*4882a593Smuzhiyun *lsc = true;
2400*4882a593Smuzhiyun
2401*4882a593Smuzhiyun return 0;
2402*4882a593Smuzhiyun }
2403*4882a593Smuzhiyun
2404*4882a593Smuzhiyun /**
2405*4882a593Smuzhiyun * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts
2406*4882a593Smuzhiyun * @hw: pointer to hardware structure
2407*4882a593Smuzhiyun *
2408*4882a593Smuzhiyun * Enable link status change and temperature failure alarm for the external
2409*4882a593Smuzhiyun * Base T PHY
2410*4882a593Smuzhiyun *
2411*4882a593Smuzhiyun * Returns PHY access status
2412*4882a593Smuzhiyun **/
2413*4882a593Smuzhiyun static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
2414*4882a593Smuzhiyun {
2415*4882a593Smuzhiyun u32 status;
2416*4882a593Smuzhiyun u16 reg;
2417*4882a593Smuzhiyun bool lsc;
2418*4882a593Smuzhiyun
2419*4882a593Smuzhiyun /* Clear interrupt flags */
2420*4882a593Smuzhiyun status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
2421*4882a593Smuzhiyun
2422*4882a593Smuzhiyun /* Enable link status change alarm */
2423*4882a593Smuzhiyun
2424*4882a593Smuzhiyun /* Enable the LASI interrupts on X552 devices to receive notifications
2425*4882a593Smuzhiyun * of the link configurations of the external PHY and correspondingly
2426*4882a593Smuzhiyun * support the configuration of the internal iXFI link, since iXFI does
2427*4882a593Smuzhiyun * not support auto-negotiation. This is not required for X553 devices,
2428*4882a593Smuzhiyun * whose KR interface performs auto-negotiation and serves as the internal
2429*4882a593Smuzhiyun * link to the external PHY, hence the check below that skips enabling LASI
2430*4882a593Smuzhiyun * interrupts on X553 devices.
2431*4882a593Smuzhiyun */
2432*4882a593Smuzhiyun if (hw->mac.type != ixgbe_mac_x550em_a) {
2433*4882a593Smuzhiyun status = hw->phy.ops.read_reg(hw,
2434*4882a593Smuzhiyun IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2435*4882a593Smuzhiyun MDIO_MMD_AN, &reg);
2436*4882a593Smuzhiyun if (status)
2437*4882a593Smuzhiyun return status;
2438*4882a593Smuzhiyun
2439*4882a593Smuzhiyun reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
2440*4882a593Smuzhiyun
2441*4882a593Smuzhiyun status = hw->phy.ops.write_reg(hw,
2442*4882a593Smuzhiyun IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
2443*4882a593Smuzhiyun MDIO_MMD_AN, reg);
2444*4882a593Smuzhiyun if (status)
2445*4882a593Smuzhiyun return status;
2446*4882a593Smuzhiyun }
2447*4882a593Smuzhiyun
2448*4882a593Smuzhiyun /* Enable high temperature failure and global fault alarms */
2449*4882a593Smuzhiyun status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2450*4882a593Smuzhiyun MDIO_MMD_VEND1,
2451*4882a593Smuzhiyun &reg);
2452*4882a593Smuzhiyun if (status)
2453*4882a593Smuzhiyun return status;
2454*4882a593Smuzhiyun
2455*4882a593Smuzhiyun reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN |
2456*4882a593Smuzhiyun IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN);
2457*4882a593Smuzhiyun
2458*4882a593Smuzhiyun status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
2459*4882a593Smuzhiyun MDIO_MMD_VEND1,
2460*4882a593Smuzhiyun reg);
2461*4882a593Smuzhiyun if (status)
2462*4882a593Smuzhiyun return status;
2463*4882a593Smuzhiyun
2464*4882a593Smuzhiyun /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */
2465*4882a593Smuzhiyun status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2466*4882a593Smuzhiyun MDIO_MMD_VEND1,
2467*4882a593Smuzhiyun &reg);
2468*4882a593Smuzhiyun if (status)
2469*4882a593Smuzhiyun return status;
2470*4882a593Smuzhiyun
2471*4882a593Smuzhiyun reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN |
2472*4882a593Smuzhiyun IXGBE_MDIO_GLOBAL_ALARM_1_INT);
2473*4882a593Smuzhiyun
2474*4882a593Smuzhiyun status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK,
2475*4882a593Smuzhiyun MDIO_MMD_VEND1,
2476*4882a593Smuzhiyun reg);
2477*4882a593Smuzhiyun if (status)
2478*4882a593Smuzhiyun return status;
2479*4882a593Smuzhiyun
2480*4882a593Smuzhiyun /* Enable chip-wide vendor alarm */
2481*4882a593Smuzhiyun status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2482*4882a593Smuzhiyun MDIO_MMD_VEND1,
2483*4882a593Smuzhiyun &reg);
2484*4882a593Smuzhiyun if (status)
2485*4882a593Smuzhiyun return status;
2486*4882a593Smuzhiyun
2487*4882a593Smuzhiyun reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN;
2488*4882a593Smuzhiyun
2489*4882a593Smuzhiyun status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK,
2490*4882a593Smuzhiyun MDIO_MMD_VEND1,
2491*4882a593Smuzhiyun reg);
2492*4882a593Smuzhiyun
2493*4882a593Smuzhiyun return status;
2494*4882a593Smuzhiyun }
2495*4882a593Smuzhiyun
2496*4882a593Smuzhiyun /**
2497*4882a593Smuzhiyun * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt
2498*4882a593Smuzhiyun * @hw: pointer to hardware structure
2499*4882a593Smuzhiyun *
2500*4882a593Smuzhiyun * Handle external Base T PHY interrupt. If high temperature
2501*4882a593Smuzhiyun * failure alarm then return error, else if link status change
2502*4882a593Smuzhiyun * then setup internal/external PHY link
2503*4882a593Smuzhiyun *
2504*4882a593Smuzhiyun * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature
2505*4882a593Smuzhiyun * failure alarm, else return PHY access status.
2506*4882a593Smuzhiyun **/
2507*4882a593Smuzhiyun static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw)
2508*4882a593Smuzhiyun {
2509*4882a593Smuzhiyun struct ixgbe_phy_info *phy = &hw->phy;
2510*4882a593Smuzhiyun bool lsc;
2511*4882a593Smuzhiyun u32 status;
2512*4882a593Smuzhiyun
2513*4882a593Smuzhiyun status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
2514*4882a593Smuzhiyun if (status)
2515*4882a593Smuzhiyun return status;
2516*4882a593Smuzhiyun
2517*4882a593Smuzhiyun if (lsc && phy->ops.setup_internal_link)
2518*4882a593Smuzhiyun return phy->ops.setup_internal_link(hw);
2519*4882a593Smuzhiyun
2520*4882a593Smuzhiyun return 0;
2521*4882a593Smuzhiyun }
2522*4882a593Smuzhiyun
2523*4882a593Smuzhiyun /**
2524*4882a593Smuzhiyun * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed.
2525*4882a593Smuzhiyun * @hw: pointer to hardware structure
2526*4882a593Smuzhiyun * @speed: link speed
2527*4882a593Smuzhiyun *
2528*4882a593Smuzhiyun * Configures the integrated KR PHY.
2529*4882a593Smuzhiyun **/
2530*4882a593Smuzhiyun static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
2531*4882a593Smuzhiyun ixgbe_link_speed speed)
2532*4882a593Smuzhiyun {
2533*4882a593Smuzhiyun s32 status;
2534*4882a593Smuzhiyun u32 reg_val;
2535*4882a593Smuzhiyun
2536*4882a593Smuzhiyun status = hw->mac.ops.read_iosf_sb_reg(hw,
2537*4882a593Smuzhiyun IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2538*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2539*4882a593Smuzhiyun if (status)
2540*4882a593Smuzhiyun return status;
2541*4882a593Smuzhiyun
2542*4882a593Smuzhiyun reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
2543*4882a593Smuzhiyun reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
2544*4882a593Smuzhiyun IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
2545*4882a593Smuzhiyun
2546*4882a593Smuzhiyun /* Advertise 10G support. */
2547*4882a593Smuzhiyun if (speed & IXGBE_LINK_SPEED_10GB_FULL)
2548*4882a593Smuzhiyun reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR;
2549*4882a593Smuzhiyun
2550*4882a593Smuzhiyun /* Advertise 1G support. */
2551*4882a593Smuzhiyun if (speed & IXGBE_LINK_SPEED_1GB_FULL)
2552*4882a593Smuzhiyun reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
2553*4882a593Smuzhiyun
2554*4882a593Smuzhiyun status = hw->mac.ops.write_iosf_sb_reg(hw,
2555*4882a593Smuzhiyun IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
2556*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2557*4882a593Smuzhiyun
2558*4882a593Smuzhiyun if (hw->mac.type == ixgbe_mac_x550em_a) {
2559*4882a593Smuzhiyun /* Set lane mode to KR auto negotiation */
2560*4882a593Smuzhiyun status = hw->mac.ops.read_iosf_sb_reg(hw,
2561*4882a593Smuzhiyun IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2562*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
2563*4882a593Smuzhiyun
2564*4882a593Smuzhiyun if (status)
2565*4882a593Smuzhiyun return status;
2566*4882a593Smuzhiyun
2567*4882a593Smuzhiyun reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
2568*4882a593Smuzhiyun reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
2569*4882a593Smuzhiyun reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
2570*4882a593Smuzhiyun reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
2571*4882a593Smuzhiyun reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
2572*4882a593Smuzhiyun
2573*4882a593Smuzhiyun status = hw->mac.ops.write_iosf_sb_reg(hw,
2574*4882a593Smuzhiyun IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
2575*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
2576*4882a593Smuzhiyun }
2577*4882a593Smuzhiyun
2578*4882a593Smuzhiyun return ixgbe_restart_an_internal_phy_x550em(hw);
2579*4882a593Smuzhiyun }
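/* Illustrative sketch (not from the driver source): the @speed mask passed to
 * ixgbe_setup_kr_speed_x550em() maps directly onto the KRM advertisement bits
 * cleared and set above. A hypothetical caller advertising both rates would do
 * roughly:
 *
 *	ixgbe_link_speed adv = IXGBE_LINK_SPEED_10GB_FULL |
 *			       IXGBE_LINK_SPEED_1GB_FULL;
 *	status = ixgbe_setup_kr_speed_x550em(hw, adv);
 *
 * which sets both TETH_AN_CAP_KR (10G) and TETH_AN_CAP_KX (1G) before
 * restarting auto-negotiation; passing only IXGBE_LINK_SPEED_1GB_FULL would
 * leave the KR capability bit cleared.
 */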
2580*4882a593Smuzhiyun
2581*4882a593Smuzhiyun /**
2582*4882a593Smuzhiyun * ixgbe_setup_kr_x550em - Configure the KR PHY
2583*4882a593Smuzhiyun * @hw: pointer to hardware structure
2584*4882a593Smuzhiyun **/
2585*4882a593Smuzhiyun static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
2586*4882a593Smuzhiyun {
2587*4882a593Smuzhiyun /* leave link alone for 2.5G */
2588*4882a593Smuzhiyun if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
2589*4882a593Smuzhiyun return 0;
2590*4882a593Smuzhiyun
2591*4882a593Smuzhiyun if (ixgbe_check_reset_blocked(hw))
2592*4882a593Smuzhiyun return 0;
2593*4882a593Smuzhiyun
2594*4882a593Smuzhiyun return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
2595*4882a593Smuzhiyun }
2596*4882a593Smuzhiyun
2597*4882a593Smuzhiyun /** ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status
2598*4882a593Smuzhiyun * @hw: address of hardware structure
2599*4882a593Smuzhiyun * @link_up: address of boolean to indicate link status
2600*4882a593Smuzhiyun *
2601*4882a593Smuzhiyun * Returns error code if unable to get link status.
2602*4882a593Smuzhiyun **/
2603*4882a593Smuzhiyun static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up)
2604*4882a593Smuzhiyun {
2605*4882a593Smuzhiyun u32 ret;
2606*4882a593Smuzhiyun u16 autoneg_status;
2607*4882a593Smuzhiyun
2608*4882a593Smuzhiyun *link_up = false;
2609*4882a593Smuzhiyun
2610*4882a593Smuzhiyun /* Read twice back to back; the link status bit is latched low, so the second read reflects the current state */
2611*4882a593Smuzhiyun ret = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN,
2612*4882a593Smuzhiyun &autoneg_status);
2613*4882a593Smuzhiyun if (ret)
2614*4882a593Smuzhiyun return ret;
2615*4882a593Smuzhiyun
2616*4882a593Smuzhiyun ret = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN,
2617*4882a593Smuzhiyun &autoneg_status);
2618*4882a593Smuzhiyun if (ret)
2619*4882a593Smuzhiyun return ret;
2620*4882a593Smuzhiyun
2621*4882a593Smuzhiyun *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS);
2622*4882a593Smuzhiyun
2623*4882a593Smuzhiyun return 0;
2624*4882a593Smuzhiyun }
2625*4882a593Smuzhiyun
2626*4882a593Smuzhiyun /** ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link
2627*4882a593Smuzhiyun * @hw: pointer to hardware structure
2628*4882a593Smuzhiyun *
2629*4882a593Smuzhiyun * Configures the link between the integrated KR PHY and the external X557 PHY
2630*4882a593Smuzhiyun * The driver will call this function when it gets a link status change
2631*4882a593Smuzhiyun * interrupt from the X557 PHY. This function configures the link speed
2632*4882a593Smuzhiyun * between the PHYs to match the link speed of the BASE-T link.
2633*4882a593Smuzhiyun *
2634*4882a593Smuzhiyun * A return of a non-zero value indicates an error, and the base driver should
2635*4882a593Smuzhiyun * not report link up.
2636*4882a593Smuzhiyun **/
2637*4882a593Smuzhiyun static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
2638*4882a593Smuzhiyun {
2639*4882a593Smuzhiyun ixgbe_link_speed force_speed;
2640*4882a593Smuzhiyun bool link_up;
2641*4882a593Smuzhiyun u32 status;
2642*4882a593Smuzhiyun u16 speed;
2643*4882a593Smuzhiyun
2644*4882a593Smuzhiyun if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
2645*4882a593Smuzhiyun return IXGBE_ERR_CONFIG;
2646*4882a593Smuzhiyun
2647*4882a593Smuzhiyun if (!(hw->mac.type == ixgbe_mac_X550EM_x &&
2648*4882a593Smuzhiyun !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE))) {
2649*4882a593Smuzhiyun speed = IXGBE_LINK_SPEED_10GB_FULL |
2650*4882a593Smuzhiyun IXGBE_LINK_SPEED_1GB_FULL;
2651*4882a593Smuzhiyun return ixgbe_setup_kr_speed_x550em(hw, speed);
2652*4882a593Smuzhiyun }
2653*4882a593Smuzhiyun
2654*4882a593Smuzhiyun /* If link is not up, then there is no setup necessary so return */
2655*4882a593Smuzhiyun status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
2656*4882a593Smuzhiyun if (status)
2657*4882a593Smuzhiyun return status;
2658*4882a593Smuzhiyun
2659*4882a593Smuzhiyun if (!link_up)
2660*4882a593Smuzhiyun return 0;
2661*4882a593Smuzhiyun
2662*4882a593Smuzhiyun status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
2663*4882a593Smuzhiyun MDIO_MMD_AN,
2664*4882a593Smuzhiyun &speed);
2665*4882a593Smuzhiyun if (status)
2666*4882a593Smuzhiyun return status;
2667*4882a593Smuzhiyun
2668*4882a593Smuzhiyun /* If link is not still up, then no setup is necessary so return */
2669*4882a593Smuzhiyun status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
2670*4882a593Smuzhiyun if (status)
2671*4882a593Smuzhiyun return status;
2672*4882a593Smuzhiyun
2673*4882a593Smuzhiyun if (!link_up)
2674*4882a593Smuzhiyun return 0;
2675*4882a593Smuzhiyun
2676*4882a593Smuzhiyun /* clear everything but the speed and duplex bits */
2677*4882a593Smuzhiyun speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK;
2678*4882a593Smuzhiyun
2679*4882a593Smuzhiyun switch (speed) {
2680*4882a593Smuzhiyun case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL:
2681*4882a593Smuzhiyun force_speed = IXGBE_LINK_SPEED_10GB_FULL;
2682*4882a593Smuzhiyun break;
2683*4882a593Smuzhiyun case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL:
2684*4882a593Smuzhiyun force_speed = IXGBE_LINK_SPEED_1GB_FULL;
2685*4882a593Smuzhiyun break;
2686*4882a593Smuzhiyun default:
2687*4882a593Smuzhiyun /* Internal PHY does not support anything else */
2688*4882a593Smuzhiyun return IXGBE_ERR_INVALID_LINK_SETTINGS;
2689*4882a593Smuzhiyun }
2690*4882a593Smuzhiyun
2691*4882a593Smuzhiyun return ixgbe_setup_ixfi_x550em(hw, &force_speed);
2692*4882a593Smuzhiyun }
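/* Illustrative flow sketch (assumed call chain, not literal driver code): on
 * an X552 link-status-change interrupt from the external X557 PHY, the path
 * through this file is roughly
 *
 *	ixgbe_handle_lasi_ext_t_x550em(hw)
 *	    -> phy->ops.setup_internal_link(hw)   (this function)
 *	        -> read IXGBE_MDIO_AUTO_NEG_VENDOR_STAT for the BASE-T speed
 *	        -> ixgbe_setup_ixfi_x550em(hw, &force_speed)
 *
 * Only 10GB_FULL and 1GB_FULL can be forced on the iXFI side; any other
 * negotiated speed is rejected with IXGBE_ERR_INVALID_LINK_SETTINGS.
 */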
2693*4882a593Smuzhiyun
2694*4882a593Smuzhiyun /** ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI
2695*4882a593Smuzhiyun * @hw: pointer to hardware structure
2696*4882a593Smuzhiyun **/
2697*4882a593Smuzhiyun static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw)
2698*4882a593Smuzhiyun {
2699*4882a593Smuzhiyun s32 status;
2700*4882a593Smuzhiyun
2701*4882a593Smuzhiyun status = ixgbe_reset_phy_generic(hw);
2702*4882a593Smuzhiyun
2703*4882a593Smuzhiyun if (status)
2704*4882a593Smuzhiyun return status;
2705*4882a593Smuzhiyun
2706*4882a593Smuzhiyun /* Configure Link Status Alarm and Temperature Threshold interrupts */
2707*4882a593Smuzhiyun return ixgbe_enable_lasi_ext_t_x550em(hw);
2708*4882a593Smuzhiyun }
2709*4882a593Smuzhiyun
2710*4882a593Smuzhiyun /**
2711*4882a593Smuzhiyun * ixgbe_led_on_t_x550em - Turns on the software controllable LEDs.
2712*4882a593Smuzhiyun * @hw: pointer to hardware structure
2713*4882a593Smuzhiyun * @led_idx: led number to turn on
2714*4882a593Smuzhiyun **/
2715*4882a593Smuzhiyun static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
2716*4882a593Smuzhiyun {
2717*4882a593Smuzhiyun u16 phy_data;
2718*4882a593Smuzhiyun
2719*4882a593Smuzhiyun if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
2720*4882a593Smuzhiyun return IXGBE_ERR_PARAM;
2721*4882a593Smuzhiyun
2722*4882a593Smuzhiyun /* To turn on the LED, set mode to ON. */
2723*4882a593Smuzhiyun hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
2724*4882a593Smuzhiyun MDIO_MMD_VEND1, &phy_data);
2725*4882a593Smuzhiyun phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK;
2726*4882a593Smuzhiyun hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
2727*4882a593Smuzhiyun MDIO_MMD_VEND1, phy_data);
2728*4882a593Smuzhiyun
2729*4882a593Smuzhiyun return 0;
2730*4882a593Smuzhiyun }
2731*4882a593Smuzhiyun
2732*4882a593Smuzhiyun /**
2733*4882a593Smuzhiyun * ixgbe_led_off_t_x550em - Turns off the software controllable LEDs.
2734*4882a593Smuzhiyun * @hw: pointer to hardware structure
2735*4882a593Smuzhiyun * @led_idx: led number to turn off
2736*4882a593Smuzhiyun **/
2737*4882a593Smuzhiyun static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
2738*4882a593Smuzhiyun {
2739*4882a593Smuzhiyun u16 phy_data;
2740*4882a593Smuzhiyun
2741*4882a593Smuzhiyun if (led_idx >= IXGBE_X557_MAX_LED_INDEX)
2742*4882a593Smuzhiyun return IXGBE_ERR_PARAM;
2743*4882a593Smuzhiyun
2744*4882a593Smuzhiyun /* To turn off the LED, clear the manual-set mode bit. */
2745*4882a593Smuzhiyun hw->phy.ops.read_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
2746*4882a593Smuzhiyun MDIO_MMD_VEND1, &phy_data);
2747*4882a593Smuzhiyun phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK;
2748*4882a593Smuzhiyun hw->phy.ops.write_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
2749*4882a593Smuzhiyun MDIO_MMD_VEND1, phy_data);
2750*4882a593Smuzhiyun
2751*4882a593Smuzhiyun return 0;
2752*4882a593Smuzhiyun }
2753*4882a593Smuzhiyun
2754*4882a593Smuzhiyun /**
2755*4882a593Smuzhiyun * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware
2756*4882a593Smuzhiyun * @hw: pointer to the HW structure
2757*4882a593Smuzhiyun * @maj: driver version major number
2758*4882a593Smuzhiyun * @min: driver version minor number
2759*4882a593Smuzhiyun * @build: driver version build number
2760*4882a593Smuzhiyun * @sub: driver version sub build number
2761*4882a593Smuzhiyun * @len: length of driver_ver string
2762*4882a593Smuzhiyun * @driver_ver: driver string
2763*4882a593Smuzhiyun *
2764*4882a593Smuzhiyun * Sends driver version number to firmware through the manageability
2765*4882a593Smuzhiyun * block. On success return 0
2766*4882a593Smuzhiyun * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
2767*4882a593Smuzhiyun * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
2768*4882a593Smuzhiyun **/
2769*4882a593Smuzhiyun static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
2770*4882a593Smuzhiyun u8 build, u8 sub, u16 len,
2771*4882a593Smuzhiyun const char *driver_ver)
2772*4882a593Smuzhiyun {
2773*4882a593Smuzhiyun struct ixgbe_hic_drv_info2 fw_cmd;
2774*4882a593Smuzhiyun s32 ret_val;
2775*4882a593Smuzhiyun int i;
2776*4882a593Smuzhiyun
2777*4882a593Smuzhiyun if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string)))
2778*4882a593Smuzhiyun return IXGBE_ERR_INVALID_ARGUMENT;
2779*4882a593Smuzhiyun
2780*4882a593Smuzhiyun fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
2781*4882a593Smuzhiyun fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
2782*4882a593Smuzhiyun fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
2783*4882a593Smuzhiyun fw_cmd.port_num = (u8)hw->bus.func;
2784*4882a593Smuzhiyun fw_cmd.ver_maj = maj;
2785*4882a593Smuzhiyun fw_cmd.ver_min = min;
2786*4882a593Smuzhiyun fw_cmd.ver_build = build;
2787*4882a593Smuzhiyun fw_cmd.ver_sub = sub;
2788*4882a593Smuzhiyun fw_cmd.hdr.checksum = 0;
2789*4882a593Smuzhiyun memcpy(fw_cmd.driver_string, driver_ver, len);
2790*4882a593Smuzhiyun fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
2791*4882a593Smuzhiyun (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
2792*4882a593Smuzhiyun
2793*4882a593Smuzhiyun for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
2794*4882a593Smuzhiyun ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
2795*4882a593Smuzhiyun sizeof(fw_cmd),
2796*4882a593Smuzhiyun IXGBE_HI_COMMAND_TIMEOUT,
2797*4882a593Smuzhiyun true);
2798*4882a593Smuzhiyun if (ret_val)
2799*4882a593Smuzhiyun continue;
2800*4882a593Smuzhiyun
2801*4882a593Smuzhiyun if (fw_cmd.hdr.cmd_or_resp.ret_status !=
2802*4882a593Smuzhiyun FW_CEM_RESP_STATUS_SUCCESS)
2803*4882a593Smuzhiyun return IXGBE_ERR_HOST_INTERFACE_COMMAND;
2804*4882a593Smuzhiyun return 0;
2805*4882a593Smuzhiyun }
2806*4882a593Smuzhiyun
2807*4882a593Smuzhiyun return ret_val;
2808*4882a593Smuzhiyun }
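/* Illustrative usage sketch, assuming this routine is wired up as
 * mac->ops.set_fw_drv_ver and using a made-up version string; the real caller
 * and version numbers live outside this file:
 *
 *	static const char ver[] = "5.1.0-k";
 *
 *	hw->mac.ops.set_fw_drv_ver(hw, 5, 1, 0, 0, sizeof(ver), ver);
 *
 * @len must not exceed sizeof(fw_cmd.driver_string), otherwise the call fails
 * with IXGBE_ERR_INVALID_ARGUMENT before anything is sent to firmware.
 */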
2809*4882a593Smuzhiyun
2810*4882a593Smuzhiyun /** ixgbe_get_lcd_t_x550em - Determine lowest common denominator link speed
2811*4882a593Smuzhiyun * @hw: pointer to hardware structure
2812*4882a593Smuzhiyun * @lcd_speed: pointer to lowest common link speed
2813*4882a593Smuzhiyun *
2814*4882a593Smuzhiyun * Determine lowest common link speed with link partner.
2815*4882a593Smuzhiyun **/
2816*4882a593Smuzhiyun static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
2817*4882a593Smuzhiyun ixgbe_link_speed *lcd_speed)
2818*4882a593Smuzhiyun {
2819*4882a593Smuzhiyun u16 an_lp_status;
2820*4882a593Smuzhiyun s32 status;
2821*4882a593Smuzhiyun u16 word = hw->eeprom.ctrl_word_3;
2822*4882a593Smuzhiyun
2823*4882a593Smuzhiyun *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
2824*4882a593Smuzhiyun
2825*4882a593Smuzhiyun status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS,
2826*4882a593Smuzhiyun MDIO_MMD_AN,
2827*4882a593Smuzhiyun &an_lp_status);
2828*4882a593Smuzhiyun if (status)
2829*4882a593Smuzhiyun return status;
2830*4882a593Smuzhiyun
2831*4882a593Smuzhiyun /* If link partner advertised 1G, return 1G */
2832*4882a593Smuzhiyun if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
2833*4882a593Smuzhiyun *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
2834*4882a593Smuzhiyun return status;
2835*4882a593Smuzhiyun }
2836*4882a593Smuzhiyun
2837*4882a593Smuzhiyun /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
2838*4882a593Smuzhiyun if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
2839*4882a593Smuzhiyun (word & NVM_INIT_CTRL_3_D10GMP_PORT0))
2840*4882a593Smuzhiyun return status;
2841*4882a593Smuzhiyun
2842*4882a593Smuzhiyun /* Link partner not capable of lower speeds, return 10G */
2843*4882a593Smuzhiyun *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
2844*4882a593Smuzhiyun return status;
2845*4882a593Smuzhiyun }
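/* Worked example of the LCD resolution above (illustrative):
 *
 *	link partner advertises 1000BASE-T       -> *lcd_speed = 1GB_FULL
 *	partner 10G only, D10GMP clear for port  -> *lcd_speed = 10GB_FULL
 *	partner 10G only, D10GMP set for port    -> *lcd_speed stays UNKNOWN
 *						    and the caller powers the
 *						    PHY down
 *
 * The D10GMP bits come from NVM control word 3 (hw->eeprom.ctrl_word_3),
 * which ixgbe_enter_lplu_t_x550em() reads before calling this helper.
 */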
2846*4882a593Smuzhiyun
2847*4882a593Smuzhiyun /**
2848*4882a593Smuzhiyun * ixgbe_setup_fc_x550em - Set up flow control
2849*4882a593Smuzhiyun * @hw: pointer to hardware structure
2850*4882a593Smuzhiyun */
2851*4882a593Smuzhiyun static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
2852*4882a593Smuzhiyun {
2853*4882a593Smuzhiyun bool pause, asm_dir;
2854*4882a593Smuzhiyun u32 reg_val;
2855*4882a593Smuzhiyun s32 rc = 0;
2856*4882a593Smuzhiyun
2857*4882a593Smuzhiyun /* Validate the requested mode */
2858*4882a593Smuzhiyun if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
2859*4882a593Smuzhiyun hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
2860*4882a593Smuzhiyun return IXGBE_ERR_INVALID_LINK_SETTINGS;
2861*4882a593Smuzhiyun }
2862*4882a593Smuzhiyun
2863*4882a593Smuzhiyun /* 10gig parts do not have a word in the EEPROM to determine the
2864*4882a593Smuzhiyun * default flow control setting, so we explicitly set it to full.
2865*4882a593Smuzhiyun */
2866*4882a593Smuzhiyun if (hw->fc.requested_mode == ixgbe_fc_default)
2867*4882a593Smuzhiyun hw->fc.requested_mode = ixgbe_fc_full;
2868*4882a593Smuzhiyun
2869*4882a593Smuzhiyun /* Determine PAUSE and ASM_DIR bits. */
2870*4882a593Smuzhiyun switch (hw->fc.requested_mode) {
2871*4882a593Smuzhiyun case ixgbe_fc_none:
2872*4882a593Smuzhiyun pause = false;
2873*4882a593Smuzhiyun asm_dir = false;
2874*4882a593Smuzhiyun break;
2875*4882a593Smuzhiyun case ixgbe_fc_tx_pause:
2876*4882a593Smuzhiyun pause = false;
2877*4882a593Smuzhiyun asm_dir = true;
2878*4882a593Smuzhiyun break;
2879*4882a593Smuzhiyun case ixgbe_fc_rx_pause:
2880*4882a593Smuzhiyun /* Rx Flow control is enabled and Tx Flow control is
2881*4882a593Smuzhiyun * disabled by software override. Since there really
2882*4882a593Smuzhiyun * isn't a way to advertise that we are capable of RX
2883*4882a593Smuzhiyun * Pause ONLY, we will advertise that we support both
2884*4882a593Smuzhiyun * symmetric and asymmetric Rx PAUSE, as such we fall
2885*4882a593Smuzhiyun * through to the fc_full statement. Later, we will
2886*4882a593Smuzhiyun * disable the adapter's ability to send PAUSE frames.
2887*4882a593Smuzhiyun */
2888*4882a593Smuzhiyun fallthrough;
2889*4882a593Smuzhiyun case ixgbe_fc_full:
2890*4882a593Smuzhiyun pause = true;
2891*4882a593Smuzhiyun asm_dir = true;
2892*4882a593Smuzhiyun break;
2893*4882a593Smuzhiyun default:
2894*4882a593Smuzhiyun hw_err(hw, "Flow control param set incorrectly\n");
2895*4882a593Smuzhiyun return IXGBE_ERR_CONFIG;
2896*4882a593Smuzhiyun }
2897*4882a593Smuzhiyun
2898*4882a593Smuzhiyun switch (hw->device_id) {
2899*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_X_KR:
2900*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_A_KR:
2901*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_A_KR_L:
2902*4882a593Smuzhiyun rc = hw->mac.ops.read_iosf_sb_reg(hw,
2903*4882a593Smuzhiyun IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
2904*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY,
2905*4882a593Smuzhiyun &reg_val);
2906*4882a593Smuzhiyun if (rc)
2907*4882a593Smuzhiyun return rc;
2908*4882a593Smuzhiyun
2909*4882a593Smuzhiyun reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
2910*4882a593Smuzhiyun IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
2911*4882a593Smuzhiyun if (pause)
2912*4882a593Smuzhiyun reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
2913*4882a593Smuzhiyun if (asm_dir)
2914*4882a593Smuzhiyun reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
2915*4882a593Smuzhiyun rc = hw->mac.ops.write_iosf_sb_reg(hw,
2916*4882a593Smuzhiyun IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
2917*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY,
2918*4882a593Smuzhiyun reg_val);
2919*4882a593Smuzhiyun
2920*4882a593Smuzhiyun /* This device does not fully support AN. */
2921*4882a593Smuzhiyun hw->fc.disable_fc_autoneg = true;
2922*4882a593Smuzhiyun break;
2923*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_X_XFI:
2924*4882a593Smuzhiyun hw->fc.disable_fc_autoneg = true;
2925*4882a593Smuzhiyun break;
2926*4882a593Smuzhiyun default:
2927*4882a593Smuzhiyun break;
2928*4882a593Smuzhiyun }
2929*4882a593Smuzhiyun return rc;
2930*4882a593Smuzhiyun }
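/* Advertisement summary for the switch above (illustrative; this follows the
 * usual IEEE 802.3 Annex 28B pairing):
 *
 *	requested_mode       PAUSE  ASM_DIR
 *	ixgbe_fc_none          0       0
 *	ixgbe_fc_tx_pause      0       1
 *	ixgbe_fc_rx_pause      1       1   (advertised as full; Tx PAUSE is
 *					    disabled later by the FC logic)
 *	ixgbe_fc_full          1       1
 */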
2931*4882a593Smuzhiyun
2932*4882a593Smuzhiyun /**
2933*4882a593Smuzhiyun * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37
2934*4882a593Smuzhiyun * @hw: pointer to hardware structure
2935*4882a593Smuzhiyun **/
2936*4882a593Smuzhiyun static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
2937*4882a593Smuzhiyun {
2938*4882a593Smuzhiyun u32 link_s1, lp_an_page_low, an_cntl_1;
2939*4882a593Smuzhiyun s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
2940*4882a593Smuzhiyun ixgbe_link_speed speed;
2941*4882a593Smuzhiyun bool link_up;
2942*4882a593Smuzhiyun
2943*4882a593Smuzhiyun /* AN should have completed when the cable was plugged in.
2944*4882a593Smuzhiyun * Look for reasons to bail out. Bail out if:
2945*4882a593Smuzhiyun * - FC autoneg is disabled, or if
2946*4882a593Smuzhiyun * - link is not up.
2947*4882a593Smuzhiyun */
2948*4882a593Smuzhiyun if (hw->fc.disable_fc_autoneg) {
2949*4882a593Smuzhiyun hw_err(hw, "Flow control autoneg is disabled");
2950*4882a593Smuzhiyun goto out;
2951*4882a593Smuzhiyun }
2952*4882a593Smuzhiyun
2953*4882a593Smuzhiyun hw->mac.ops.check_link(hw, &speed, &link_up, false);
2954*4882a593Smuzhiyun if (!link_up) {
2955*4882a593Smuzhiyun hw_err(hw, "The link is down");
2956*4882a593Smuzhiyun goto out;
2957*4882a593Smuzhiyun }
2958*4882a593Smuzhiyun
2959*4882a593Smuzhiyun /* Check that auto-negotiation has completed */
2960*4882a593Smuzhiyun status = hw->mac.ops.read_iosf_sb_reg(hw,
2961*4882a593Smuzhiyun IXGBE_KRM_LINK_S1(hw->bus.lan_id),
2962*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1);
2963*4882a593Smuzhiyun
2964*4882a593Smuzhiyun if (status || (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
2965*4882a593Smuzhiyun hw_dbg(hw, "Auto-Negotiation did not complete\n");
2966*4882a593Smuzhiyun status = IXGBE_ERR_FC_NOT_NEGOTIATED;
2967*4882a593Smuzhiyun goto out;
2968*4882a593Smuzhiyun }
2969*4882a593Smuzhiyun
2970*4882a593Smuzhiyun /* Read the 10g AN autoc and LP ability registers and resolve
2971*4882a593Smuzhiyun * local flow control settings accordingly
2972*4882a593Smuzhiyun */
2973*4882a593Smuzhiyun status = hw->mac.ops.read_iosf_sb_reg(hw,
2974*4882a593Smuzhiyun IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
2975*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1);
2976*4882a593Smuzhiyun
2977*4882a593Smuzhiyun if (status) {
2978*4882a593Smuzhiyun hw_dbg(hw, "Auto-Negotiation did not complete\n");
2979*4882a593Smuzhiyun goto out;
2980*4882a593Smuzhiyun }
2981*4882a593Smuzhiyun
2982*4882a593Smuzhiyun status = hw->mac.ops.read_iosf_sb_reg(hw,
2983*4882a593Smuzhiyun IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id),
2984*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low);
2985*4882a593Smuzhiyun
2986*4882a593Smuzhiyun if (status) {
2987*4882a593Smuzhiyun hw_dbg(hw, "Auto-Negotiation did not complete\n");
2988*4882a593Smuzhiyun goto out;
2989*4882a593Smuzhiyun }
2990*4882a593Smuzhiyun
2991*4882a593Smuzhiyun status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low,
2992*4882a593Smuzhiyun IXGBE_KRM_AN_CNTL_1_SYM_PAUSE,
2993*4882a593Smuzhiyun IXGBE_KRM_AN_CNTL_1_ASM_PAUSE,
2994*4882a593Smuzhiyun IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE,
2995*4882a593Smuzhiyun IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE);
2996*4882a593Smuzhiyun
2997*4882a593Smuzhiyun out:
2998*4882a593Smuzhiyun if (!status) {
2999*4882a593Smuzhiyun hw->fc.fc_was_autonegged = true;
3000*4882a593Smuzhiyun } else {
3001*4882a593Smuzhiyun hw->fc.fc_was_autonegged = false;
3002*4882a593Smuzhiyun hw->fc.current_mode = hw->fc.requested_mode;
3003*4882a593Smuzhiyun }
3004*4882a593Smuzhiyun }
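/* Illustrative resolution examples, assuming ixgbe_negotiate_fc() applies the
 * standard 802.3 rules to the values read above: if both sides advertise
 * SYM_PAUSE and fc_full was requested, current_mode becomes ixgbe_fc_full; if
 * we advertise SYM+ASM but the partner advertises only ASM_PAUSE, the result
 * is ixgbe_fc_rx_pause; if neither pairing matches, it falls back to
 * ixgbe_fc_none.
 */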
3005*4882a593Smuzhiyun
3006*4882a593Smuzhiyun /**
3007*4882a593Smuzhiyun * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings
3008*4882a593Smuzhiyun * @hw: pointer to hardware structure
3009*4882a593Smuzhiyun **/
3010*4882a593Smuzhiyun static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
3011*4882a593Smuzhiyun {
3012*4882a593Smuzhiyun hw->fc.fc_was_autonegged = false;
3013*4882a593Smuzhiyun hw->fc.current_mode = hw->fc.requested_mode;
3014*4882a593Smuzhiyun }
3015*4882a593Smuzhiyun
3016*4882a593Smuzhiyun /** ixgbe_enter_lplu_x550em - Transition to low power states
3017*4882a593Smuzhiyun * @hw: pointer to hardware structure
3018*4882a593Smuzhiyun *
3019*4882a593Smuzhiyun * Configures Low Power Link Up on transition to low power states
3020*4882a593Smuzhiyun * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting
3021*4882a593Smuzhiyun * the X557 PHY immediately prior to entering LPLU.
3022*4882a593Smuzhiyun **/
3023*4882a593Smuzhiyun static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
3024*4882a593Smuzhiyun {
3025*4882a593Smuzhiyun u16 an_10g_cntl_reg, autoneg_reg, speed;
3026*4882a593Smuzhiyun s32 status;
3027*4882a593Smuzhiyun ixgbe_link_speed lcd_speed;
3028*4882a593Smuzhiyun u32 save_autoneg;
3029*4882a593Smuzhiyun bool link_up;
3030*4882a593Smuzhiyun
3031*4882a593Smuzhiyun /* If blocked by MNG FW, then don't restart AN */
3032*4882a593Smuzhiyun if (ixgbe_check_reset_blocked(hw))
3033*4882a593Smuzhiyun return 0;
3034*4882a593Smuzhiyun
3035*4882a593Smuzhiyun status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3036*4882a593Smuzhiyun if (status)
3037*4882a593Smuzhiyun return status;
3038*4882a593Smuzhiyun
3039*4882a593Smuzhiyun status = hw->eeprom.ops.read(hw, NVM_INIT_CTRL_3,
3040*4882a593Smuzhiyun &hw->eeprom.ctrl_word_3);
3041*4882a593Smuzhiyun if (status)
3042*4882a593Smuzhiyun return status;
3043*4882a593Smuzhiyun
3044*4882a593Smuzhiyun /* If link is down, LPLU disabled in NVM, WoL disabled, or
3045*4882a593Smuzhiyun * manageability disabled, then force link down by entering
3046*4882a593Smuzhiyun * low power mode.
3047*4882a593Smuzhiyun */
3048*4882a593Smuzhiyun if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) ||
3049*4882a593Smuzhiyun !(hw->wol_enabled || ixgbe_mng_present(hw)))
3050*4882a593Smuzhiyun return ixgbe_set_copper_phy_power(hw, false);
3051*4882a593Smuzhiyun
3052*4882a593Smuzhiyun /* Determine LCD */
3053*4882a593Smuzhiyun status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed);
3054*4882a593Smuzhiyun if (status)
3055*4882a593Smuzhiyun return status;
3056*4882a593Smuzhiyun
3057*4882a593Smuzhiyun /* If no valid LCD link speed, then force link down and exit. */
3058*4882a593Smuzhiyun if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN)
3059*4882a593Smuzhiyun return ixgbe_set_copper_phy_power(hw, false);
3060*4882a593Smuzhiyun
3061*4882a593Smuzhiyun status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
3062*4882a593Smuzhiyun MDIO_MMD_AN,
3063*4882a593Smuzhiyun &speed);
3064*4882a593Smuzhiyun if (status)
3065*4882a593Smuzhiyun return status;
3066*4882a593Smuzhiyun
3067*4882a593Smuzhiyun /* If no link now, speed is invalid so take link down */
3068*4882a593Smuzhiyun status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
3069*4882a593Smuzhiyun if (status)
3070*4882a593Smuzhiyun return ixgbe_set_copper_phy_power(hw, false);
3071*4882a593Smuzhiyun
3072*4882a593Smuzhiyun /* clear everything but the speed bits */
3073*4882a593Smuzhiyun speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK;
3074*4882a593Smuzhiyun
3075*4882a593Smuzhiyun /* If current speed is already LCD, then exit. */
3076*4882a593Smuzhiyun if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) &&
3077*4882a593Smuzhiyun (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
3078*4882a593Smuzhiyun ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
3079*4882a593Smuzhiyun (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
3080*4882a593Smuzhiyun return status;
3081*4882a593Smuzhiyun
3082*4882a593Smuzhiyun /* Clear AN completed indication */
3083*4882a593Smuzhiyun status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
3084*4882a593Smuzhiyun MDIO_MMD_AN,
3085*4882a593Smuzhiyun &autoneg_reg);
3086*4882a593Smuzhiyun if (status)
3087*4882a593Smuzhiyun return status;
3088*4882a593Smuzhiyun
3089*4882a593Smuzhiyun status = hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL,
3090*4882a593Smuzhiyun MDIO_MMD_AN,
3091*4882a593Smuzhiyun &an_10g_cntl_reg);
3092*4882a593Smuzhiyun if (status)
3093*4882a593Smuzhiyun return status;
3094*4882a593Smuzhiyun
3095*4882a593Smuzhiyun status = hw->phy.ops.read_reg(hw,
3096*4882a593Smuzhiyun IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
3097*4882a593Smuzhiyun MDIO_MMD_AN,
3098*4882a593Smuzhiyun &autoneg_reg);
3099*4882a593Smuzhiyun if (status)
3100*4882a593Smuzhiyun return status;
3101*4882a593Smuzhiyun
3102*4882a593Smuzhiyun save_autoneg = hw->phy.autoneg_advertised;
3103*4882a593Smuzhiyun
3104*4882a593Smuzhiyun /* Setup link at least common link speed */
3105*4882a593Smuzhiyun status = hw->mac.ops.setup_link(hw, lcd_speed, false);
3106*4882a593Smuzhiyun
3107*4882a593Smuzhiyun /* restore autoneg from before setting lplu speed */
3108*4882a593Smuzhiyun hw->phy.autoneg_advertised = save_autoneg;
3109*4882a593Smuzhiyun
3110*4882a593Smuzhiyun return status;
3111*4882a593Smuzhiyun }
3112*4882a593Smuzhiyun
3113*4882a593Smuzhiyun /**
3114*4882a593Smuzhiyun * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
3115*4882a593Smuzhiyun * @hw: pointer to hardware structure
3116*4882a593Smuzhiyun */
3117*4882a593Smuzhiyun static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
3118*4882a593Smuzhiyun {
3119*4882a593Smuzhiyun u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
3120*4882a593Smuzhiyun s32 rc;
3121*4882a593Smuzhiyun
3122*4882a593Smuzhiyun if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
3123*4882a593Smuzhiyun return 0;
3124*4882a593Smuzhiyun
3125*4882a593Smuzhiyun rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
3126*4882a593Smuzhiyun if (rc)
3127*4882a593Smuzhiyun return rc;
3128*4882a593Smuzhiyun memset(store, 0, sizeof(store));
3129*4882a593Smuzhiyun
3130*4882a593Smuzhiyun rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
3131*4882a593Smuzhiyun if (rc)
3132*4882a593Smuzhiyun return rc;
3133*4882a593Smuzhiyun
3134*4882a593Smuzhiyun return ixgbe_setup_fw_link(hw);
3135*4882a593Smuzhiyun }
3136*4882a593Smuzhiyun
3137*4882a593Smuzhiyun /**
3138*4882a593Smuzhiyun * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
3139*4882a593Smuzhiyun * @hw: pointer to hardware structure
3140*4882a593Smuzhiyun */
3141*4882a593Smuzhiyun static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
3142*4882a593Smuzhiyun {
3143*4882a593Smuzhiyun u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
3144*4882a593Smuzhiyun s32 rc;
3145*4882a593Smuzhiyun
3146*4882a593Smuzhiyun rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
3147*4882a593Smuzhiyun if (rc)
3148*4882a593Smuzhiyun return rc;
3149*4882a593Smuzhiyun
3150*4882a593Smuzhiyun if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
3151*4882a593Smuzhiyun ixgbe_shutdown_fw_phy(hw);
3152*4882a593Smuzhiyun return IXGBE_ERR_OVERTEMP;
3153*4882a593Smuzhiyun }
3154*4882a593Smuzhiyun return 0;
3155*4882a593Smuzhiyun }
3156*4882a593Smuzhiyun
3157*4882a593Smuzhiyun /**
3158*4882a593Smuzhiyun * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
3159*4882a593Smuzhiyun * @hw: pointer to hardware structure
3160*4882a593Smuzhiyun *
3161*4882a593Smuzhiyun * Read NW_MNG_IF_SEL register and save field values.
3162*4882a593Smuzhiyun */
3163*4882a593Smuzhiyun static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
3164*4882a593Smuzhiyun {
3165*4882a593Smuzhiyun /* Save NW management interface connected on board. This is used
3166*4882a593Smuzhiyun * to determine internal PHY mode.
3167*4882a593Smuzhiyun */
3168*4882a593Smuzhiyun hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
3169*4882a593Smuzhiyun
3170*4882a593Smuzhiyun /* If this is an x550em_a device and MDIO is connected to an external PHY,
3171*4882a593Smuzhiyun * then save the PHY address; this register field has only been used on these devices.
3172*4882a593Smuzhiyun */
3173*4882a593Smuzhiyun if (hw->mac.type == ixgbe_mac_x550em_a &&
3174*4882a593Smuzhiyun hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
3175*4882a593Smuzhiyun hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel &
3176*4882a593Smuzhiyun IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
3177*4882a593Smuzhiyun IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
3178*4882a593Smuzhiyun }
3179*4882a593Smuzhiyun }
3180*4882a593Smuzhiyun
3181*4882a593Smuzhiyun /** ixgbe_init_phy_ops_X550em - PHY/SFP specific init
3182*4882a593Smuzhiyun * @hw: pointer to hardware structure
3183*4882a593Smuzhiyun *
3184*4882a593Smuzhiyun * Initialize any function pointers that were not able to be
3185*4882a593Smuzhiyun * set during init_shared_code because the PHY/SFP type was
3186*4882a593Smuzhiyun * not known. Perform the SFP init if necessary.
3187*4882a593Smuzhiyun **/
3188*4882a593Smuzhiyun static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
3189*4882a593Smuzhiyun {
3190*4882a593Smuzhiyun struct ixgbe_phy_info *phy = &hw->phy;
3191*4882a593Smuzhiyun s32 ret_val;
3192*4882a593Smuzhiyun
3193*4882a593Smuzhiyun hw->mac.ops.set_lan_id(hw);
3194*4882a593Smuzhiyun
3195*4882a593Smuzhiyun ixgbe_read_mng_if_sel_x550em(hw);
3196*4882a593Smuzhiyun
3197*4882a593Smuzhiyun if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
3198*4882a593Smuzhiyun phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
3199*4882a593Smuzhiyun ixgbe_setup_mux_ctl(hw);
3200*4882a593Smuzhiyun }
3201*4882a593Smuzhiyun
3202*4882a593Smuzhiyun /* Identify the PHY or SFP module */
3203*4882a593Smuzhiyun ret_val = phy->ops.identify(hw);
3204*4882a593Smuzhiyun if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
3205*4882a593Smuzhiyun ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
3206*4882a593Smuzhiyun return ret_val;
3207*4882a593Smuzhiyun
3208*4882a593Smuzhiyun /* Setup function pointers based on detected hardware */
3209*4882a593Smuzhiyun ixgbe_init_mac_link_ops_X550em(hw);
3210*4882a593Smuzhiyun if (phy->sfp_type != ixgbe_sfp_type_unknown)
3211*4882a593Smuzhiyun phy->ops.reset = NULL;
3212*4882a593Smuzhiyun
3213*4882a593Smuzhiyun /* Set functions pointers based on phy type */
3214*4882a593Smuzhiyun switch (hw->phy.type) {
3215*4882a593Smuzhiyun case ixgbe_phy_x550em_kx4:
3216*4882a593Smuzhiyun phy->ops.setup_link = NULL;
3217*4882a593Smuzhiyun phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
3218*4882a593Smuzhiyun phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
3219*4882a593Smuzhiyun break;
3220*4882a593Smuzhiyun case ixgbe_phy_x550em_kr:
3221*4882a593Smuzhiyun phy->ops.setup_link = ixgbe_setup_kr_x550em;
3222*4882a593Smuzhiyun phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
3223*4882a593Smuzhiyun phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
3224*4882a593Smuzhiyun break;
3225*4882a593Smuzhiyun case ixgbe_phy_x550em_xfi:
3226*4882a593Smuzhiyun /* link is managed by HW */
3227*4882a593Smuzhiyun phy->ops.setup_link = NULL;
3228*4882a593Smuzhiyun phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
3229*4882a593Smuzhiyun phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
3230*4882a593Smuzhiyun break;
3231*4882a593Smuzhiyun case ixgbe_phy_x550em_ext_t:
3232*4882a593Smuzhiyun /* Save NW management interface connected on board. This is used
3233*4882a593Smuzhiyun * to determine internal PHY mode
3234*4882a593Smuzhiyun */
3235*4882a593Smuzhiyun phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
3236*4882a593Smuzhiyun
3237*4882a593Smuzhiyun /* If internal link mode is XFI, then setup iXFI internal link,
3238*4882a593Smuzhiyun * else setup KR now.
3239*4882a593Smuzhiyun */
3240*4882a593Smuzhiyun phy->ops.setup_internal_link =
3241*4882a593Smuzhiyun ixgbe_setup_internal_phy_t_x550em;
3242*4882a593Smuzhiyun
3243*4882a593Smuzhiyun /* setup SW LPLU only for first revision */
3244*4882a593Smuzhiyun if (hw->mac.type == ixgbe_mac_X550EM_x &&
3245*4882a593Smuzhiyun !(IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)) &
3246*4882a593Smuzhiyun IXGBE_FUSES0_REV_MASK))
3247*4882a593Smuzhiyun phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
3248*4882a593Smuzhiyun
3249*4882a593Smuzhiyun phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
3250*4882a593Smuzhiyun phy->ops.reset = ixgbe_reset_phy_t_X550em;
3251*4882a593Smuzhiyun break;
3252*4882a593Smuzhiyun case ixgbe_phy_sgmii:
3253*4882a593Smuzhiyun phy->ops.setup_link = NULL;
3254*4882a593Smuzhiyun break;
3255*4882a593Smuzhiyun case ixgbe_phy_fw:
3256*4882a593Smuzhiyun phy->ops.setup_link = ixgbe_setup_fw_link;
3257*4882a593Smuzhiyun phy->ops.reset = ixgbe_reset_phy_fw;
3258*4882a593Smuzhiyun break;
3259*4882a593Smuzhiyun case ixgbe_phy_ext_1g_t:
3260*4882a593Smuzhiyun phy->ops.setup_link = NULL;
3261*4882a593Smuzhiyun phy->ops.read_reg = NULL;
3262*4882a593Smuzhiyun phy->ops.write_reg = NULL;
3263*4882a593Smuzhiyun phy->ops.reset = NULL;
3264*4882a593Smuzhiyun break;
3265*4882a593Smuzhiyun default:
3266*4882a593Smuzhiyun break;
3267*4882a593Smuzhiyun }
3268*4882a593Smuzhiyun
3269*4882a593Smuzhiyun return ret_val;
3270*4882a593Smuzhiyun }
3271*4882a593Smuzhiyun
3272*4882a593Smuzhiyun /** ixgbe_get_media_type_X550em - Get media type
3273*4882a593Smuzhiyun * @hw: pointer to hardware structure
3274*4882a593Smuzhiyun *
3275*4882a593Smuzhiyun * Returns the media type (fiber, copper, backplane)
3276*4882a593Smuzhiyun *
3277*4882a593Smuzhiyun */
3278*4882a593Smuzhiyun static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
3279*4882a593Smuzhiyun {
3280*4882a593Smuzhiyun enum ixgbe_media_type media_type;
3281*4882a593Smuzhiyun
3282*4882a593Smuzhiyun /* Detect if there is a copper PHY attached. */
3283*4882a593Smuzhiyun switch (hw->device_id) {
3284*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_A_SGMII:
3285*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_A_SGMII_L:
3286*4882a593Smuzhiyun hw->phy.type = ixgbe_phy_sgmii;
3287*4882a593Smuzhiyun fallthrough;
3288*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_X_KR:
3289*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_X_KX4:
3290*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_X_XFI:
3291*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_A_KR:
3292*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_A_KR_L:
3293*4882a593Smuzhiyun media_type = ixgbe_media_type_backplane;
3294*4882a593Smuzhiyun break;
3295*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_X_SFP:
3296*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_A_SFP:
3297*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_A_SFP_N:
3298*4882a593Smuzhiyun media_type = ixgbe_media_type_fiber;
3299*4882a593Smuzhiyun break;
3300*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_X_1G_T:
3301*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_X_10G_T:
3302*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_A_10G_T:
3303*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_A_1G_T:
3304*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_A_1G_T_L:
3305*4882a593Smuzhiyun media_type = ixgbe_media_type_copper;
3306*4882a593Smuzhiyun break;
3307*4882a593Smuzhiyun default:
3308*4882a593Smuzhiyun media_type = ixgbe_media_type_unknown;
3309*4882a593Smuzhiyun break;
3310*4882a593Smuzhiyun }
3311*4882a593Smuzhiyun return media_type;
3312*4882a593Smuzhiyun }
3313*4882a593Smuzhiyun
3314*4882a593Smuzhiyun /** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY.
3315*4882a593Smuzhiyun ** @hw: pointer to hardware structure
3316*4882a593Smuzhiyun **/
3317*4882a593Smuzhiyun static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
3318*4882a593Smuzhiyun {
3319*4882a593Smuzhiyun s32 status;
3320*4882a593Smuzhiyun u16 reg;
3321*4882a593Smuzhiyun
3322*4882a593Smuzhiyun status = hw->phy.ops.read_reg(hw,
3323*4882a593Smuzhiyun IXGBE_MDIO_TX_VENDOR_ALARMS_3,
3324*4882a593Smuzhiyun MDIO_MMD_PMAPMD,
3325*4882a593Smuzhiyun &reg);
3326*4882a593Smuzhiyun if (status)
3327*4882a593Smuzhiyun return status;
3328*4882a593Smuzhiyun
3329*4882a593Smuzhiyun /* If PHY FW reset completed bit is set then this is the first
3330*4882a593Smuzhiyun * SW instance after a power on so the PHY FW must be un-stalled.
3331*4882a593Smuzhiyun */
3332*4882a593Smuzhiyun if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
3333*4882a593Smuzhiyun status = hw->phy.ops.read_reg(hw,
3334*4882a593Smuzhiyun IXGBE_MDIO_GLOBAL_RES_PR_10,
3335*4882a593Smuzhiyun MDIO_MMD_VEND1,
3336*4882a593Smuzhiyun &reg);
3337*4882a593Smuzhiyun if (status)
3338*4882a593Smuzhiyun return status;
3339*4882a593Smuzhiyun
3340*4882a593Smuzhiyun reg &= ~IXGBE_MDIO_POWER_UP_STALL;
3341*4882a593Smuzhiyun
3342*4882a593Smuzhiyun status = hw->phy.ops.write_reg(hw,
3343*4882a593Smuzhiyun IXGBE_MDIO_GLOBAL_RES_PR_10,
3344*4882a593Smuzhiyun MDIO_MMD_VEND1,
3345*4882a593Smuzhiyun reg);
3346*4882a593Smuzhiyun if (status)
3347*4882a593Smuzhiyun return status;
3348*4882a593Smuzhiyun }
3349*4882a593Smuzhiyun
3350*4882a593Smuzhiyun return status;
3351*4882a593Smuzhiyun }
3352*4882a593Smuzhiyun
3353*4882a593Smuzhiyun /**
3354*4882a593Smuzhiyun * ixgbe_set_mdio_speed - Set MDIO clock speed
3355*4882a593Smuzhiyun * @hw: pointer to hardware structure
3356*4882a593Smuzhiyun */
3357*4882a593Smuzhiyun static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
3358*4882a593Smuzhiyun {
3359*4882a593Smuzhiyun u32 hlreg0;
3360*4882a593Smuzhiyun
3361*4882a593Smuzhiyun switch (hw->device_id) {
3362*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_X_10G_T:
3363*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_A_SGMII:
3364*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_A_SGMII_L:
3365*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_A_10G_T:
3366*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_A_SFP:
3367*4882a593Smuzhiyun /* Config MDIO clock speed before the first MDIO PHY access */
3368*4882a593Smuzhiyun hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3369*4882a593Smuzhiyun hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
3370*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3371*4882a593Smuzhiyun break;
3372*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_A_1G_T:
3373*4882a593Smuzhiyun case IXGBE_DEV_ID_X550EM_A_1G_T_L:
3374*4882a593Smuzhiyun /* Select fast MDIO clock speed for these devices */
3375*4882a593Smuzhiyun hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3376*4882a593Smuzhiyun hlreg0 |= IXGBE_HLREG0_MDCSPD;
3377*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3378*4882a593Smuzhiyun break;
3379*4882a593Smuzhiyun default:
3380*4882a593Smuzhiyun break;
3381*4882a593Smuzhiyun }
3382*4882a593Smuzhiyun }
3383*4882a593Smuzhiyun
3384*4882a593Smuzhiyun /** ixgbe_reset_hw_X550em - Perform hardware reset
3385*4882a593Smuzhiyun ** @hw: pointer to hardware structure
3386*4882a593Smuzhiyun **
3387*4882a593Smuzhiyun ** Resets the hardware by resetting the transmit and receive units, masks
3388*4882a593Smuzhiyun ** and clears all interrupts, perform a PHY reset, and perform a link (MAC)
3389*4882a593Smuzhiyun ** reset.
3390*4882a593Smuzhiyun **/
3391*4882a593Smuzhiyun static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
3392*4882a593Smuzhiyun {
3393*4882a593Smuzhiyun ixgbe_link_speed link_speed;
3394*4882a593Smuzhiyun s32 status;
3395*4882a593Smuzhiyun u32 ctrl = 0;
3396*4882a593Smuzhiyun u32 i;
3397*4882a593Smuzhiyun bool link_up = false;
3398*4882a593Smuzhiyun u32 swfw_mask = hw->phy.phy_semaphore_mask;
3399*4882a593Smuzhiyun
3400*4882a593Smuzhiyun /* Call adapter stop to disable Tx/Rx and clear interrupts */
3401*4882a593Smuzhiyun status = hw->mac.ops.stop_adapter(hw);
3402*4882a593Smuzhiyun if (status)
3403*4882a593Smuzhiyun return status;
3404*4882a593Smuzhiyun
3405*4882a593Smuzhiyun /* flush pending Tx transactions */
3406*4882a593Smuzhiyun ixgbe_clear_tx_pending(hw);
3407*4882a593Smuzhiyun
3408*4882a593Smuzhiyun /* set MDIO speed before talking to the PHY in case it's the 1st time */
3409*4882a593Smuzhiyun ixgbe_set_mdio_speed(hw);
3410*4882a593Smuzhiyun
3411*4882a593Smuzhiyun /* PHY ops must be identified and initialized prior to reset */
3412*4882a593Smuzhiyun status = hw->phy.ops.init(hw);
3413*4882a593Smuzhiyun if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
3414*4882a593Smuzhiyun status == IXGBE_ERR_PHY_ADDR_INVALID)
3415*4882a593Smuzhiyun return status;
3416*4882a593Smuzhiyun
3417*4882a593Smuzhiyun /* start the external PHY */
3418*4882a593Smuzhiyun if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
3419*4882a593Smuzhiyun status = ixgbe_init_ext_t_x550em(hw);
3420*4882a593Smuzhiyun if (status)
3421*4882a593Smuzhiyun return status;
3422*4882a593Smuzhiyun }
3423*4882a593Smuzhiyun
3424*4882a593Smuzhiyun /* Setup SFP module if there is one present. */
3425*4882a593Smuzhiyun if (hw->phy.sfp_setup_needed) {
3426*4882a593Smuzhiyun status = hw->mac.ops.setup_sfp(hw);
3427*4882a593Smuzhiyun hw->phy.sfp_setup_needed = false;
3428*4882a593Smuzhiyun }
3429*4882a593Smuzhiyun
3430*4882a593Smuzhiyun if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
3431*4882a593Smuzhiyun return status;
3432*4882a593Smuzhiyun
3433*4882a593Smuzhiyun /* Reset PHY */
3434*4882a593Smuzhiyun if (!hw->phy.reset_disable && hw->phy.ops.reset)
3435*4882a593Smuzhiyun hw->phy.ops.reset(hw);
3436*4882a593Smuzhiyun
3437*4882a593Smuzhiyun mac_reset_top:
3438*4882a593Smuzhiyun /* Issue global reset to the MAC. Needs to be SW reset if link is up.
3439*4882a593Smuzhiyun * If link reset is used when link is up, it might reset the PHY when
3440*4882a593Smuzhiyun * mng is using it. If link is down or the flag to force full link
3441*4882a593Smuzhiyun * reset is set, then perform link reset.
3442*4882a593Smuzhiyun */
3443*4882a593Smuzhiyun ctrl = IXGBE_CTRL_LNK_RST;
3444*4882a593Smuzhiyun
3445*4882a593Smuzhiyun if (!hw->force_full_reset) {
3446*4882a593Smuzhiyun hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
3447*4882a593Smuzhiyun if (link_up)
3448*4882a593Smuzhiyun ctrl = IXGBE_CTRL_RST;
3449*4882a593Smuzhiyun }
3450*4882a593Smuzhiyun
3451*4882a593Smuzhiyun status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
3452*4882a593Smuzhiyun if (status) {
3453*4882a593Smuzhiyun hw_dbg(hw, "semaphore failed with %d", status);
3454*4882a593Smuzhiyun return IXGBE_ERR_SWFW_SYNC;
3455*4882a593Smuzhiyun }
3456*4882a593Smuzhiyun
3457*4882a593Smuzhiyun ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
3458*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
3459*4882a593Smuzhiyun IXGBE_WRITE_FLUSH(hw);
3460*4882a593Smuzhiyun hw->mac.ops.release_swfw_sync(hw, swfw_mask);
3461*4882a593Smuzhiyun usleep_range(1000, 1200);
3462*4882a593Smuzhiyun
3463*4882a593Smuzhiyun /* Poll for reset bit to self-clear meaning reset is complete */
3464*4882a593Smuzhiyun for (i = 0; i < 10; i++) {
3465*4882a593Smuzhiyun ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
3466*4882a593Smuzhiyun if (!(ctrl & IXGBE_CTRL_RST_MASK))
3467*4882a593Smuzhiyun break;
3468*4882a593Smuzhiyun udelay(1);
3469*4882a593Smuzhiyun }
3470*4882a593Smuzhiyun
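/* The poll above allows roughly 10 usec (10 iterations of udelay(1))
 * plus register-read latency for the reset bit to self-clear; if it is
 * still set, the check below reports IXGBE_ERR_RESET_FAILED.
 */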
3471*4882a593Smuzhiyun if (ctrl & IXGBE_CTRL_RST_MASK) {
3472*4882a593Smuzhiyun status = IXGBE_ERR_RESET_FAILED;
3473*4882a593Smuzhiyun hw_dbg(hw, "Reset polling failed to complete.\n");
3474*4882a593Smuzhiyun }
3475*4882a593Smuzhiyun
3476*4882a593Smuzhiyun msleep(50);
3477*4882a593Smuzhiyun
3478*4882a593Smuzhiyun /* Double resets are required for recovery from certain error
3479*4882a593Smuzhiyun * conditions. Between resets, it is necessary to stall to
3480*4882a593Smuzhiyun * allow time for any pending HW events to complete.
3481*4882a593Smuzhiyun */
3482*4882a593Smuzhiyun if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
3483*4882a593Smuzhiyun hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
3484*4882a593Smuzhiyun goto mac_reset_top;
3485*4882a593Smuzhiyun }
3486*4882a593Smuzhiyun
3487*4882a593Smuzhiyun /* Store the permanent mac address */
3488*4882a593Smuzhiyun hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
3489*4882a593Smuzhiyun
3490*4882a593Smuzhiyun /* Store MAC address from RAR0, clear receive address registers, and
3491*4882a593Smuzhiyun * clear the multicast table. Also reset num_rar_entries to 128,
3492*4882a593Smuzhiyun * since we modify this value when programming the SAN MAC address.
3493*4882a593Smuzhiyun */
3494*4882a593Smuzhiyun hw->mac.num_rar_entries = 128;
3495*4882a593Smuzhiyun hw->mac.ops.init_rx_addrs(hw);
3496*4882a593Smuzhiyun
3497*4882a593Smuzhiyun ixgbe_set_mdio_speed(hw);
3498*4882a593Smuzhiyun
3499*4882a593Smuzhiyun if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
3500*4882a593Smuzhiyun ixgbe_setup_mux_ctl(hw);
3501*4882a593Smuzhiyun
3502*4882a593Smuzhiyun return status;
3503*4882a593Smuzhiyun }
3504*4882a593Smuzhiyun
3505*4882a593Smuzhiyun /** ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype
3506*4882a593Smuzhiyun * anti-spoofing
3507*4882a593Smuzhiyun * @hw: pointer to hardware structure
3508*4882a593Smuzhiyun * @enable: enable or disable switch for Ethertype anti-spoofing
3509*4882a593Smuzhiyun * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
3510*4882a593Smuzhiyun **/
3511*4882a593Smuzhiyun static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
3512*4882a593Smuzhiyun bool enable, int vf)
3513*4882a593Smuzhiyun {
3514*4882a593Smuzhiyun int vf_target_reg = vf >> 3;
3515*4882a593Smuzhiyun int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
3516*4882a593Smuzhiyun u32 pfvfspoof;
3517*4882a593Smuzhiyun
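/* Each PFVFSPOOF register carries the spoof-check bits for eight VF
 * pools: vf >> 3 selects the register and vf % 8 the lane, offset by
 * IXGBE_SPOOF_ETHERTYPEAS_SHIFT into the Ethertype anti-spoofing
 * field. For example, vf = 10 toggles bit (2 + shift) of PFVFSPOOF(1).
 */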
3518*4882a593Smuzhiyun pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
3519*4882a593Smuzhiyun if (enable)
3520*4882a593Smuzhiyun pfvfspoof |= BIT(vf_target_shift);
3521*4882a593Smuzhiyun else
3522*4882a593Smuzhiyun pfvfspoof &= ~BIT(vf_target_shift);
3523*4882a593Smuzhiyun
3524*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
3525*4882a593Smuzhiyun }
3526*4882a593Smuzhiyun
3527*4882a593Smuzhiyun /** ixgbe_set_source_address_pruning_X550 - Enable/Disable source address pruning
3528*4882a593Smuzhiyun * @hw: pointer to hardware structure
3529*4882a593Smuzhiyun * @enable: enable or disable source address pruning
3530*4882a593Smuzhiyun * @pool: Rx pool to set source address pruning for
3531*4882a593Smuzhiyun **/
3532*4882a593Smuzhiyun static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw,
3533*4882a593Smuzhiyun bool enable,
3534*4882a593Smuzhiyun unsigned int pool)
3535*4882a593Smuzhiyun {
3536*4882a593Smuzhiyun u64 pfflp;
3537*4882a593Smuzhiyun
3538*4882a593Smuzhiyun /* max rx pool is 63 */
3539*4882a593Smuzhiyun if (pool > 63)
3540*4882a593Smuzhiyun return;
3541*4882a593Smuzhiyun
3542*4882a593Smuzhiyun pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL);
3543*4882a593Smuzhiyun pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32;
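/* PFFLPL and PFFLPH together form a single 64-bit bitmap with one
 * source-address-pruning enable bit per Rx pool (0-63); the bit for
 * this pool is updated in the combined value and both halves are
 * written back below.
 */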
3544*4882a593Smuzhiyun
3545*4882a593Smuzhiyun if (enable)
3546*4882a593Smuzhiyun pfflp |= (1ULL << pool);
3547*4882a593Smuzhiyun else
3548*4882a593Smuzhiyun pfflp &= ~(1ULL << pool);
3549*4882a593Smuzhiyun
3550*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp);
3551*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32));
3552*4882a593Smuzhiyun }
3553*4882a593Smuzhiyun
3554*4882a593Smuzhiyun /**
3555*4882a593Smuzhiyun * ixgbe_setup_fc_backplane_x550em_a - Set up flow control
3556*4882a593Smuzhiyun * @hw: pointer to hardware structure
3557*4882a593Smuzhiyun *
3558*4882a593Smuzhiyun * Called at init time to set up flow control.
3559*4882a593Smuzhiyun **/
3560*4882a593Smuzhiyun static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
3561*4882a593Smuzhiyun {
3562*4882a593Smuzhiyun s32 status = 0;
3563*4882a593Smuzhiyun u32 an_cntl = 0;
3564*4882a593Smuzhiyun
3565*4882a593Smuzhiyun /* Validate the requested mode */
3566*4882a593Smuzhiyun if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
3567*4882a593Smuzhiyun hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
3568*4882a593Smuzhiyun return IXGBE_ERR_INVALID_LINK_SETTINGS;
3569*4882a593Smuzhiyun }
3570*4882a593Smuzhiyun
3571*4882a593Smuzhiyun if (hw->fc.requested_mode == ixgbe_fc_default)
3572*4882a593Smuzhiyun hw->fc.requested_mode = ixgbe_fc_full;
3573*4882a593Smuzhiyun
3574*4882a593Smuzhiyun /* Set up the 1G and 10G flow control advertisement registers so the
3575*4882a593Smuzhiyun * HW will be able to do FC autoneg once the cable is plugged in. If
3576*4882a593Smuzhiyun * we link at 10G, the 1G advertisement is harmless and vice versa.
3577*4882a593Smuzhiyun */
3578*4882a593Smuzhiyun status = hw->mac.ops.read_iosf_sb_reg(hw,
3579*4882a593Smuzhiyun IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
3580*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl);
3581*4882a593Smuzhiyun
3582*4882a593Smuzhiyun if (status) {
3583*4882a593Smuzhiyun hw_dbg(hw, "Auto-Negotiation did not complete\n");
3584*4882a593Smuzhiyun return status;
3585*4882a593Smuzhiyun }
3586*4882a593Smuzhiyun
3587*4882a593Smuzhiyun /* The possible values of fc.requested_mode are:
3588*4882a593Smuzhiyun * 0: Flow control is completely disabled
3589*4882a593Smuzhiyun * 1: Rx flow control is enabled (we can receive pause frames,
3590*4882a593Smuzhiyun * but not send pause frames).
3591*4882a593Smuzhiyun * 2: Tx flow control is enabled (we can send pause frames but
3592*4882a593Smuzhiyun * we do not support receiving pause frames).
3593*4882a593Smuzhiyun * 3: Both Rx and Tx flow control (symmetric) are enabled.
3594*4882a593Smuzhiyun * other: Invalid.
3595*4882a593Smuzhiyun */
3596*4882a593Smuzhiyun switch (hw->fc.requested_mode) {
3597*4882a593Smuzhiyun case ixgbe_fc_none:
3598*4882a593Smuzhiyun /* Flow control completely disabled by software override. */
3599*4882a593Smuzhiyun an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
3600*4882a593Smuzhiyun IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
3601*4882a593Smuzhiyun break;
3602*4882a593Smuzhiyun case ixgbe_fc_tx_pause:
3603*4882a593Smuzhiyun /* Tx Flow control is enabled, and Rx Flow control is
3604*4882a593Smuzhiyun * disabled by software override.
3605*4882a593Smuzhiyun */
3606*4882a593Smuzhiyun an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
3607*4882a593Smuzhiyun an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
3608*4882a593Smuzhiyun break;
3609*4882a593Smuzhiyun case ixgbe_fc_rx_pause:
3610*4882a593Smuzhiyun /* Rx Flow control is enabled and Tx Flow control is
3611*4882a593Smuzhiyun * disabled by software override. Since there really
3612*4882a593Smuzhiyun * isn't a way to advertise that we are capable of RX
3613*4882a593Smuzhiyun * Pause ONLY, we will advertise that we support both
3614*4882a593Smuzhiyun * symmetric and asymmetric Rx PAUSE, as such we fall
3615*4882a593Smuzhiyun * through to the fc_full statement. Later, we will
3616*4882a593Smuzhiyun * disable the adapter's ability to send PAUSE frames.
3617*4882a593Smuzhiyun */
3618*4882a593Smuzhiyun case ixgbe_fc_full:
3619*4882a593Smuzhiyun /* Flow control (both Rx and Tx) is enabled by SW override. */
3620*4882a593Smuzhiyun an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
3621*4882a593Smuzhiyun IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
3622*4882a593Smuzhiyun break;
3623*4882a593Smuzhiyun default:
3624*4882a593Smuzhiyun hw_err(hw, "Flow control param set incorrectly\n");
3625*4882a593Smuzhiyun return IXGBE_ERR_CONFIG;
3626*4882a593Smuzhiyun }
3627*4882a593Smuzhiyun
3628*4882a593Smuzhiyun status = hw->mac.ops.write_iosf_sb_reg(hw,
3629*4882a593Smuzhiyun IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
3630*4882a593Smuzhiyun IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl);
3631*4882a593Smuzhiyun
3632*4882a593Smuzhiyun /* Restart auto-negotiation. */
3633*4882a593Smuzhiyun status = ixgbe_restart_an_internal_phy_x550em(hw);
3634*4882a593Smuzhiyun
3635*4882a593Smuzhiyun return status;
3636*4882a593Smuzhiyun }
3637*4882a593Smuzhiyun
3638*4882a593Smuzhiyun /**
3639*4882a593Smuzhiyun * ixgbe_set_mux - Set mux for port 1 access with CS4227
3640*4882a593Smuzhiyun * @hw: pointer to hardware structure
3641*4882a593Smuzhiyun * @state: set mux if 1, clear if 0
3642*4882a593Smuzhiyun */
3643*4882a593Smuzhiyun static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state)
3644*4882a593Smuzhiyun {
3645*4882a593Smuzhiyun u32 esdp;
3646*4882a593Smuzhiyun
3647*4882a593Smuzhiyun if (!hw->bus.lan_id)
3648*4882a593Smuzhiyun return;
3649*4882a593Smuzhiyun esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
3650*4882a593Smuzhiyun if (state)
3651*4882a593Smuzhiyun esdp |= IXGBE_ESDP_SDP1;
3652*4882a593Smuzhiyun else
3653*4882a593Smuzhiyun esdp &= ~IXGBE_ESDP_SDP1;
3654*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
3655*4882a593Smuzhiyun IXGBE_WRITE_FLUSH(hw);
3656*4882a593Smuzhiyun }
3657*4882a593Smuzhiyun
3658*4882a593Smuzhiyun /**
3659*4882a593Smuzhiyun * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore
3660*4882a593Smuzhiyun * @hw: pointer to hardware structure
3661*4882a593Smuzhiyun * @mask: Mask to specify which semaphore to acquire
3662*4882a593Smuzhiyun *
3663*4882a593Smuzhiyun * Acquires the SWFW semaphore and sets the I2C MUX
3664*4882a593Smuzhiyun */
3665*4882a593Smuzhiyun static s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
3666*4882a593Smuzhiyun {
3667*4882a593Smuzhiyun s32 status;
3668*4882a593Smuzhiyun
3669*4882a593Smuzhiyun status = ixgbe_acquire_swfw_sync_X540(hw, mask);
3670*4882a593Smuzhiyun if (status)
3671*4882a593Smuzhiyun return status;
3672*4882a593Smuzhiyun
3673*4882a593Smuzhiyun if (mask & IXGBE_GSSR_I2C_MASK)
3674*4882a593Smuzhiyun ixgbe_set_mux(hw, 1);
3675*4882a593Smuzhiyun
3676*4882a593Smuzhiyun return 0;
3677*4882a593Smuzhiyun }
3678*4882a593Smuzhiyun
3679*4882a593Smuzhiyun /**
3680*4882a593Smuzhiyun * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore
3681*4882a593Smuzhiyun * @hw: pointer to hardware structure
3682*4882a593Smuzhiyun * @mask: Mask to specify which semaphore to release
3683*4882a593Smuzhiyun *
3684*4882a593Smuzhiyun * Releases the SWFW semaphore and clears the I2C MUX
3685*4882a593Smuzhiyun */
3686*4882a593Smuzhiyun static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
3687*4882a593Smuzhiyun {
3688*4882a593Smuzhiyun if (mask & IXGBE_GSSR_I2C_MASK)
3689*4882a593Smuzhiyun ixgbe_set_mux(hw, 0);
3690*4882a593Smuzhiyun
3691*4882a593Smuzhiyun ixgbe_release_swfw_sync_X540(hw, mask);
3692*4882a593Smuzhiyun }
3693*4882a593Smuzhiyun
3694*4882a593Smuzhiyun /**
3695*4882a593Smuzhiyun * ixgbe_acquire_swfw_sync_x550em_a - Acquire SWFW semaphore
3696*4882a593Smuzhiyun * @hw: pointer to hardware structure
3697*4882a593Smuzhiyun * @mask: Mask to specify which semaphore to acquire
3698*4882a593Smuzhiyun *
3699*4882a593Smuzhiyun * Acquires the SWFW semaphore and gets the shared PHY token as needed
3700*4882a593Smuzhiyun */
3701*4882a593Smuzhiyun static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
3702*4882a593Smuzhiyun {
3703*4882a593Smuzhiyun u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
3704*4882a593Smuzhiyun int retries = FW_PHY_TOKEN_RETRIES;
3705*4882a593Smuzhiyun s32 status;
3706*4882a593Smuzhiyun
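/* IXGBE_GSSR_TOKEN_SM is handled separately from the hardware SWFW
 * semaphore bits: it is stripped from hmask and obtained through
 * ixgbe_get_phy_token(). On IXGBE_ERR_TOKEN_RETRY the host semaphores
 * are released, the loop sleeps FW_PHY_TOKEN_DELAY msec and tries
 * again while retries remain; any other error is returned immediately.
 */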
3707*4882a593Smuzhiyun while (--retries) {
3708*4882a593Smuzhiyun status = 0;
3709*4882a593Smuzhiyun if (hmask)
3710*4882a593Smuzhiyun status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
3711*4882a593Smuzhiyun if (status)
3712*4882a593Smuzhiyun return status;
3713*4882a593Smuzhiyun if (!(mask & IXGBE_GSSR_TOKEN_SM))
3714*4882a593Smuzhiyun return 0;
3715*4882a593Smuzhiyun
3716*4882a593Smuzhiyun status = ixgbe_get_phy_token(hw);
3717*4882a593Smuzhiyun if (!status)
3718*4882a593Smuzhiyun return 0;
3719*4882a593Smuzhiyun if (hmask)
3720*4882a593Smuzhiyun ixgbe_release_swfw_sync_X540(hw, hmask);
3721*4882a593Smuzhiyun if (status != IXGBE_ERR_TOKEN_RETRY)
3722*4882a593Smuzhiyun return status;
3723*4882a593Smuzhiyun msleep(FW_PHY_TOKEN_DELAY);
3724*4882a593Smuzhiyun }
3725*4882a593Smuzhiyun
3726*4882a593Smuzhiyun return status;
3727*4882a593Smuzhiyun }
3728*4882a593Smuzhiyun
3729*4882a593Smuzhiyun /**
3730*4882a593Smuzhiyun * ixgbe_release_swfw_sync_x550em_a - Release SWFW semaphore
3731*4882a593Smuzhiyun * @hw: pointer to hardware structure
3732*4882a593Smuzhiyun * @mask: Mask to specify which semaphore to release
3733*4882a593Smuzhiyun *
3734*4882a593Smuzhiyun * Releases the SWFW semaphore and puts back the shared PHY token as needed
3735*4882a593Smuzhiyun */
3736*4882a593Smuzhiyun static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
3737*4882a593Smuzhiyun {
3738*4882a593Smuzhiyun u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
3739*4882a593Smuzhiyun
3740*4882a593Smuzhiyun if (mask & IXGBE_GSSR_TOKEN_SM)
3741*4882a593Smuzhiyun ixgbe_put_phy_token(hw);
3742*4882a593Smuzhiyun
3743*4882a593Smuzhiyun if (hmask)
3744*4882a593Smuzhiyun ixgbe_release_swfw_sync_X540(hw, hmask);
3745*4882a593Smuzhiyun }
3746*4882a593Smuzhiyun
3747*4882a593Smuzhiyun /**
3748*4882a593Smuzhiyun * ixgbe_read_phy_reg_x550a - Reads specified PHY register
3749*4882a593Smuzhiyun * @hw: pointer to hardware structure
3750*4882a593Smuzhiyun * @reg_addr: 32 bit address of PHY register to read
3751*4882a593Smuzhiyun * @device_type: 5 bit device type
3752*4882a593Smuzhiyun * @phy_data: Pointer to read data from PHY register
3753*4882a593Smuzhiyun *
3754*4882a593Smuzhiyun * Reads a value from a specified PHY register using the SWFW lock and PHY
3755*4882a593Smuzhiyun * Token. The PHY Token is needed since the MDIO is shared between two MAC
3756*4882a593Smuzhiyun * instances.
3757*4882a593Smuzhiyun */
3758*4882a593Smuzhiyun static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
3759*4882a593Smuzhiyun u32 device_type, u16 *phy_data)
3760*4882a593Smuzhiyun {
3761*4882a593Smuzhiyun u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
3762*4882a593Smuzhiyun s32 status;
3763*4882a593Smuzhiyun
3764*4882a593Smuzhiyun if (hw->mac.ops.acquire_swfw_sync(hw, mask))
3765*4882a593Smuzhiyun return IXGBE_ERR_SWFW_SYNC;
3766*4882a593Smuzhiyun
3767*4882a593Smuzhiyun status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
3768*4882a593Smuzhiyun
3769*4882a593Smuzhiyun hw->mac.ops.release_swfw_sync(hw, mask);
3770*4882a593Smuzhiyun
3771*4882a593Smuzhiyun return status;
3772*4882a593Smuzhiyun }
3773*4882a593Smuzhiyun
3774*4882a593Smuzhiyun /**
3775*4882a593Smuzhiyun * ixgbe_write_phy_reg_x550a - Writes specified PHY register
3776*4882a593Smuzhiyun * @hw: pointer to hardware structure
3777*4882a593Smuzhiyun * @reg_addr: 32 bit PHY register to write
3778*4882a593Smuzhiyun * @device_type: 5 bit device type
3779*4882a593Smuzhiyun * @phy_data: Data to write to the PHY register
3780*4882a593Smuzhiyun *
3781*4882a593Smuzhiyun * Writes a value to a specified PHY register using the SWFW lock and PHY Token.
3782*4882a593Smuzhiyun * The PHY Token is needed since the MDIO is shared between two MAC instances.
3783*4882a593Smuzhiyun */
3784*4882a593Smuzhiyun static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
3785*4882a593Smuzhiyun u32 device_type, u16 phy_data)
3786*4882a593Smuzhiyun {
3787*4882a593Smuzhiyun u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
3788*4882a593Smuzhiyun s32 status;
3789*4882a593Smuzhiyun
3790*4882a593Smuzhiyun if (hw->mac.ops.acquire_swfw_sync(hw, mask))
3791*4882a593Smuzhiyun return IXGBE_ERR_SWFW_SYNC;
3792*4882a593Smuzhiyun
3793*4882a593Smuzhiyun status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data);
3794*4882a593Smuzhiyun hw->mac.ops.release_swfw_sync(hw, mask);
3795*4882a593Smuzhiyun
3796*4882a593Smuzhiyun return status;
3797*4882a593Smuzhiyun }
3798*4882a593Smuzhiyun
3799*4882a593Smuzhiyun #define X550_COMMON_MAC \
3800*4882a593Smuzhiyun .init_hw = &ixgbe_init_hw_generic, \
3801*4882a593Smuzhiyun .start_hw = &ixgbe_start_hw_X540, \
3802*4882a593Smuzhiyun .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, \
3803*4882a593Smuzhiyun .enable_rx_dma = &ixgbe_enable_rx_dma_generic, \
3804*4882a593Smuzhiyun .get_mac_addr = &ixgbe_get_mac_addr_generic, \
3805*4882a593Smuzhiyun .get_device_caps = &ixgbe_get_device_caps_generic, \
3806*4882a593Smuzhiyun .stop_adapter = &ixgbe_stop_adapter_generic, \
3807*4882a593Smuzhiyun .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, \
3808*4882a593Smuzhiyun .read_analog_reg8 = NULL, \
3809*4882a593Smuzhiyun .write_analog_reg8 = NULL, \
3810*4882a593Smuzhiyun .set_rxpba = &ixgbe_set_rxpba_generic, \
3811*4882a593Smuzhiyun .check_link = &ixgbe_check_mac_link_generic, \
3812*4882a593Smuzhiyun .blink_led_start = &ixgbe_blink_led_start_X540, \
3813*4882a593Smuzhiyun .blink_led_stop = &ixgbe_blink_led_stop_X540, \
3814*4882a593Smuzhiyun .set_rar = &ixgbe_set_rar_generic, \
3815*4882a593Smuzhiyun .clear_rar = &ixgbe_clear_rar_generic, \
3816*4882a593Smuzhiyun .set_vmdq = &ixgbe_set_vmdq_generic, \
3817*4882a593Smuzhiyun .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, \
3818*4882a593Smuzhiyun .clear_vmdq = &ixgbe_clear_vmdq_generic, \
3819*4882a593Smuzhiyun .init_rx_addrs = &ixgbe_init_rx_addrs_generic, \
3820*4882a593Smuzhiyun .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, \
3821*4882a593Smuzhiyun .enable_mc = &ixgbe_enable_mc_generic, \
3822*4882a593Smuzhiyun .disable_mc = &ixgbe_disable_mc_generic, \
3823*4882a593Smuzhiyun .clear_vfta = &ixgbe_clear_vfta_generic, \
3824*4882a593Smuzhiyun .set_vfta = &ixgbe_set_vfta_generic, \
3825*4882a593Smuzhiyun .fc_enable = &ixgbe_fc_enable_generic, \
3826*4882a593Smuzhiyun .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_x550, \
3827*4882a593Smuzhiyun .init_uta_tables = &ixgbe_init_uta_tables_generic, \
3828*4882a593Smuzhiyun .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \
3829*4882a593Smuzhiyun .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \
3830*4882a593Smuzhiyun .set_source_address_pruning = \
3831*4882a593Smuzhiyun &ixgbe_set_source_address_pruning_X550, \
3832*4882a593Smuzhiyun .set_ethertype_anti_spoofing = \
3833*4882a593Smuzhiyun &ixgbe_set_ethertype_anti_spoofing_X550, \
3834*4882a593Smuzhiyun .disable_rx_buff = &ixgbe_disable_rx_buff_generic, \
3835*4882a593Smuzhiyun .enable_rx_buff = &ixgbe_enable_rx_buff_generic, \
3836*4882a593Smuzhiyun .get_thermal_sensor_data = NULL, \
3837*4882a593Smuzhiyun .init_thermal_sensor_thresh = NULL, \
3838*4882a593Smuzhiyun .fw_recovery_mode = &ixgbe_fw_recovery_mode_X550, \
3839*4882a593Smuzhiyun .enable_rx = &ixgbe_enable_rx_generic, \
3840*4882a593Smuzhiyun .disable_rx = &ixgbe_disable_rx_x550, \
3841*4882a593Smuzhiyun
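/* X550_COMMON_MAC above provides the MAC ops shared by all X550-class
 * devices; each mac_ops_* table below adds only the variant-specific
 * entries (reset, media type, semaphore handling, IOSF access, ...).
 */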
3842*4882a593Smuzhiyun static const struct ixgbe_mac_operations mac_ops_X550 = {
3843*4882a593Smuzhiyun X550_COMMON_MAC
3844*4882a593Smuzhiyun .led_on = ixgbe_led_on_generic,
3845*4882a593Smuzhiyun .led_off = ixgbe_led_off_generic,
3846*4882a593Smuzhiyun .init_led_link_act = ixgbe_init_led_link_act_generic,
3847*4882a593Smuzhiyun .reset_hw = &ixgbe_reset_hw_X540,
3848*4882a593Smuzhiyun .get_media_type = &ixgbe_get_media_type_X540,
3849*4882a593Smuzhiyun .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
3850*4882a593Smuzhiyun .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
3851*4882a593Smuzhiyun .setup_link = &ixgbe_setup_mac_link_X540,
3852*4882a593Smuzhiyun .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
3853*4882a593Smuzhiyun .get_bus_info = &ixgbe_get_bus_info_generic,
3854*4882a593Smuzhiyun .setup_sfp = NULL,
3855*4882a593Smuzhiyun .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
3856*4882a593Smuzhiyun .release_swfw_sync = &ixgbe_release_swfw_sync_X540,
3857*4882a593Smuzhiyun .init_swfw_sync = &ixgbe_init_swfw_sync_X540,
3858*4882a593Smuzhiyun .prot_autoc_read = prot_autoc_read_generic,
3859*4882a593Smuzhiyun .prot_autoc_write = prot_autoc_write_generic,
3860*4882a593Smuzhiyun .setup_fc = ixgbe_setup_fc_generic,
3861*4882a593Smuzhiyun .fc_autoneg = ixgbe_fc_autoneg,
3862*4882a593Smuzhiyun };
3863*4882a593Smuzhiyun
3864*4882a593Smuzhiyun static const struct ixgbe_mac_operations mac_ops_X550EM_x = {
3865*4882a593Smuzhiyun X550_COMMON_MAC
3866*4882a593Smuzhiyun .led_on = ixgbe_led_on_t_x550em,
3867*4882a593Smuzhiyun .led_off = ixgbe_led_off_t_x550em,
3868*4882a593Smuzhiyun .init_led_link_act = ixgbe_init_led_link_act_generic,
3869*4882a593Smuzhiyun .reset_hw = &ixgbe_reset_hw_X550em,
3870*4882a593Smuzhiyun .get_media_type = &ixgbe_get_media_type_X550em,
3871*4882a593Smuzhiyun .get_san_mac_addr = NULL,
3872*4882a593Smuzhiyun .get_wwn_prefix = NULL,
3873*4882a593Smuzhiyun .setup_link = &ixgbe_setup_mac_link_X540,
3874*4882a593Smuzhiyun .get_link_capabilities = &ixgbe_get_link_capabilities_X550em,
3875*4882a593Smuzhiyun .get_bus_info = &ixgbe_get_bus_info_X550em,
3876*4882a593Smuzhiyun .setup_sfp = ixgbe_setup_sfp_modules_X550em,
3877*4882a593Smuzhiyun .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em,
3878*4882a593Smuzhiyun .release_swfw_sync = &ixgbe_release_swfw_sync_X550em,
3879*4882a593Smuzhiyun .init_swfw_sync = &ixgbe_init_swfw_sync_X540,
3880*4882a593Smuzhiyun .setup_fc = NULL, /* defined later */
3881*4882a593Smuzhiyun .fc_autoneg = ixgbe_fc_autoneg,
3882*4882a593Smuzhiyun .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550,
3883*4882a593Smuzhiyun .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550,
3884*4882a593Smuzhiyun };
3885*4882a593Smuzhiyun
3886*4882a593Smuzhiyun static const struct ixgbe_mac_operations mac_ops_X550EM_x_fw = {
3887*4882a593Smuzhiyun X550_COMMON_MAC
3888*4882a593Smuzhiyun .led_on = NULL,
3889*4882a593Smuzhiyun .led_off = NULL,
3890*4882a593Smuzhiyun .init_led_link_act = NULL,
3891*4882a593Smuzhiyun .reset_hw = &ixgbe_reset_hw_X550em,
3892*4882a593Smuzhiyun .get_media_type = &ixgbe_get_media_type_X550em,
3893*4882a593Smuzhiyun .get_san_mac_addr = NULL,
3894*4882a593Smuzhiyun .get_wwn_prefix = NULL,
3895*4882a593Smuzhiyun .setup_link = &ixgbe_setup_mac_link_X540,
3896*4882a593Smuzhiyun .get_link_capabilities = &ixgbe_get_link_capabilities_X550em,
3897*4882a593Smuzhiyun .get_bus_info = &ixgbe_get_bus_info_X550em,
3898*4882a593Smuzhiyun .setup_sfp = ixgbe_setup_sfp_modules_X550em,
3899*4882a593Smuzhiyun .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em,
3900*4882a593Smuzhiyun .release_swfw_sync = &ixgbe_release_swfw_sync_X550em,
3901*4882a593Smuzhiyun .init_swfw_sync = &ixgbe_init_swfw_sync_X540,
3902*4882a593Smuzhiyun .setup_fc = NULL,
3903*4882a593Smuzhiyun .fc_autoneg = ixgbe_fc_autoneg,
3904*4882a593Smuzhiyun .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550,
3905*4882a593Smuzhiyun .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550,
3906*4882a593Smuzhiyun };
3907*4882a593Smuzhiyun
3908*4882a593Smuzhiyun static const struct ixgbe_mac_operations mac_ops_x550em_a = {
3909*4882a593Smuzhiyun X550_COMMON_MAC
3910*4882a593Smuzhiyun .led_on = ixgbe_led_on_t_x550em,
3911*4882a593Smuzhiyun .led_off = ixgbe_led_off_t_x550em,
3912*4882a593Smuzhiyun .init_led_link_act = ixgbe_init_led_link_act_generic,
3913*4882a593Smuzhiyun .reset_hw = ixgbe_reset_hw_X550em,
3914*4882a593Smuzhiyun .get_media_type = ixgbe_get_media_type_X550em,
3915*4882a593Smuzhiyun .get_san_mac_addr = NULL,
3916*4882a593Smuzhiyun .get_wwn_prefix = NULL,
3917*4882a593Smuzhiyun .setup_link = &ixgbe_setup_mac_link_X540,
3918*4882a593Smuzhiyun .get_link_capabilities = ixgbe_get_link_capabilities_X550em,
3919*4882a593Smuzhiyun .get_bus_info = ixgbe_get_bus_info_X550em,
3920*4882a593Smuzhiyun .setup_sfp = ixgbe_setup_sfp_modules_X550em,
3921*4882a593Smuzhiyun .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a,
3922*4882a593Smuzhiyun .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a,
3923*4882a593Smuzhiyun .setup_fc = ixgbe_setup_fc_x550em,
3924*4882a593Smuzhiyun .fc_autoneg = ixgbe_fc_autoneg,
3925*4882a593Smuzhiyun .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a,
3926*4882a593Smuzhiyun .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a,
3927*4882a593Smuzhiyun };
3928*4882a593Smuzhiyun
3929*4882a593Smuzhiyun static const struct ixgbe_mac_operations mac_ops_x550em_a_fw = {
3930*4882a593Smuzhiyun X550_COMMON_MAC
3931*4882a593Smuzhiyun .led_on = ixgbe_led_on_generic,
3932*4882a593Smuzhiyun .led_off = ixgbe_led_off_generic,
3933*4882a593Smuzhiyun .init_led_link_act = ixgbe_init_led_link_act_generic,
3934*4882a593Smuzhiyun .reset_hw = ixgbe_reset_hw_X550em,
3935*4882a593Smuzhiyun .get_media_type = ixgbe_get_media_type_X550em,
3936*4882a593Smuzhiyun .get_san_mac_addr = NULL,
3937*4882a593Smuzhiyun .get_wwn_prefix = NULL,
3938*4882a593Smuzhiyun .setup_link = NULL, /* defined later */
3939*4882a593Smuzhiyun .get_link_capabilities = ixgbe_get_link_capabilities_X550em,
3940*4882a593Smuzhiyun .get_bus_info = ixgbe_get_bus_info_X550em,
3941*4882a593Smuzhiyun .setup_sfp = ixgbe_setup_sfp_modules_X550em,
3942*4882a593Smuzhiyun .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a,
3943*4882a593Smuzhiyun .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a,
3944*4882a593Smuzhiyun .setup_fc = ixgbe_setup_fc_x550em,
3945*4882a593Smuzhiyun .fc_autoneg = ixgbe_fc_autoneg,
3946*4882a593Smuzhiyun .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a,
3947*4882a593Smuzhiyun .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a,
3948*4882a593Smuzhiyun };
3949*4882a593Smuzhiyun
3950*4882a593Smuzhiyun #define X550_COMMON_EEP \
3951*4882a593Smuzhiyun .read = &ixgbe_read_ee_hostif_X550, \
3952*4882a593Smuzhiyun .read_buffer = &ixgbe_read_ee_hostif_buffer_X550, \
3953*4882a593Smuzhiyun .write = &ixgbe_write_ee_hostif_X550, \
3954*4882a593Smuzhiyun .write_buffer = &ixgbe_write_ee_hostif_buffer_X550, \
3955*4882a593Smuzhiyun .validate_checksum = &ixgbe_validate_eeprom_checksum_X550, \
3956*4882a593Smuzhiyun .update_checksum = &ixgbe_update_eeprom_checksum_X550, \
3957*4882a593Smuzhiyun .calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \
3958*4882a593Smuzhiyun
3959*4882a593Smuzhiyun static const struct ixgbe_eeprom_operations eeprom_ops_X550 = {
3960*4882a593Smuzhiyun X550_COMMON_EEP
3961*4882a593Smuzhiyun .init_params = &ixgbe_init_eeprom_params_X550,
3962*4882a593Smuzhiyun };
3963*4882a593Smuzhiyun
3964*4882a593Smuzhiyun static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
3965*4882a593Smuzhiyun X550_COMMON_EEP
3966*4882a593Smuzhiyun .init_params = &ixgbe_init_eeprom_params_X540,
3967*4882a593Smuzhiyun };
3968*4882a593Smuzhiyun
3969*4882a593Smuzhiyun #define X550_COMMON_PHY \
3970*4882a593Smuzhiyun .identify_sfp = &ixgbe_identify_module_generic, \
3971*4882a593Smuzhiyun .reset = NULL, \
3972*4882a593Smuzhiyun .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, \
3973*4882a593Smuzhiyun .read_i2c_byte = &ixgbe_read_i2c_byte_generic, \
3974*4882a593Smuzhiyun .write_i2c_byte = &ixgbe_write_i2c_byte_generic, \
3975*4882a593Smuzhiyun .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \
3976*4882a593Smuzhiyun .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \
3977*4882a593Smuzhiyun .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \
3978*4882a593Smuzhiyun .setup_link = &ixgbe_setup_phy_link_generic, \
3979*4882a593Smuzhiyun .set_phy_power = NULL,
3980*4882a593Smuzhiyun
3981*4882a593Smuzhiyun static const struct ixgbe_phy_operations phy_ops_X550 = {
3982*4882a593Smuzhiyun X550_COMMON_PHY
3983*4882a593Smuzhiyun .check_overtemp = &ixgbe_tn_check_overtemp,
3984*4882a593Smuzhiyun .init = NULL,
3985*4882a593Smuzhiyun .identify = &ixgbe_identify_phy_generic,
3986*4882a593Smuzhiyun .read_reg = &ixgbe_read_phy_reg_generic,
3987*4882a593Smuzhiyun .write_reg = &ixgbe_write_phy_reg_generic,
3988*4882a593Smuzhiyun };
3989*4882a593Smuzhiyun
3990*4882a593Smuzhiyun static const struct ixgbe_phy_operations phy_ops_X550EM_x = {
3991*4882a593Smuzhiyun X550_COMMON_PHY
3992*4882a593Smuzhiyun .check_overtemp = &ixgbe_tn_check_overtemp,
3993*4882a593Smuzhiyun .init = &ixgbe_init_phy_ops_X550em,
3994*4882a593Smuzhiyun .identify = &ixgbe_identify_phy_x550em,
3995*4882a593Smuzhiyun .read_reg = &ixgbe_read_phy_reg_generic,
3996*4882a593Smuzhiyun .write_reg = &ixgbe_write_phy_reg_generic,
3997*4882a593Smuzhiyun };
3998*4882a593Smuzhiyun
3999*4882a593Smuzhiyun static const struct ixgbe_phy_operations phy_ops_x550em_x_fw = {
4000*4882a593Smuzhiyun X550_COMMON_PHY
4001*4882a593Smuzhiyun .check_overtemp = NULL,
4002*4882a593Smuzhiyun .init = ixgbe_init_phy_ops_X550em,
4003*4882a593Smuzhiyun .identify = ixgbe_identify_phy_x550em,
4004*4882a593Smuzhiyun .read_reg = NULL,
4005*4882a593Smuzhiyun .write_reg = NULL,
4006*4882a593Smuzhiyun .read_reg_mdi = NULL,
4007*4882a593Smuzhiyun .write_reg_mdi = NULL,
4008*4882a593Smuzhiyun };
4009*4882a593Smuzhiyun
4010*4882a593Smuzhiyun static const struct ixgbe_phy_operations phy_ops_x550em_a = {
4011*4882a593Smuzhiyun X550_COMMON_PHY
4012*4882a593Smuzhiyun .check_overtemp = &ixgbe_tn_check_overtemp,
4013*4882a593Smuzhiyun .init = &ixgbe_init_phy_ops_X550em,
4014*4882a593Smuzhiyun .identify = &ixgbe_identify_phy_x550em,
4015*4882a593Smuzhiyun .read_reg = &ixgbe_read_phy_reg_x550a,
4016*4882a593Smuzhiyun .write_reg = &ixgbe_write_phy_reg_x550a,
4017*4882a593Smuzhiyun .read_reg_mdi = &ixgbe_read_phy_reg_mdi,
4018*4882a593Smuzhiyun .write_reg_mdi = &ixgbe_write_phy_reg_mdi,
4019*4882a593Smuzhiyun };
4020*4882a593Smuzhiyun
4021*4882a593Smuzhiyun static const struct ixgbe_phy_operations phy_ops_x550em_a_fw = {
4022*4882a593Smuzhiyun X550_COMMON_PHY
4023*4882a593Smuzhiyun .check_overtemp = ixgbe_check_overtemp_fw,
4024*4882a593Smuzhiyun .init = ixgbe_init_phy_ops_X550em,
4025*4882a593Smuzhiyun .identify = ixgbe_identify_phy_fw,
4026*4882a593Smuzhiyun .read_reg = NULL,
4027*4882a593Smuzhiyun .write_reg = NULL,
4028*4882a593Smuzhiyun .read_reg_mdi = NULL,
4029*4882a593Smuzhiyun .write_reg_mdi = NULL,
4030*4882a593Smuzhiyun };
4031*4882a593Smuzhiyun
4032*4882a593Smuzhiyun static const struct ixgbe_link_operations link_ops_x550em_x = {
4033*4882a593Smuzhiyun .read_link = &ixgbe_read_i2c_combined_generic,
4034*4882a593Smuzhiyun .read_link_unlocked = &ixgbe_read_i2c_combined_generic_unlocked,
4035*4882a593Smuzhiyun .write_link = &ixgbe_write_i2c_combined_generic,
4036*4882a593Smuzhiyun .write_link_unlocked = &ixgbe_write_i2c_combined_generic_unlocked,
4037*4882a593Smuzhiyun };
4038*4882a593Smuzhiyun
4039*4882a593Smuzhiyun static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
4040*4882a593Smuzhiyun IXGBE_MVALS_INIT(X550)
4041*4882a593Smuzhiyun };
4042*4882a593Smuzhiyun
4043*4882a593Smuzhiyun static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = {
4044*4882a593Smuzhiyun IXGBE_MVALS_INIT(X550EM_x)
4045*4882a593Smuzhiyun };
4046*4882a593Smuzhiyun
4047*4882a593Smuzhiyun static const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = {
4048*4882a593Smuzhiyun IXGBE_MVALS_INIT(X550EM_a)
4049*4882a593Smuzhiyun };
4050*4882a593Smuzhiyun
4051*4882a593Smuzhiyun const struct ixgbe_info ixgbe_X550_info = {
4052*4882a593Smuzhiyun .mac = ixgbe_mac_X550,
4053*4882a593Smuzhiyun .get_invariants = &ixgbe_get_invariants_X540,
4054*4882a593Smuzhiyun .mac_ops = &mac_ops_X550,
4055*4882a593Smuzhiyun .eeprom_ops = &eeprom_ops_X550,
4056*4882a593Smuzhiyun .phy_ops = &phy_ops_X550,
4057*4882a593Smuzhiyun .mbx_ops = &mbx_ops_generic,
4058*4882a593Smuzhiyun .mvals = ixgbe_mvals_X550,
4059*4882a593Smuzhiyun };
4060*4882a593Smuzhiyun
4061*4882a593Smuzhiyun const struct ixgbe_info ixgbe_X550EM_x_info = {
4062*4882a593Smuzhiyun .mac = ixgbe_mac_X550EM_x,
4063*4882a593Smuzhiyun .get_invariants = &ixgbe_get_invariants_X550_x,
4064*4882a593Smuzhiyun .mac_ops = &mac_ops_X550EM_x,
4065*4882a593Smuzhiyun .eeprom_ops = &eeprom_ops_X550EM_x,
4066*4882a593Smuzhiyun .phy_ops = &phy_ops_X550EM_x,
4067*4882a593Smuzhiyun .mbx_ops = &mbx_ops_generic,
4068*4882a593Smuzhiyun .mvals = ixgbe_mvals_X550EM_x,
4069*4882a593Smuzhiyun .link_ops = &link_ops_x550em_x,
4070*4882a593Smuzhiyun };
4071*4882a593Smuzhiyun
4072*4882a593Smuzhiyun const struct ixgbe_info ixgbe_x550em_x_fw_info = {
4073*4882a593Smuzhiyun .mac = ixgbe_mac_X550EM_x,
4074*4882a593Smuzhiyun .get_invariants = ixgbe_get_invariants_X550_x_fw,
4075*4882a593Smuzhiyun .mac_ops = &mac_ops_X550EM_x_fw,
4076*4882a593Smuzhiyun .eeprom_ops = &eeprom_ops_X550EM_x,
4077*4882a593Smuzhiyun .phy_ops = &phy_ops_x550em_x_fw,
4078*4882a593Smuzhiyun .mbx_ops = &mbx_ops_generic,
4079*4882a593Smuzhiyun .mvals = ixgbe_mvals_X550EM_x,
4080*4882a593Smuzhiyun };
4081*4882a593Smuzhiyun
4082*4882a593Smuzhiyun const struct ixgbe_info ixgbe_x550em_a_info = {
4083*4882a593Smuzhiyun .mac = ixgbe_mac_x550em_a,
4084*4882a593Smuzhiyun .get_invariants = &ixgbe_get_invariants_X550_a,
4085*4882a593Smuzhiyun .mac_ops = &mac_ops_x550em_a,
4086*4882a593Smuzhiyun .eeprom_ops = &eeprom_ops_X550EM_x,
4087*4882a593Smuzhiyun .phy_ops = &phy_ops_x550em_a,
4088*4882a593Smuzhiyun .mbx_ops = &mbx_ops_generic,
4089*4882a593Smuzhiyun .mvals = ixgbe_mvals_x550em_a,
4090*4882a593Smuzhiyun };
4091*4882a593Smuzhiyun
4092*4882a593Smuzhiyun const struct ixgbe_info ixgbe_x550em_a_fw_info = {
4093*4882a593Smuzhiyun .mac = ixgbe_mac_x550em_a,
4094*4882a593Smuzhiyun .get_invariants = ixgbe_get_invariants_X550_a_fw,
4095*4882a593Smuzhiyun .mac_ops = &mac_ops_x550em_a_fw,
4096*4882a593Smuzhiyun .eeprom_ops = &eeprom_ops_X550EM_x,
4097*4882a593Smuzhiyun .phy_ops = &phy_ops_x550em_a_fw,
4098*4882a593Smuzhiyun .mbx_ops = &mbx_ops_generic,
4099*4882a593Smuzhiyun .mvals = ixgbe_mvals_x550em_a,
4100*4882a593Smuzhiyun };
4101