// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include "ixgbe.h"
#include "ixgbe_phy.h"
#include "ixgbe_x540.h"

#define IXGBE_X540_MAX_TX_QUEUES	128
#define IXGBE_X540_MAX_RX_QUEUES	128
#define IXGBE_X540_RAR_ENTRIES		128
#define IXGBE_X540_MC_TBL_SIZE		128
#define IXGBE_X540_VFT_TBL_SIZE		128
#define IXGBE_X540_RX_PB_SIZE		384

static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);

enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
{
	return ixgbe_media_type_copper;
}

s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;

	/* set_phy_power was set by default to NULL */
	phy->ops.set_phy_power = ixgbe_set_copper_phy_power;

	mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
	mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
	mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
	mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE;
	mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
	mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);

	return 0;
}

/**
 *  ixgbe_setup_mac_link_X540 - Set the auto advertised capabilities
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg_wait_to_complete: true when waiting for completion is needed
 **/
s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
			      bool autoneg_wait_to_complete)
{
	return hw->phy.ops.setup_link_speed(hw, speed,
					    autoneg_wait_to_complete);
}

/**
 *  ixgbe_reset_hw_X540 - Perform hardware reset
 *  @hw: pointer to hardware structure
 *
 *  Resets the hardware by resetting the transmit and receive units, masks
 *  and clears all interrupts, performs a PHY reset, and performs a link
 *  (MAC) reset.
 **/
s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
	s32 status;
	u32 ctrl, i;
	u32 swfw_mask = hw->phy.phy_semaphore_mask;

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status)
		return status;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

mac_reset_top:
	status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
	if (status) {
		hw_dbg(hw, "semaphore failed with %d", status);
		return IXGBE_ERR_SWFW_SYNC;
	}

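	/* Request the link (MAC) reset by setting CTRL.RST while holding the
	 * PHY semaphore acquired above.
	 */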
	ctrl = IXGBE_CTRL_RST;
	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);
	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
	usleep_range(1000, 1200);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
		udelay(1);
	}

	if (ctrl & IXGBE_CTRL_RST_MASK) {
		status = IXGBE_ERR_RESET_FAILED;
		hw_dbg(hw, "Reset polling failed to complete.\n");
	}
	msleep(100);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/* Set the Rx packet buffer size. */
	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (is_valid_ether_addr(hw->mac.san_addr)) {
		/* Save the SAN MAC RAR index */
		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;

		hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for this RAR */
		hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
				       IXGBE_CLEAR_VMDQ_ALL);

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

	return status;
}

/**
 *  ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx
 *  @hw: pointer to hardware structure
 *
 *  Starts the hardware using the generic start_hw function and then the
 *  generation-2 start_hw function.
 *  Then performs revision-specific operations, if any.
 **/
s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
{
	s32 ret_val;

	ret_val = ixgbe_start_hw_generic(hw);
	if (ret_val)
		return ret_val;

	return ixgbe_start_hw_gen2(hw);
}

/**
 *  ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
 *  @hw: pointer to hardware structure
 *
 *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
 *  ixgbe_hw struct in order to set up EEPROM access.
 **/
s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	u32 eec;
	u16 eeprom_size;

	if (eeprom->type == ixgbe_eeprom_uninitialized) {
		eeprom->semaphore_delay = 10;
		eeprom->type = ixgbe_flash;

		eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
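		/* EEC.SIZE encodes the EEPROM size; the word count is
		 * 2^(SIZE + IXGBE_EEPROM_WORD_SIZE_SHIFT).
		 */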
		eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
				    IXGBE_EEC_SIZE_SHIFT);
		eeprom->word_size = BIT(eeprom_size +
					IXGBE_EEPROM_WORD_SIZE_SHIFT);

		hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
		       eeprom->type, eeprom->word_size);
	}

	return 0;
}

/**
 *  ixgbe_read_eerd_X540 - Read EEPROM word using EERD
 *  @hw: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to read
 *  @data: word read from the EEPROM
 *
 *  Reads a 16 bit word from the EEPROM using the EERD register.
 **/
static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	s32 status;

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
		return IXGBE_ERR_SWFW_SYNC;

	status = ixgbe_read_eerd_generic(hw, offset, data);

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
	return status;
}

/**
 *  ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD
 *  @hw: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to read
 *  @words: number of words
 *  @data: word(s) read from the EEPROM
 *
 *  Reads 16 bit word(s) from the EEPROM using the EERD register.
 **/
static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
				       u16 offset, u16 words, u16 *data)
{
	s32 status;

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
		return IXGBE_ERR_SWFW_SYNC;

	status = ixgbe_read_eerd_buffer_generic(hw, offset, words, data);

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
	return status;
}

/**
 *  ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
 *  @hw: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to write
 *  @data: word to write to the EEPROM
 *
 *  Writes a 16 bit word to the EEPROM using the EEWR register.
 **/
static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	s32 status;

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
		return IXGBE_ERR_SWFW_SYNC;

	status = ixgbe_write_eewr_generic(hw, offset, data);

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
	return status;
}

/**
 *  ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR
 *  @hw: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to write
 *  @words: number of words
 *  @data: word(s) to write to the EEPROM
 *
 *  Writes 16 bit word(s) to the EEPROM using the EEWR register.
 **/
static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
					u16 offset, u16 words, u16 *data)
{
	s32 status;

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
		return IXGBE_ERR_SWFW_SYNC;

	status = ixgbe_write_eewr_buffer_generic(hw, offset, words, data);

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
	return status;
}

/**
 *  ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
 *  @hw: pointer to hardware structure
 *
 *  This function does not use synchronization for EERD and EEWR. It can
 *  be used internally by functions which utilize ixgbe_acquire_swfw_sync_X540.
 **/
static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
	u16 i;
	u16 j;
	u16 checksum = 0;
	u16 length = 0;
	u16 pointer = 0;
	u16 word = 0;
	u16 checksum_last_word = IXGBE_EEPROM_CHECKSUM;
	u16 ptr_start = IXGBE_PCIE_ANALOG_PTR;

	/*
	 * Do not use hw->eeprom.ops.read because we do not want to take
	 * the synchronization semaphores here. Instead use
	 * ixgbe_read_eerd_generic
	 */

	/* Include 0x0-0x3F in the checksum */
	for (i = 0; i < checksum_last_word; i++) {
		if (ixgbe_read_eerd_generic(hw, i, &word)) {
			hw_dbg(hw, "EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}
		checksum += word;
	}

	/*
	 * Include all data from pointers 0x3, 0x6-0xE.  This excludes the
	 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
	 */
	for (i = ptr_start; i < IXGBE_FW_PTR; i++) {
		if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
			continue;

		if (ixgbe_read_eerd_generic(hw, i, &pointer)) {
			hw_dbg(hw, "EEPROM read failed\n");
			break;
		}

		/* Skip pointer section if the pointer is invalid. */
		if (pointer == 0xFFFF || pointer == 0 ||
		    pointer >= hw->eeprom.word_size)
			continue;

		if (ixgbe_read_eerd_generic(hw, pointer, &length)) {
			hw_dbg(hw, "EEPROM read failed\n");
			return IXGBE_ERR_EEPROM;
		}

		/* Skip pointer section if length is invalid. */
		if (length == 0xFFFF || length == 0 ||
		    (pointer + length) >= hw->eeprom.word_size)
			continue;

		for (j = pointer + 1; j <= pointer + length; j++) {
			if (ixgbe_read_eerd_generic(hw, j, &word)) {
				hw_dbg(hw, "EEPROM read failed\n");
				return IXGBE_ERR_EEPROM;
			}
			checksum += word;
		}
	}

	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

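	/* Return the checksum as a non-negative s32; callers of calc_checksum
	 * treat negative return values as errors.
	 */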
	return (s32)checksum;
}

/**
 *  ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum
 *  @hw: pointer to hardware structure
 *  @checksum_val: calculated checksum
 *
 *  Performs checksum calculation and validates the EEPROM checksum.  If the
 *  caller does not need checksum_val, the value can be NULL.
 **/
static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
					       u16 *checksum_val)
{
	s32 status;
	u16 checksum;
	u16 read_checksum = 0;

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status) {
		hw_dbg(hw, "EEPROM read failed\n");
		return status;
	}

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
		return IXGBE_ERR_SWFW_SYNC;

	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		goto out;

	checksum = (u16)(status & 0xffff);

	/* Do not use hw->eeprom.ops.read because we do not want to take
	 * the synchronization semaphores twice here.
	 */
	status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
					 &read_checksum);
	if (status)
		goto out;

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (read_checksum != checksum) {
		hw_dbg(hw, "Invalid EEPROM checksum");
		status = IXGBE_ERR_EEPROM_CHECKSUM;
	}

	/* If the user cares, return the calculated checksum */
	if (checksum_val)
		*checksum_val = checksum;

out:
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	return status;
}

/**
 * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
 * @hw: pointer to hardware structure
 *
 * After writing EEPROM to shadow RAM using EEWR register, software calculates
 * checksum and updates the EEPROM and instructs the hardware to update
 * the flash.
 **/
static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
	s32 status;
	u16 checksum;

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status) {
		hw_dbg(hw, "EEPROM read failed\n");
		return status;
	}

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
		return IXGBE_ERR_SWFW_SYNC;

	status = hw->eeprom.ops.calc_checksum(hw);
	if (status < 0)
		goto out;

	checksum = (u16)(status & 0xffff);

	/* Do not use hw->eeprom.ops.write because we do not want to
	 * take the synchronization semaphores twice here.
	 */
	status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum);
	if (status)
		goto out;

	status = ixgbe_update_flash_X540(hw);

out:
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
	return status;
}

/**
 * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
 * @hw: pointer to hardware structure
 *
 * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
 * EEPROM from shadow RAM to the flash device.
 **/
static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
{
	u32 flup;
	s32 status;

	status = ixgbe_poll_flash_update_done_X540(hw);
	if (status == IXGBE_ERR_EEPROM) {
		hw_dbg(hw, "Flash update time out\n");
		return status;
	}

	flup = IXGBE_READ_REG(hw, IXGBE_EEC(hw)) | IXGBE_EEC_FLUP;
	IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), flup);

	status = ixgbe_poll_flash_update_done_X540(hw);
	if (status == 0)
		hw_dbg(hw, "Flash update complete\n");
	else
		hw_dbg(hw, "Flash update time out\n");

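	/* On revision 0 silicon, request a second flash update if the
	 * alternate NVM sector is reported valid (EEC.SEC1VAL).
	 */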
	if (hw->revision_id == 0) {
		flup = IXGBE_READ_REG(hw, IXGBE_EEC(hw));

		if (flup & IXGBE_EEC_SEC1VAL) {
			flup |= IXGBE_EEC_FLUP;
			IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), flup);
		}

		status = ixgbe_poll_flash_update_done_X540(hw);
		if (status == 0)
			hw_dbg(hw, "Flash update complete\n");
		else
			hw_dbg(hw, "Flash update time out\n");
	}

	return status;
}

/**
 * ixgbe_poll_flash_update_done_X540 - Poll flash update status
 * @hw: pointer to hardware structure
 *
 * Polls the FLUDONE (bit 26) of the EEC Register to determine when the
 * flash update is done.
 **/
static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
{
	u32 i;
	u32 reg;

	for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
		reg = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
		if (reg & IXGBE_EEC_FLUDONE)
			return 0;
		udelay(5);
	}
	return IXGBE_ERR_EEPROM;
}

/**
 * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore through the SW_FW_SYNC register for
 * the specified function (CSR, PHY0, PHY1, NVM, Flash)
 **/
s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
{
	u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK;
	u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK;
	u32 fwmask = swmask << 5;
	u32 timeout = 200;
	u32 hwmask = 0;
	u32 swfw_sync;
	u32 i;

	if (swmask & IXGBE_GSSR_EEP_SM)
		hwmask = IXGBE_GSSR_FLASH_SM;

	/* SW only mask does not have FW bit pair */
	if (mask & IXGBE_GSSR_SW_MNG_SM)
		swmask |= IXGBE_GSSR_SW_MNG_SM;

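	/* The FW half of each SW_FW_SYNC bit pair is derived by shifting the
	 * SW bit: NVM/PHY bits by five (fwmask above) and I2C bits by two.
	 */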
	swmask |= swi2c_mask;
	fwmask |= swi2c_mask << 2;
	for (i = 0; i < timeout; i++) {
		/* SW NVM semaphore bit is used for access to all
		 * SW_FW_SYNC bits (not just NVM)
		 */
		if (ixgbe_get_swfw_sync_semaphore(hw))
			return IXGBE_ERR_SWFW_SYNC;

		swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
		if (!(swfw_sync & (fwmask | swmask | hwmask))) {
			swfw_sync |= swmask;
			IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
			ixgbe_release_swfw_sync_semaphore(hw);
			usleep_range(5000, 6000);
			return 0;
		}
		/* Firmware currently using resource (fwmask), hardware
		 * currently using resource (hwmask), or other software
		 * thread currently using resource (swmask)
		 */
		ixgbe_release_swfw_sync_semaphore(hw);
		usleep_range(5000, 10000);
	}

	/* If the resource is not released by the FW/HW the SW can assume that
	 * the FW/HW malfunctions. In that case the SW should set the SW bit(s)
	 * of the requested resource(s) while ignoring the corresponding FW/HW
	 * bits in the SW_FW_SYNC register.
	 */
	if (ixgbe_get_swfw_sync_semaphore(hw))
		return IXGBE_ERR_SWFW_SYNC;
	swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
	if (swfw_sync & (fwmask | hwmask)) {
		swfw_sync |= swmask;
		IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);
		ixgbe_release_swfw_sync_semaphore(hw);
		usleep_range(5000, 6000);
		return 0;
	}
	/* If the resource is not released by other SW the SW can assume that
	 * the other SW malfunctions. In that case the SW should clear all SW
	 * flags that it does not own and then repeat the whole process once
	 * again.
	 */
	if (swfw_sync & swmask) {
		u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
			    IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM |
			    IXGBE_GSSR_SW_MNG_SM;

		if (swi2c_mask)
			rmask |= IXGBE_GSSR_I2C_MASK;
		ixgbe_release_swfw_sync_X540(hw, rmask);
		ixgbe_release_swfw_sync_semaphore(hw);
		return IXGBE_ERR_SWFW_SYNC;
	}
	ixgbe_release_swfw_sync_semaphore(hw);

	return IXGBE_ERR_SWFW_SYNC;
}

/**
 * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to release
 *
 * Releases the SWFW semaphore through the SW_FW_SYNC register
 * for the specified function (CSR, PHY0, PHY1, NVM, Flash)
 **/
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
{
	u32 swmask = mask & (IXGBE_GSSR_NVM_PHY_MASK | IXGBE_GSSR_SW_MNG_SM);
	u32 swfw_sync;

	if (mask & IXGBE_GSSR_I2C_MASK)
		swmask |= mask & IXGBE_GSSR_I2C_MASK;
	ixgbe_get_swfw_sync_semaphore(hw);

	swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
	swfw_sync &= ~swmask;
	IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync);

	ixgbe_release_swfw_sync_semaphore(hw);
	usleep_range(5000, 6000);
}

/**
 * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
 * @hw: pointer to hardware structure
 *
 * Sets the hardware semaphores so SW/FW can gain control of shared resources
 */
static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
{
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/* If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
		if (!(swsm & IXGBE_SWSM_SMBI))
			break;
		usleep_range(50, 100);
	}

	if (i == timeout) {
		hw_dbg(hw,
		       "Software semaphore SMBI between device drivers not granted.\n");
		return IXGBE_ERR_EEPROM;
	}

	/* Now get the semaphore between SW/FW through the REGSMP bit */
	for (i = 0; i < timeout; i++) {
		swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
		if (!(swsm & IXGBE_SWFW_REGSMP))
			return 0;

		usleep_range(50, 100);
	}

	/* Release semaphores and return error if SW NVM semaphore
	 * was not granted because we do not have access to the EEPROM
	 */
	hw_dbg(hw, "REGSMP Software NVM semaphore not granted\n");
	ixgbe_release_swfw_sync_semaphore(hw);
	return IXGBE_ERR_EEPROM;
}

/**
 * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
 * @hw: pointer to hardware structure
 *
 * This function clears hardware semaphore bits.
 **/
static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
{
	u32 swsm;

	/* Release both semaphores by writing 0 to the bits REGSMP and SMBI */

	swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw));
	swsm &= ~IXGBE_SWFW_REGSMP;
	IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swsm);

	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw));
	swsm &= ~IXGBE_SWSM_SMBI;
	IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm);

	IXGBE_WRITE_FLUSH(hw);
}

/**
 *  ixgbe_init_swfw_sync_X540 - Release hardware semaphore
 *  @hw: pointer to hardware structure
 *
 *  This function resets hardware semaphore bits for a semaphore that may
 *  have been left locked due to a catastrophic failure.
 **/
void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
{
	u32 rmask;

	/* First try to grab the semaphore but we don't need to bother
	 * looking to see whether we got the lock or not since we do
	 * the same thing regardless of whether we got the lock or not.
	 * We got the lock - we release it.
	 * We timeout trying to get the lock - we force its release.
	 */
	ixgbe_get_swfw_sync_semaphore(hw);
	ixgbe_release_swfw_sync_semaphore(hw);

	/* Acquire and release all software resources. */
	rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
		IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM |
		IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_I2C_MASK;

	ixgbe_acquire_swfw_sync_X540(hw, rmask);
	ixgbe_release_swfw_sync_X540(hw, rmask);
}

/**
 * ixgbe_blink_led_start_X540 - Blink LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to blink
 *
 * Devices that implement the version 2 interface:
 *   X540
 **/
s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
{
	u32 macc_reg;
	u32 ledctl_reg;
	ixgbe_link_speed speed;
	bool link_up;

	if (index > 3)
		return IXGBE_ERR_PARAM;

	/* Link should be up in order for the blink bit in the LED control
	 * register to work. Force link and speed in the MAC if link is down.
	 * This will be reversed when we stop the blinking.
	 */
	hw->mac.ops.check_link(hw, &speed, &link_up, false);
	if (!link_up) {
		macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
	}
	/* Set the LED to LINK_UP + BLINK. */
	ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
	ledctl_reg |= IXGBE_LED_BLINK(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}

/**
 * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to stop blinking
 *
 * Devices that implement the version 2 interface:
 *   X540
 **/
s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
{
	u32 macc_reg;
	u32 ledctl_reg;

	if (index > 3)
		return IXGBE_ERR_PARAM;

	/* Restore the LED to its default value. */
	ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
	ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
	ledctl_reg &= ~IXGBE_LED_BLINK(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);

	/* Unforce link and speed in the MAC. */
	macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
	macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS);
	IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}

static const struct ixgbe_mac_operations mac_ops_X540 = {
	.init_hw                = &ixgbe_init_hw_generic,
	.reset_hw               = &ixgbe_reset_hw_X540,
	.start_hw               = &ixgbe_start_hw_X540,
	.clear_hw_cntrs         = &ixgbe_clear_hw_cntrs_generic,
	.get_media_type         = &ixgbe_get_media_type_X540,
	.enable_rx_dma          = &ixgbe_enable_rx_dma_generic,
	.get_mac_addr           = &ixgbe_get_mac_addr_generic,
	.get_san_mac_addr       = &ixgbe_get_san_mac_addr_generic,
	.get_device_caps        = &ixgbe_get_device_caps_generic,
	.get_wwn_prefix         = &ixgbe_get_wwn_prefix_generic,
	.stop_adapter           = &ixgbe_stop_adapter_generic,
	.get_bus_info           = &ixgbe_get_bus_info_generic,
	.set_lan_id             = &ixgbe_set_lan_id_multi_port_pcie,
	.read_analog_reg8       = NULL,
	.write_analog_reg8      = NULL,
	.setup_link             = &ixgbe_setup_mac_link_X540,
	.set_rxpba		= &ixgbe_set_rxpba_generic,
	.check_link             = &ixgbe_check_mac_link_generic,
	.get_link_capabilities  = &ixgbe_get_copper_link_capabilities_generic,
	.led_on                 = &ixgbe_led_on_generic,
	.led_off                = &ixgbe_led_off_generic,
	.init_led_link_act	= ixgbe_init_led_link_act_generic,
	.blink_led_start        = &ixgbe_blink_led_start_X540,
	.blink_led_stop         = &ixgbe_blink_led_stop_X540,
	.set_rar                = &ixgbe_set_rar_generic,
	.clear_rar              = &ixgbe_clear_rar_generic,
	.set_vmdq               = &ixgbe_set_vmdq_generic,
	.set_vmdq_san_mac	= &ixgbe_set_vmdq_san_mac_generic,
	.clear_vmdq             = &ixgbe_clear_vmdq_generic,
	.init_rx_addrs          = &ixgbe_init_rx_addrs_generic,
	.update_mc_addr_list    = &ixgbe_update_mc_addr_list_generic,
	.enable_mc              = &ixgbe_enable_mc_generic,
	.disable_mc             = &ixgbe_disable_mc_generic,
	.clear_vfta             = &ixgbe_clear_vfta_generic,
	.set_vfta               = &ixgbe_set_vfta_generic,
	.fc_enable              = &ixgbe_fc_enable_generic,
	.setup_fc		= ixgbe_setup_fc_generic,
	.fc_autoneg		= ixgbe_fc_autoneg,
	.set_fw_drv_ver         = &ixgbe_set_fw_drv_ver_generic,
	.init_uta_tables        = &ixgbe_init_uta_tables_generic,
	.setup_sfp              = NULL,
	.set_mac_anti_spoofing  = &ixgbe_set_mac_anti_spoofing,
	.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
	.acquire_swfw_sync      = &ixgbe_acquire_swfw_sync_X540,
	.release_swfw_sync      = &ixgbe_release_swfw_sync_X540,
	.init_swfw_sync		= &ixgbe_init_swfw_sync_X540,
	.disable_rx_buff	= &ixgbe_disable_rx_buff_generic,
	.enable_rx_buff		= &ixgbe_enable_rx_buff_generic,
	.get_thermal_sensor_data = NULL,
	.init_thermal_sensor_thresh = NULL,
	.prot_autoc_read	= &prot_autoc_read_generic,
	.prot_autoc_write	= &prot_autoc_write_generic,
	.enable_rx		= &ixgbe_enable_rx_generic,
	.disable_rx		= &ixgbe_disable_rx_generic,
};

static const struct ixgbe_eeprom_operations eeprom_ops_X540 = {
	.init_params            = &ixgbe_init_eeprom_params_X540,
	.read                   = &ixgbe_read_eerd_X540,
	.read_buffer		= &ixgbe_read_eerd_buffer_X540,
	.write                  = &ixgbe_write_eewr_X540,
	.write_buffer		= &ixgbe_write_eewr_buffer_X540,
	.calc_checksum		= &ixgbe_calc_eeprom_checksum_X540,
	.validate_checksum      = &ixgbe_validate_eeprom_checksum_X540,
	.update_checksum        = &ixgbe_update_eeprom_checksum_X540,
};

static const struct ixgbe_phy_operations phy_ops_X540 = {
	.identify               = &ixgbe_identify_phy_generic,
	.identify_sfp           = &ixgbe_identify_sfp_module_generic,
	.init			= NULL,
	.reset                  = NULL,
	.read_reg               = &ixgbe_read_phy_reg_generic,
	.write_reg              = &ixgbe_write_phy_reg_generic,
	.setup_link             = &ixgbe_setup_phy_link_generic,
	.setup_link_speed       = &ixgbe_setup_phy_link_speed_generic,
	.read_i2c_byte          = &ixgbe_read_i2c_byte_generic,
	.write_i2c_byte         = &ixgbe_write_i2c_byte_generic,
	.read_i2c_sff8472	= &ixgbe_read_i2c_sff8472_generic,
	.read_i2c_eeprom        = &ixgbe_read_i2c_eeprom_generic,
	.write_i2c_eeprom       = &ixgbe_write_i2c_eeprom_generic,
	.check_overtemp         = &ixgbe_tn_check_overtemp,
	.set_phy_power          = &ixgbe_set_copper_phy_power,
};

static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = {
	IXGBE_MVALS_INIT(X540)
};

const struct ixgbe_info ixgbe_X540_info = {
	.mac                    = ixgbe_mac_X540,
	.get_invariants         = &ixgbe_get_invariants_X540,
	.mac_ops                = &mac_ops_X540,
	.eeprom_ops             = &eeprom_ops_X540,
	.phy_ops                = &phy_ops_X540,
	.mbx_ops                = &mbx_ops_generic,
	.mvals			= ixgbe_mvals_X540,
};