xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/intel/igb/e1000_i210.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2018 Intel Corporation. */

/* e1000_i210
 * e1000_i211
 */

#include <linux/types.h>
#include <linux/if_ether.h>

#include "e1000_hw.h"
#include "e1000_i210.h"

static s32 igb_update_flash_i210(struct e1000_hw *hw);

/**
 *  igb_get_hw_semaphore_i210 - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM
 */
static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
{
	u32 swsm;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = rd32(E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		/* In rare circumstances, the SW semaphore may already be held
		 * unintentionally. Clear the semaphore once before giving up.
		 */
		if (hw->dev_spec._82575.clear_semaphore_once) {
			hw->dev_spec._82575.clear_semaphore_once = false;
			igb_put_hw_semaphore(hw);
			for (i = 0; i < timeout; i++) {
				swsm = rd32(E1000_SWSM);
				if (!(swsm & E1000_SWSM_SMBI))
					break;

				udelay(50);
			}
		}

		/* If we do not have the semaphore here, we have to give up. */
		if (i == timeout) {
			hw_dbg("Driver can't access device - SMBI bit is set.\n");
			return -E1000_ERR_NVM;
		}
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(E1000_SWSM);
		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		igb_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return 0;
}
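
/* Usage sketch (illustrative only, not part of the driver): the semaphore
 * is taken in two stages - the SWSM.SMBI bit arbitrates between software
 * agents, then SWSM.SWESMBI arbitrates between software and firmware.  A
 * caller in this file pairs it with igb_put_hw_semaphore():
 *
 *	if (!igb_get_hw_semaphore_i210(hw)) {
 *		... touch SW_FW_SYNC or other shared state ...
 *		igb_put_hw_semaphore(hw);
 *	}
 */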

/**
 *  igb_acquire_nvm_i210 - Request for access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Acquire the necessary semaphores for exclusive access to the EEPROM.
 *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
 *  Return successful if access grant bit set, else clear the request for
 *  EEPROM access and return -E1000_ERR_NVM (-1).
 **/
static s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
{
	return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}

/**
 *  igb_release_nvm_i210 - Release exclusive access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
 *  then release the semaphores acquired.
 **/
static void igb_release_nvm_i210(struct e1000_hw *hw)
{
	igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}

/**
 *  igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to acquire
 *
 *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 *  will also specify which port we're acquiring the lock for.
 **/
s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;
	s32 ret_val = 0;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	while (i < timeout) {
		if (igb_get_hw_semaphore_i210(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = rd32(E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware currently using resource (fwmask) */
		igb_put_hw_semaphore(hw);
		mdelay(5);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}

	swfw_sync |= swmask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
out:
	return ret_val;
}
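
/* Note on the mask layout (a sketch matching the code above): for each
 * resource the low 16 bits of SW_FW_SYNC carry the software ownership
 * flags and the high 16 bits the firmware ones, so for a given @mask the
 * pair checked is (mask | mask << 16).  Acquiring the EEPROM, for example:
 *
 *	if (!igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM)) {
 *		... exclusive EEPROM access ...
 *		igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
 *	}
 */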

/**
 *  igb_release_swfw_sync_i210 - Release SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to release
 *
 *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 *  will also specify which port we're releasing the lock for.
 **/
void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	while (igb_get_hw_semaphore_i210(hw))
		; /* Empty */

	swfw_sync = rd32(E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
}

/**
 *  igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
 *  @hw: pointer to the HW structure
 *  @offset: offset of word in the Shadow Ram to read
 *  @words: number of words to read
 *  @data: word read from the Shadow Ram
 *
 *  Reads a 16 bit word from the Shadow Ram using the EERD register.
 *  Uses necessary synchronization semaphores.
 **/
static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
				  u16 *data)
{
	s32 status = 0;
	u16 i, count;

	/* We cannot hold synchronization semaphores for too long,
	 * because of forceful takeover procedure. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (!(hw->nvm.ops.acquire(hw))) {
			/* Advance the NVM offset together with the data
			 * buffer so each burst reads fresh words.
			 */
			status = igb_read_nvm_eerd(hw, offset + i, count,
						   data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status)
			break;
	}

	return status;
}
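
/* Burst math sketch: with E1000_EERD_EEWR_MAX_COUNT == 512 (the value
 * assumed here, see e1000_i210.h), a 1200-word read is issued as bursts of
 * 512, 512 and 176 words at successive offsets, re-acquiring the semaphore
 * before each burst so firmware can grab the NVM in between.
 */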

/**
 *  igb_write_nvm_srwr - Write to Shadow Ram using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow Ram to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the Shadow Ram
 *
 *  Writes data to Shadow Ram at offset using EEWR register.
 *
 *  If igb_update_nvm_checksum is not called after this function, the
 *  Shadow Ram will most likely contain an invalid checksum.
 **/
static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
			      u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, k, eewr = 0;
	u32 attempts = 100000;
	s32 ret_val = 0;

	/* A check for invalid values:  offset too large, too many words,
	 * too many words for the offset, and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		eewr = ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) |
			(data[i] << E1000_NVM_RW_REG_DATA) |
			E1000_NVM_RW_REG_START;

		wr32(E1000_SRWR, eewr);

		/* Assume a timeout until the DONE bit is observed, so the
		 * check below can actually fire.
		 */
		ret_val = -E1000_ERR_NVM;
		for (k = 0; k < attempts; k++) {
			if (E1000_NVM_RW_REG_DONE &
			    rd32(E1000_SRWR)) {
				ret_val = 0;
				break;
			}
			udelay(5);
		}

		if (ret_val) {
			hw_dbg("Shadow RAM write EEWR timed out\n");
			break;
		}
	}

out:
	return ret_val;
}
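
/* Register layout sketch for the SRWR command built above, assuming the
 * usual e1000 definitions (E1000_NVM_RW_ADDR_SHIFT == 2,
 * E1000_NVM_RW_REG_DATA == 16): the word address is shifted into the
 * address field, the 16-bit payload sits in the upper half, and
 * E1000_NVM_RW_REG_START kicks the write; hardware then latches
 * E1000_NVM_RW_REG_DONE when the word has been committed.
 */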

/**
 *  igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow RAM to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the Shadow RAM
 *
 *  Writes data to Shadow RAM at offset using EEWR register.
 *
 *  If e1000_update_nvm_checksum is not called after this function, the
 *  data will not be committed to FLASH and also Shadow RAM will most likely
 *  contain an invalid checksum.
 *
 *  If an error code is returned, data and Shadow RAM may be inconsistent -
 *  the buffer may have been partially written.
 **/
static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
				   u16 *data)
{
	s32 status = 0;
	u16 i, count;

	/* We cannot hold synchronization semaphores for too long,
	 * because of forceful takeover procedure. However it is more efficient
	 * to write in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (!(hw->nvm.ops.acquire(hw))) {
			/* Advance the NVM offset together with the data
			 * buffer so each burst writes fresh words.
			 */
			status = igb_write_nvm_srwr(hw, offset + i, count,
						    data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status)
			break;
	}

	return status;
}

/**
 *  igb_read_invm_word_i210 - Reads OTP
 *  @hw: pointer to the HW structure
 *  @address: the word address (aka eeprom offset) to read
 *  @data: pointer to the data read
 *
 *  Reads 16-bit words from the OTP. Returns an error when the word is not
 *  stored in OTP.
 **/
static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
{
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u32 invm_dword;
	u16 i;
	u8 record_type, word_address;

	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = rd32(E1000_INVM_DATA_REG(i));
		/* Get record type */
		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
		if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
			break;
		if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
			i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
			i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
			if (word_address == address) {
				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
				hw_dbg("Read INVM Word 0x%02x = %x\n",
				       address, *data);
				status = 0;
				break;
			}
		}
	}
	if (status)
		hw_dbg("Requested word 0x%02x not found in OTP\n", address);
	return status;
}
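
/* iNVM walk sketch: the OTP is an array of 32-bit records.  Structure
 * records (CSR autoload, RSA key) are skipped by bumping the index past
 * their payload, word-autoload records are matched against @address, and
 * an uninitialized record terminates the walk - so a word that was never
 * programmed falls through to the not-found status.
 */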

/**
 *  igb_read_invm_i210 - Read invm wrapper function for I210/I211
 *  @hw: pointer to the HW structure
 *  @offset: offset to read from
 *  @words: number of words to read (unused)
 *  @data: pointer to the data read
 *
 *  Wrapper function to return data formerly found in the NVM.
 **/
static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
			      u16 __always_unused words, u16 *data)
{
	s32 ret_val = 0;

	/* Only the MAC addr is required to be present in the iNVM */
	switch (offset) {
	case NVM_MAC_ADDR:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]);
		ret_val |= igb_read_invm_word_i210(hw, (u8)offset + 1,
						   &data[1]);
		ret_val |= igb_read_invm_word_i210(hw, (u8)offset + 2,
						   &data[2]);
		if (ret_val)
			hw_dbg("MAC Addr not found in iNVM\n");
		break;
	case NVM_INIT_CTRL_2:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_INIT_CTRL_2_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_INIT_CTRL_4:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_INIT_CTRL_4_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_LED_1_CFG:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_LED_1_CFG_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_LED_0_2_CFG:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_LED_0_2_CFG_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_ID_LED_SETTINGS:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = ID_LED_RESERVED_FFFF;
			ret_val = 0;
		}
		break;
	case NVM_SUB_DEV_ID:
		*data = hw->subsystem_device_id;
		break;
	case NVM_SUB_VEN_ID:
		*data = hw->subsystem_vendor_id;
		break;
	case NVM_DEV_ID:
		*data = hw->device_id;
		break;
	case NVM_VEN_ID:
		*data = hw->vendor_id;
		break;
	default:
		hw_dbg("NVM word 0x%02x is not mapped.\n", offset);
		*data = NVM_RESERVED_WORD;
		break;
	}
	return ret_val;
}
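
/* Caller's view (illustrative only): on flash-less parts this function is
 * wired up as hw->nvm.ops.read in igb_init_nvm_params_i210() below.
 * Reading the MAC address pulls three consecutive iNVM words, while most
 * configuration words fall back to their I211 defaults when absent:
 *
 *	u16 mac[3];
 *	if (!hw->nvm.ops.read(hw, NVM_MAC_ADDR, 3, mac))
 *		... mac[0..2] now hold the six address bytes ...
 */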

/**
 *  igb_read_invm_version - Reads iNVM version and image type
 *  @hw: pointer to the HW structure
 *  @invm_ver: version structure for the version read
 *
 *  Reads iNVM version and image type.
 **/
s32 igb_read_invm_version(struct e1000_hw *hw,
			  struct e1000_fw_version *invm_ver)
{
	u32 *record = NULL;
	u32 *next_record = NULL;
	u32 i = 0;
	u32 invm_dword = 0;
	u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
					     E1000_INVM_RECORD_SIZE_IN_BYTES);
	u32 buffer[E1000_INVM_SIZE];
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u16 version = 0;

	/* Read iNVM memory */
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = rd32(E1000_INVM_DATA_REG(i));
		buffer[i] = invm_dword;
	}

	/* Read version number */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have first version location used */
		if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
			version = 0;
			status = 0;
			break;
		}
		/* Check if we have second version location used */
		else if ((i == 1) &&
			 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = 0;
			break;
		}
		/* Check if we have odd version location
		 * used and it is the last one used
		 */
		else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
			 ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
			 (i != 1))) {
			version = (*next_record & E1000_INVM_VER_FIELD_TWO)
				  >> 13;
			status = 0;
			break;
		}
		/* Check if we have even version location
		 * used and it is the last one used
		 */
		else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
			 ((*record & 0x3) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = 0;
			break;
		}
	}

	if (!status) {
		invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
					>> E1000_INVM_MAJOR_SHIFT;
		invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
	}
	/* Read Image Type */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have image type in first location used */
		if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
			invm_ver->invm_img_type = 0;
			status = 0;
			break;
		}
		/* Check if we have image type in the last location used */
		else if ((((*record & 0x3) == 0) &&
			 ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
			 ((((*record & 0x3) != 0) && (i != 1)))) {
			invm_ver->invm_img_type =
				(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
			status = 0;
			break;
		}
	}
	return status;
}
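
/* Field packing sketch (per the masks above): the version word splits as
 * major = (version & E1000_INVM_MAJOR_MASK) >> E1000_INVM_MAJOR_SHIFT and
 * minor = version & E1000_INVM_MINOR_MASK, so with the usual definitions
 * (mask 0x3F0, shift 4, minor mask 0xF) a stored 0x42 decodes as
 * version 4.2.
 */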

/**
 *  igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
 **/
static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 status = 0;
	s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);

	if (!(hw->nvm.ops.acquire(hw))) {

		/* Replace the semaphore-grabbing read function with one
		 * that skips the semaphore, since we already hold it here.
		 */
		read_op_ptr = hw->nvm.ops.read;
		hw->nvm.ops.read = igb_read_nvm_eerd;

		status = igb_validate_nvm_checksum(hw);

		/* Revert original read operation. */
		hw->nvm.ops.read = read_op_ptr;

		hw->nvm.ops.release(hw);
	} else {
		status = E1000_ERR_SWFW_SYNC;
	}

	return status;
}

/**
 *  igb_update_nvm_checksum_i210 - Update EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
 *  up to the checksum.  Then calculates the EEPROM checksum and writes the
 *  value to the EEPROM. Next commit EEPROM data onto the Flash.
 **/
static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 checksum = 0;
	u16 i, nvm_data;

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val) {
		hw_dbg("EEPROM read failed\n");
		goto out;
	}

	if (!(hw->nvm.ops.acquire(hw))) {
		/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
		 */

		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				hw->nvm.ops.release(hw);
				hw_dbg("NVM Read Error while updating checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
		checksum = (u16)NVM_SUM - checksum;
		ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
					     &checksum);
		if (ret_val) {
			hw->nvm.ops.release(hw);
			hw_dbg("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		ret_val = igb_update_flash_i210(hw);
	} else {
		ret_val = -E1000_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}
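
/* Checksum invariant (sketch): the words 0x00..NVM_CHECKSUM_REG of the NVM
 * must sum to NVM_SUM (0xBABA), so the stored checksum word is computed as
 * the difference:
 *
 *	checksum = (u16)NVM_SUM - sum(word[0] .. word[NVM_CHECKSUM_REG - 1])
 */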

/**
 *  igb_pool_flash_update_done_i210 - Poll FLUDONE status
 *  @hw: pointer to the HW structure
 *
 *  Polls the EECD register until the flash-update-done bit is set or the
 *  attempt limit is reached.  (The identifier keeps the historical "pool"
 *  spelling of "poll".)
 **/
static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
{
	s32 ret_val = -E1000_ERR_NVM;
	u32 i, reg;

	for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
		reg = rd32(E1000_EECD);
		if (reg & E1000_EECD_FLUDONE_I210) {
			ret_val = 0;
			break;
		}
		udelay(5);
	}

	return ret_val;
}

/**
 *  igb_get_flash_presence_i210 - Check if flash device is detected.
 *  @hw: pointer to the HW structure
 *
 **/
bool igb_get_flash_presence_i210(struct e1000_hw *hw)
{
	u32 eec = 0;
	bool ret_val = false;

	eec = rd32(E1000_EECD);
	if (eec & E1000_EECD_FLASH_DETECTED_I210)
		ret_val = true;

	return ret_val;
}

/**
 *  igb_update_flash_i210 - Commit EEPROM to the flash
 *  @hw: pointer to the HW structure
 *
 **/
static s32 igb_update_flash_i210(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 flup;

	ret_val = igb_pool_flash_update_done_i210(hw);
	if (ret_val == -E1000_ERR_NVM) {
		hw_dbg("Flash update time out\n");
		goto out;
	}

	flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210;
	wr32(E1000_EECD, flup);

	ret_val = igb_pool_flash_update_done_i210(hw);
	if (ret_val)
		hw_dbg("Flash update time out\n");
	else
		hw_dbg("Flash update complete\n");

out:
	return ret_val;
}
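
/* Flash commit sequence (as coded above): wait for any in-flight update
 * (FLUDONE set), set E1000_EECD_FLUPD_I210 to start copying the shadow RAM
 * to flash, then poll FLUDONE again for completion - each poll bounded by
 * E1000_FLUDONE_ATTEMPTS iterations of a 5 us delay.
 */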

/**
 *  igb_valid_led_default_i210 - Verify a valid default LED config
 *  @hw: pointer to the HW structure
 *  @data: pointer to the NVM (EEPROM)
 *
 *  Read the EEPROM for the current default LED configuration.  If the
 *  LED configuration is not valid, set to a valid LED configuration.
 **/
s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
		switch (hw->phy.media_type) {
		case e1000_media_type_internal_serdes:
			*data = ID_LED_DEFAULT_I210_SERDES;
			break;
		case e1000_media_type_copper:
		default:
			*data = ID_LED_DEFAULT_I210;
			break;
		}
	}
out:
	return ret_val;
}

/**
 *  __igb_access_xmdio_reg - Read/write XMDIO register
 *  @hw: pointer to the HW structure
 *  @address: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: pointer to value to read/write from/to the XMDIO address
 *  @read: boolean flag to indicate read or write
 **/
static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
				  u8 dev_addr, u16 *data, bool read)
{
	s32 ret_val = 0;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
							 dev_addr);
	if (ret_val)
		return ret_val;

	if (read)
		ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
	else
		ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
	if (ret_val)
		return ret_val;

	/* Recalibrate the device back to 0 */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
	if (ret_val)
		return ret_val;

	return ret_val;
}
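
/* MMD indirect access sketch: program the MMD device address into MMDAC,
 * write the register offset to MMDAAD, switch MMDAC to data mode
 * (E1000_MMDAC_FUNC_DATA), then read or write MMDAAD to move the payload -
 * the usual Clause 45-over-Clause 22 sequence.  A caller might read a
 * register from MMD 3 like this (the register constant is illustrative,
 * not a name from this driver):
 *
 *	u16 val;
 *	igb_read_xmdio_reg(hw, MY_EEE_ABILITY_REG, 3, &val);
 */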

/**
 *  igb_read_xmdio_reg - Read XMDIO register
 *  @hw: pointer to the HW structure
 *  @addr: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: value to be read from the EMI address
 **/
s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
{
	return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true);
}

/**
 *  igb_write_xmdio_reg - Write XMDIO register
 *  @hw: pointer to the HW structure
 *  @addr: XMDIO address to program
 *  @dev_addr: device address to program
 *  @data: value to be written to the XMDIO address
 **/
s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
{
	return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
}

/**
 *  igb_init_nvm_params_i210 - Init NVM func ptrs.
 *  @hw: pointer to the HW structure
 **/
s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	struct e1000_nvm_info *nvm = &hw->nvm;

	nvm->ops.acquire = igb_acquire_nvm_i210;
	nvm->ops.release = igb_release_nvm_i210;
	nvm->ops.valid_led_default = igb_valid_led_default_i210;

	/* NVM Function Pointers */
	if (igb_get_flash_presence_i210(hw)) {
		hw->nvm.type = e1000_nvm_flash_hw;
		nvm->ops.read     = igb_read_nvm_srrd_i210;
		nvm->ops.write    = igb_write_nvm_srwr_i210;
		nvm->ops.validate = igb_validate_nvm_checksum_i210;
		nvm->ops.update   = igb_update_nvm_checksum_i210;
	} else {
		hw->nvm.type = e1000_nvm_invm;
		nvm->ops.read     = igb_read_invm_i210;
		nvm->ops.write    = NULL;
		nvm->ops.validate = NULL;
		nvm->ops.update   = NULL;
	}
	return ret_val;
}
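
/* Dispatch note: flash-less parts (i211, or an i210 with no attached
 * flash) expose only iNVM reads - ops.write/validate/update stay NULL -
 * so generic callers must check the pointer before use, e.g.:
 *
 *	if (hw->nvm.ops.update)
 *		hw->nvm.ops.update(hw);
 */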

/**
 *  igb_pll_workaround_i210 - Work around a PLL erratum
 *  @hw: pointer to the HW structure
 *
 *  Works around an erratum in the PLL circuit where it occasionally
 *  provides the wrong clock frequency after power up.
 **/
s32 igb_pll_workaround_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
	u16 nvm_word, phy_word, pci_word, tmp_nvm;
	int i;

	/* Get and set needed register values */
	wuc = rd32(E1000_WUC);
	mdicnfg = rd32(E1000_MDICNFG);
	reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
	wr32(E1000_MDICNFG, reg_val);

	/* Get data from NVM, or set default */
	ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
					  &nvm_word);
	if (ret_val)
		nvm_word = E1000_INVM_DEFAULT_AL;
	tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
	igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE);
	phy_word = E1000_PHY_PLL_UNCONF;
	for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
		/* check current state directly from internal PHY */
		igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word);
		if ((phy_word & E1000_PHY_PLL_UNCONF)
		    != E1000_PHY_PLL_UNCONF) {
			ret_val = 0;
			break;
		} else {
			ret_val = -E1000_ERR_PHY;
		}
		/* directly reset the internal PHY */
		ctrl = rd32(E1000_CTRL);
		wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);

		ctrl_ext = rd32(E1000_CTRL_EXT);
		ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
		wr32(E1000_CTRL_EXT, ctrl_ext);

		wr32(E1000_WUC, 0);
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
		wr32(E1000_EEARBC_I210, reg_val);

		igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		pci_word |= E1000_PCI_PMCSR_D3;
		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		usleep_range(1000, 2000);
		pci_word &= ~E1000_PCI_PMCSR_D3;
		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
		wr32(E1000_EEARBC_I210, reg_val);

		/* restore WUC register */
		wr32(E1000_WUC, wuc);
	}
	igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0);
	/* restore MDICNFG setting */
	wr32(E1000_MDICNFG, mdicnfg);
	return ret_val;
}
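
/* Workaround shape (as coded above): each retry resets the internal PHY,
 * rewrites the autoload word with the PLL override value, bounces the
 * function through D3/D0 via PMCSR so the PLL relocks, then restores the
 * original autoload word - repeated up to E1000_MAX_PLL_TRIES until the
 * PHY reports a configured PLL frequency.
 */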

/**
 *  igb_get_cfg_done_i210 - Read config done bit
 *  @hw: pointer to the HW structure
 *
 *  Read the management control register for the config done bit for
 *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
 *  to read the config done bit, so the error is *ONLY* logged and the
 *  function returns 0.  If we were to return an error, EEPROM-less silicon
 *  would not be able to be reset or change link.
 **/
s32 igb_get_cfg_done_i210(struct e1000_hw *hw)
{
	s32 timeout = PHY_CFG_TIMEOUT;
	u32 mask = E1000_NVM_CFG_DONE_PORT_0;

	while (timeout) {
		if (rd32(E1000_EEMNGCTL_I210) & mask)
			break;
		usleep_range(1000, 2000);
		timeout--;
	}
	if (!timeout)
		hw_dbg("MNG configuration cycle has not completed.\n");

	return 0;
}