// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */

#include <linux/delay.h>

#include "igc_hw.h"

/**
 * igc_acquire_nvm_i225 - Acquire exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Acquire the necessary semaphores for exclusive access to the EEPROM.
 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
 * Return successful if access grant bit set, else clear the request for
 * EEPROM access and return -IGC_ERR_NVM (-1).
 */
static s32 igc_acquire_nvm_i225(struct igc_hw *hw)
{
	return igc_acquire_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
}

/**
 * igc_release_nvm_i225 - Release exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
 * then release the semaphores acquired.
 */
static void igc_release_nvm_i225(struct igc_hw *hw)
{
	igc_release_swfw_sync_i225(hw, IGC_SWFW_EEP_SM);
}

/**
 * igc_get_hw_semaphore_i225 - Acquire hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Acquire the HW semaphore to access the PHY or NVM
 */
static s32 igc_get_hw_semaphore_i225(struct igc_hw *hw)
{
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;
	u32 swsm;

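	/* The SWSM register implements a two-stage handshake: SMBI is the
	 * host software semaphore (clear means free), and SWESMBI is the
	 * software/firmware semaphore, which must be read back after being
	 * set to confirm that it latched.
	 */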
	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = rd32(IGC_SWSM);
		if (!(swsm & IGC_SWSM_SMBI))
			break;

		usleep_range(500, 600);
		i++;
	}

	if (i == timeout) {
		/* In rare circumstances, the SW semaphore may already be held
		 * unintentionally. Clear the semaphore once before giving up.
		 */
		if (hw->dev_spec._base.clear_semaphore_once) {
			hw->dev_spec._base.clear_semaphore_once = false;
			igc_put_hw_semaphore(hw);
			for (i = 0; i < timeout; i++) {
				swsm = rd32(IGC_SWSM);
				if (!(swsm & IGC_SWSM_SMBI))
					break;

				usleep_range(500, 600);
			}
		}

		/* If we do not have the semaphore here, we have to give up. */
		if (i == timeout) {
			hw_dbg("Driver can't access device - SMBI bit is set.\n");
			return -IGC_ERR_NVM;
		}
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(IGC_SWSM);
		wr32(IGC_SWSM, swsm | IGC_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (rd32(IGC_SWSM) & IGC_SWSM_SWESMBI)
			break;

		usleep_range(500, 600);
	}

	if (i == timeout) {
		/* Release semaphores */
		igc_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		return -IGC_ERR_NVM;
	}

	return 0;
}

/**
 * igc_acquire_swfw_sync_i225 - Acquire SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
 * will also specify which port we're acquiring the lock for.
 */
s32 igc_acquire_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
	s32 i = 0, timeout = 200;
	u32 fwmask = mask << 16;
	u32 swmask = mask;
	s32 ret_val = 0;
	u32 swfw_sync;

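	/* Take the HW semaphore, check whether firmware or another software
	 * agent already owns the resource in SW_FW_SYNC, and if so release
	 * the semaphore and back off for 5 ms before retrying.
	 */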
	while (i < timeout) {
		if (igc_get_hw_semaphore_i225(hw)) {
			ret_val = -IGC_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = rd32(IGC_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware (fwmask) or another software thread (swmask) is
		 * currently using the resource.
		 */
		igc_put_hw_semaphore(hw);
		mdelay(5);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -IGC_ERR_SWFW_SYNC;
		goto out;
	}

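	/* Claim the resource by setting our bit in SW_FW_SYNC. The HW
	 * semaphore only serializes this read-modify-write; the SW_FW_SYNC
	 * bit itself holds the resource, so the HW semaphore is dropped
	 * right after the write.
	 */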
	swfw_sync |= swmask;
	wr32(IGC_SW_FW_SYNC, swfw_sync);

	igc_put_hw_semaphore(hw);
out:
	return ret_val;
}

/**
 * igc_release_swfw_sync_i225 - Release SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to release
 *
 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
 * will also specify which port we're releasing the lock for.
 */
void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
	u32 swfw_sync;

	/* Releasing the resource requires first getting the HW semaphore.
	 * If we fail to get the semaphore, there is nothing we can do,
	 * except log an error and quit. We are not allowed to hang here
	 * indefinitely, as it may cause denial of service or system crash.
	 */
	if (igc_get_hw_semaphore_i225(hw)) {
		hw_dbg("Failed to release SW_FW_SYNC.\n");
		return;
	}

	swfw_sync = rd32(IGC_SW_FW_SYNC);
	swfw_sync &= ~mask;
	wr32(IGC_SW_FW_SYNC, swfw_sync);

	igc_put_hw_semaphore(hw);
}

/**
 * igc_read_nvm_srrd_i225 - Reads Shadow Ram using EERD register
 * @hw: pointer to the HW structure
 * @offset: offset of word in the Shadow Ram to read
 * @words: number of words to read
 * @data: word read from the Shadow Ram
 *
 * Reads a 16 bit word from the Shadow Ram using the EERD register.
 * Uses necessary synchronization semaphores.
 */
static s32 igc_read_nvm_srrd_i225(struct igc_hw *hw, u16 offset, u16 words,
				  u16 *data)
{
	s32 status = 0;
	u16 i, count;

	/* We cannot hold synchronization semaphores for too long,
	 * because of forceful takeover procedure. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
			IGC_EERD_EEWR_MAX_COUNT : (words - i);

		status = hw->nvm.ops.acquire(hw);
		if (status)
			break;

		/* Advance both the NVM offset and the data pointer by the
		 * number of words already transferred in earlier bursts.
		 */
		status = igc_read_nvm_eerd(hw, offset + i, count, data + i);
		hw->nvm.ops.release(hw);
		if (status)
			break;
	}

	return status;
}

/**
 * igc_write_nvm_srwr - Write to Shadow Ram using EEWR
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow Ram to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow Ram
 *
 * Writes data to Shadow Ram at offset using EEWR register.
 *
 * If igc_update_nvm_checksum is not called after this function, the
 * Shadow Ram will most likely contain an invalid checksum.
 */
static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
			      u16 *data)
{
	struct igc_nvm_info *nvm = &hw->nvm;
	s32 ret_val = -IGC_ERR_NVM;
	u32 attempts = 100000;
	u32 i, k, eewr = 0;

	/* A check for invalid values: offset too large, too many words,
	 * too many words for the offset, and not enough words.
	 */
	if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) ||
	    words == 0) {
		hw_dbg("nvm parameter(s) out of bounds\n");
		goto out;
	}

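	/* Each SRWR command packs the word address and the 16-bit data into
	 * one register write and sets the START bit; completion is signalled
	 * by the DONE bit, which is polled below with a 5 usec delay per
	 * attempt.
	 */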
	for (i = 0; i < words; i++) {
		eewr = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) |
		       (data[i] << IGC_NVM_RW_REG_DATA) |
		       IGC_NVM_RW_REG_START;

		wr32(IGC_SRWR, eewr);

		for (k = 0; k < attempts; k++) {
			if (IGC_NVM_RW_REG_DONE & rd32(IGC_SRWR)) {
				ret_val = 0;
				break;
			}
			udelay(5);
		}

		if (ret_val) {
			hw_dbg("Shadow RAM write EEWR timed out\n");
			break;
		}
	}

out:
	return ret_val;
}

/**
 * igc_write_nvm_srwr_i225 - Write to Shadow RAM using EEWR
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow RAM to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow RAM
 *
 * Writes data to Shadow RAM at offset using EEWR register.
 *
 * If igc_update_nvm_checksum is not called after this function, the
 * data will not be committed to FLASH and the Shadow RAM will most likely
 * contain an invalid checksum.
 *
 * If error code is returned, data and Shadow RAM may be inconsistent - buffer
 * partially written.
 */
static s32 igc_write_nvm_srwr_i225(struct igc_hw *hw, u16 offset, u16 words,
				   u16 *data)
{
	s32 status = 0;
	u16 i, count;

	/* We cannot hold synchronization semaphores for too long,
	 * because of forceful takeover procedure. However it is more efficient
	 * to write in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += IGC_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / IGC_EERD_EEWR_MAX_COUNT > 0 ?
			IGC_EERD_EEWR_MAX_COUNT : (words - i);

		status = hw->nvm.ops.acquire(hw);
		if (status)
			break;

		/* Advance both the NVM offset and the data pointer by the
		 * number of words already transferred in earlier bursts.
		 */
		status = igc_write_nvm_srwr(hw, offset + i, count, data + i);
		hw->nvm.ops.release(hw);
		if (status)
			break;
	}

	return status;
}

/**
 * igc_validate_nvm_checksum_i225 - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
 */
static s32 igc_validate_nvm_checksum_i225(struct igc_hw *hw)
{
	s32 (*read_op_ptr)(struct igc_hw *hw, u16 offset, u16 count,
			   u16 *data);
	s32 status = 0;

	status = hw->nvm.ops.acquire(hw);
	if (status)
		goto out;

	/* Temporarily replace the semaphore-grabbing read function with the
	 * plain EERD read that skips semaphore handling; the semaphore is
	 * already held here.
	 */
	read_op_ptr = hw->nvm.ops.read;
	hw->nvm.ops.read = igc_read_nvm_eerd;

	status = igc_validate_nvm_checksum(hw);

	/* Revert original read operation. */
	hw->nvm.ops.read = read_op_ptr;

	hw->nvm.ops.release(hw);

out:
	return status;
}

/**
 * igc_pool_flash_update_done_i225 - Poll FLUDONE status
 * @hw: pointer to the HW structure
 */
static s32 igc_pool_flash_update_done_i225(struct igc_hw *hw)
{
	s32 ret_val = -IGC_ERR_NVM;
	u32 i, reg;

	for (i = 0; i < IGC_FLUDONE_ATTEMPTS; i++) {
		reg = rd32(IGC_EECD);
		if (reg & IGC_EECD_FLUDONE_I225) {
			ret_val = 0;
			break;
		}
		udelay(5);
	}

	return ret_val;
}

/**
 * igc_update_flash_i225 - Commit EEPROM to the flash
 * @hw: pointer to the HW structure
 */
static s32 igc_update_flash_i225(struct igc_hw *hw)
{
	s32 ret_val = 0;
	u32 flup;

	ret_val = igc_pool_flash_update_done_i225(hw);
	if (ret_val == -IGC_ERR_NVM) {
		hw_dbg("Flash update timed out\n");
		goto out;
	}

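	/* Request the update by setting FLUPD; hardware reports completion
	 * via FLUDONE, which igc_pool_flash_update_done_i225() polls for.
	 */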
	flup = rd32(IGC_EECD) | IGC_EECD_FLUPD_I225;
	wr32(IGC_EECD, flup);

	ret_val = igc_pool_flash_update_done_i225(hw);
	if (ret_val)
		hw_dbg("Flash update timed out\n");
	else
		hw_dbg("Flash update complete\n");

out:
	return ret_val;
}

/**
 * igc_update_nvm_checksum_i225 - Update EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
 * up to the checksum. Then calculates the EEPROM checksum and writes the
 * value to the EEPROM. Next commit EEPROM data onto the Flash.
 */
static s32 igc_update_nvm_checksum_i225(struct igc_hw *hw)
{
	u16 checksum = 0;
	s32 ret_val = 0;
	u16 i, nvm_data;

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	ret_val = igc_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val) {
		hw_dbg("EEPROM read failed\n");
		goto out;
	}

	ret_val = hw->nvm.ops.acquire(hw);
	if (ret_val)
		goto out;

	/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
	 * because we do not want to take the synchronization
	 * semaphores twice here.
	 */

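	/* Sum the words that precede the checksum word, then store a
	 * checksum value chosen so that the first NVM_CHECKSUM_REG + 1
	 * words add up to NVM_SUM (0xBABA).
	 */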
	for (i = 0; i < NVM_CHECKSUM_REG; i++) {
		ret_val = igc_read_nvm_eerd(hw, i, 1, &nvm_data);
		if (ret_val) {
			hw->nvm.ops.release(hw);
			hw_dbg("NVM Read Error while updating checksum.\n");
			goto out;
		}
		checksum += nvm_data;
	}
	checksum = (u16)NVM_SUM - checksum;
	ret_val = igc_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, &checksum);
	if (ret_val) {
		hw->nvm.ops.release(hw);
		hw_dbg("NVM Write Error while updating checksum.\n");
		goto out;
	}

	hw->nvm.ops.release(hw);

	ret_val = igc_update_flash_i225(hw);

out:
	return ret_val;
}

/**
 * igc_get_flash_presence_i225 - Check if flash device is detected
 * @hw: pointer to the HW structure
 */
bool igc_get_flash_presence_i225(struct igc_hw *hw)
{
	bool ret_val = false;
	u32 eec = 0;

	eec = rd32(IGC_EECD);
	if (eec & IGC_EECD_FLASH_DETECTED_I225)
		ret_val = true;

	return ret_val;
}

/**
 * igc_init_nvm_params_i225 - Init NVM func ptrs.
 * @hw: pointer to the HW structure
 */
s32 igc_init_nvm_params_i225(struct igc_hw *hw)
{
	struct igc_nvm_info *nvm = &hw->nvm;

	nvm->ops.acquire = igc_acquire_nvm_i225;
	nvm->ops.release = igc_release_nvm_i225;

	/* NVM Function Pointers */
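	/* With external flash present, the shadow RAM is writable and its
	 * checksum can be committed; otherwise the i225 falls back to the
	 * internal NVM (iNVM), which is treated as read-only here, hence
	 * the NULL write/validate/update ops.
	 */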
	if (igc_get_flash_presence_i225(hw)) {
		hw->nvm.type = igc_nvm_flash_hw;
		nvm->ops.read = igc_read_nvm_srrd_i225;
		nvm->ops.write = igc_write_nvm_srwr_i225;
		nvm->ops.validate = igc_validate_nvm_checksum_i225;
		nvm->ops.update = igc_update_nvm_checksum_i225;
	} else {
		hw->nvm.type = igc_nvm_invm;
		nvm->ops.read = igc_read_nvm_eerd;
		nvm->ops.write = NULL;
		nvm->ops.validate = NULL;
		nvm->ops.update = NULL;
	}
	return 0;
}

/**
 * igc_set_eee_i225 - Enable/disable EEE support
 * @hw: pointer to the HW structure
 * @adv2p5G: boolean flag enabling 2.5G EEE advertisement
 * @adv1G: boolean flag enabling 1G EEE advertisement
 * @adv100M: boolean flag enabling 100M EEE advertisement
 *
 * Enable/disable EEE based on setting in dev_spec structure.
 **/
s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G,
		     bool adv100M)
{
	u32 ipcnfg, eeer;

	ipcnfg = rd32(IGC_IPCNFG);
	eeer = rd32(IGC_EEER);

	/* enable or disable per user setting */
	if (hw->dev_spec._base.eee_enable) {
		u32 eee_su = rd32(IGC_EEE_SU);

		if (adv100M)
			ipcnfg |= IGC_IPCNFG_EEE_100M_AN;
		else
			ipcnfg &= ~IGC_IPCNFG_EEE_100M_AN;

		if (adv1G)
			ipcnfg |= IGC_IPCNFG_EEE_1G_AN;
		else
			ipcnfg &= ~IGC_IPCNFG_EEE_1G_AN;

		if (adv2p5G)
			ipcnfg |= IGC_IPCNFG_EEE_2_5G_AN;
		else
			ipcnfg &= ~IGC_IPCNFG_EEE_2_5G_AN;

		eeer |= (IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN |
			 IGC_EEER_LPI_FC);

		/* This bit should not be set in normal operation. */
		if (eee_su & IGC_EEE_SU_LPI_CLK_STP)
			hw_dbg("LPI Clock Stop Bit should not be set!\n");
	} else {
		ipcnfg &= ~(IGC_IPCNFG_EEE_2_5G_AN | IGC_IPCNFG_EEE_1G_AN |
			    IGC_IPCNFG_EEE_100M_AN);
		eeer &= ~(IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN |
			  IGC_EEER_LPI_FC);
	}
	wr32(IGC_IPCNFG, ipcnfg);
	wr32(IGC_EEER, eeer);
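	/* Read back to flush the posted register writes. */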
	rd32(IGC_IPCNFG);
	rd32(IGC_EEER);

	return IGC_SUCCESS;
}

/* igc_set_ltr_i225 - Set Latency Tolerance Reporting thresholds
 * @hw: pointer to the HW structure
 * @link: bool indicating link status
 *
 * Set the LTR thresholds based on the link speed (Mbps), EEE, and DMAC
 * settings, otherwise specify that there is no LTR requirement.
 */
s32 igc_set_ltr_i225(struct igc_hw *hw, bool link)
{
	u32 tw_system, ltrc, ltrv, ltr_min, ltr_max, scale_min, scale_max;
	u16 speed, duplex;
	s32 size;

	/* If we do not have link, LTR thresholds are zero. */
	if (link) {
		hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);

		/* Use the EEE LTR path only when EEE is enabled and the
		 * link speed is above 10 Mbps.
		 */
		if (hw->dev_spec._base.eee_enable &&
		    speed != SPEED_10) {
			/* EEE enabled, so send LTRMAX threshold. */
			ltrc = rd32(IGC_LTRC) |
			       IGC_LTRC_EEEMS_EN;
			wr32(IGC_LTRC, ltrc);

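			/* The EEE_SU wake-time fields appear to be in units
			 * of 500 nsec, hence the multiply by 500 below to
			 * get tw_system in nsec.
			 */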
			/* Calculate tw_system (nsec). */
			if (speed == SPEED_100) {
				tw_system = ((rd32(IGC_EEE_SU) &
					      IGC_TW_SYSTEM_100_MASK) >>
					      IGC_TW_SYSTEM_100_SHIFT) * 500;
			} else {
				tw_system = (rd32(IGC_EEE_SU) &
					     IGC_TW_SYSTEM_1000_MASK) * 500;
			}
		} else {
			tw_system = 0;
		}

		/* Get the Rx packet buffer size. */
		size = rd32(IGC_RXPBS) &
		       IGC_RXPBS_SIZE_I225_MASK;

		/* Calculations vary based on DMAC settings. */
		if (rd32(IGC_DMACR) & IGC_DMACR_DMAC_EN) {
			size -= (rd32(IGC_DMACR) &
				 IGC_DMACR_DMACTHR_MASK) >>
				IGC_DMACR_DMACTHR_SHIFT;
			/* Convert size to bits. */
			size *= 1024 * 8;
		} else {
			/* Convert size to bytes and then to bits. */
			size *= 1024;
			size *= 8;
		}

		if (size < 0) {
			hw_dbg("Invalid effective Rx buffer size %d\n",
			       size);
			return -IGC_ERR_CONFIG;
		}

		/* Calculate the thresholds. Since speed is in Mbps, simplify
		 * the calculation by multiplying size/speed by 1000 for result
		 * to be in nsec before dividing by the scale in nsec. Set the
		 * scale such that the LTR threshold fits in the register.
		 */
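		/* For example, a 32 KB effective buffer on a 1000 Mbps link
		 * gives ltr_min = 1000 * 262144 / 1000 = 262144 nsec, which
		 * encodes as 255 with the 1024 nsec scale.
		 */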
		ltr_min = (1000 * size) / speed;
		ltr_max = ltr_min + tw_system;
		scale_min = (ltr_min / 1024) < 1024 ? IGC_LTRMINV_SCALE_1024 :
			    IGC_LTRMINV_SCALE_32768;
		scale_max = (ltr_max / 1024) < 1024 ? IGC_LTRMAXV_SCALE_1024 :
			    IGC_LTRMAXV_SCALE_32768;
		ltr_min /= scale_min == IGC_LTRMINV_SCALE_1024 ? 1024 : 32768;
		ltr_min -= 1;
		ltr_max /= scale_max == IGC_LTRMAXV_SCALE_1024 ? 1024 : 32768;
		ltr_max -= 1;

		/* Only write the LTR thresholds if they differ from before. */
		ltrv = rd32(IGC_LTRMINV);
		if (ltr_min != (ltrv & IGC_LTRMINV_LTRV_MASK)) {
			ltrv = IGC_LTRMINV_LSNP_REQ | ltr_min |
			       (scale_min << IGC_LTRMINV_SCALE_SHIFT);
			wr32(IGC_LTRMINV, ltrv);
		}

		ltrv = rd32(IGC_LTRMAXV);
		if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) {
			ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max |
			       (scale_max << IGC_LTRMAXV_SCALE_SHIFT);
			wr32(IGC_LTRMAXV, ltrv);
		}
	}

	return IGC_SUCCESS;
}