// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

static void i40e_resume_aq(struct i40e_hw *hw);

/**
 * i40e_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len = I40E_VF_ATQLEN1;
		hw->aq.asq.bal = I40E_VF_ATQBAL1;
		hw->aq.asq.bah = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len = I40E_VF_ARQLEN1;
		hw->aq.arq.bal = I40E_VF_ARQBAL1;
		hw->aq.arq.bah = I40E_VF_ARQBAH1;
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len = I40E_PF_ATQLEN;
		hw->aq.asq.bal = I40E_PF_ATQBAL;
		hw->aq.asq.bah = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len = I40E_PF_ARQLEN;
		hw->aq.arq.bal = I40E_PF_ARQBAL;
		hw->aq.arq.bah = I40E_PF_ARQBAH;
	}
}

/**
 * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

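	/* cmd_buf is a plain (non-DMA) array with one i40e_asq_cmd_details
	 * entry per descriptor, used only for driver-side bookkeeping
	 */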
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	i40e_status ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 * i40e_free_adminq_asq - Free Admin Queue send rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 **/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 * i40e_free_adminq_arq - Free Admin Queue receive rings
 * @hw: pointer to the hardware structure
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 **/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

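		/* BUF marks the descriptor as carrying an attached data
		 * buffer; LB additionally flags buffers larger than
		 * I40E_AQ_LARGE_BUF
		 */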
		desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.external.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 * @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	i40e_status ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
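	/* unlike the ARQ buffers these are not pre-posted to descriptors;
	 * a buffer is attached to a descriptor only when a command is
	 * actually sent (see i40e_asq_send_command)
	 */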
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 * i40e_free_arq_bufs - Free receive queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 * i40e_free_asq_bufs - Free send queue buffer info elements
 * @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 * i40e_config_asq_regs - configure ASQ registers
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the transmit queue
 **/
static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_PF_ATQLEN_ATQENABLE_MASK));
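	/* the 64-bit descriptor ring base address is split across the
	 * BAL (low) and BAH (high) registers
	 */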
	wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 * i40e_config_arq_regs - ARQ register configuration
 * @hw: pointer to the hardware structure
 *
 * Configure base address and length registers for the receive (event) queue
 **/
static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_PF_ARQLEN_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
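	/* tail is written as count - 1, which hands every pre-allocated
	 * buffer to firmware while keeping tail one slot behind the
	 * initial head of zero, the usual ring convention for telling
	 * a full ring from an empty one
	 */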
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 * i40e_init_asq - main initialization routine for ASQ
 * @hw: pointer to the hardware structure
 *
 * This is the main initialization routine for the Admin Send Queue
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_asq_entries
 * - hw->aq.asq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
static i40e_status i40e_init_asq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_init_arq - initialize ARQ
 * @hw: pointer to the hardware structure
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_arq_entries
 * - hw->aq.arq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 **/
static i40e_status i40e_init_arq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_shutdown_asq - shutdown the ASQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Send Queue
 **/
static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
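	/* clearing the len register also clears the ENABLE bit, so
	 * firmware stops servicing the queue before it is torn down
	 */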
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

shutdown_asq_out:
	mutex_unlock(&hw->aq.asq_mutex);
	return ret_code;
}

/**
 * i40e_shutdown_arq - shutdown ARQ
 * @hw: pointer to the hardware structure
 *
 * The main shutdown routine for the Admin Receive Queue
 **/
static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
{
	i40e_status ret_code = 0;

	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

shutdown_arq_out:
	mutex_unlock(&hw->aq.arq_mutex);
	return ret_code;
}

/**
 * i40e_set_hw_flags - set HW flags
 * @hw: pointer to the hardware structure
 **/
static void i40e_set_hw_flags(struct i40e_hw *hw)
{
	struct i40e_adminq_info *aq = &hw->aq;

	hw->flags = 0;

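	/* each feature flag below is gated on the AdminQ API version
	 * reported by firmware, following the pattern "major > 1, or
	 * major == 1 and minor >= N"
	 */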
	switch (hw->mac.type) {
	case I40E_MAC_XL710:
		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
			hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
			hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
			/* The ability to RX (not drop) 802.1ad frames */
			hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
		}
		break;
	case I40E_MAC_X722:
		hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
			     I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
			hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;

		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
			hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;

		if (aq->api_maj_ver > 1 ||
		    (aq->api_maj_ver == 1 &&
		     aq->api_min_ver >= I40E_MINOR_VER_FW_REQUEST_FEC_X722))
			hw->flags |= I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE;

		fallthrough;
	default:
		break;
	}

	/* Newer versions of firmware require lock when reading the NVM */
	if (aq->api_maj_ver > 1 ||
	    (aq->api_maj_ver == 1 &&
	     aq->api_min_ver >= 5))
		hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

	if (aq->api_maj_ver > 1 ||
	    (aq->api_maj_ver == 1 &&
	     aq->api_min_ver >= 8)) {
		hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
		hw->flags |= I40E_HW_FLAG_DROP_MODE;
	}

	if (aq->api_maj_ver > 1 ||
	    (aq->api_maj_ver == 1 &&
	     aq->api_min_ver >= 9))
		hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED;
}

/**
 * i40e_init_adminq - main initialization routine for Admin Queue
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the hw->aq structure:
 * - hw->aq.num_asq_entries
 * - hw->aq.num_arq_entries
 * - hw->aq.arq_buf_size
 * - hw->aq.asq_buf_size
 **/
i40e_status i40e_init_adminq(struct i40e_hw *hw)
{
	u16 cfg_ptr, oem_hi, oem_lo;
	u16 eetrack_lo, eetrack_hi;
	i40e_status ret_code;
	int retry = 0;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code)
		goto init_adminq_destroy_locks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code)
		goto init_adminq_free_asq;

	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.fw_build,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		msleep(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/* Some features were introduced in different FW API version
	 * for different MAC type.
	 */
	i40e_set_hw_flags(hw);

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

	if (hw->mac.type == I40E_MAC_XL710 &&
	    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
		hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
		hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
	}
	if (hw->mac.type == I40E_MAC_X722 &&
	    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722) {
		hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
	}

	/* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
	if (hw->aq.api_maj_ver > 1 ||
	    (hw->aq.api_maj_ver == 1 &&
	     hw->aq.api_min_ver >= 7))
		hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;

	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->nvm_release_on_done = false;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

	ret_code = 0;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_arq:
	i40e_shutdown_arq(hw);
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_locks:

init_adminq_exit:
	return ret_code;
}

/**
 * i40e_shutdown_adminq - shutdown routine for the Admin Queue
 * @hw: pointer to the hardware structure
 **/
void i40e_shutdown_adminq(struct i40e_hw *hw)
{
	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, true);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);

	if (hw->nvm_buff.va)
		i40e_free_virt_mem(hw, &hw->nvm_buff);
}

/**
 * i40e_clean_asq - cleans Admin send queue
 * @hw: pointer to the hardware structure
 *
 * returns the number of free desc
 **/
static u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

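		/* hand the callback a private copy of the completed
		 * descriptor, since the ring entry itself is zeroed
		 * immediately afterwards
		 */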
		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
				(I40E_ADMINQ_CALLBACK)details->callback;
			desc_cb = *desc;
			cb_func(hw, &desc_cb);
		}
		memset(desc, 0, sizeof(*desc));
		memset(details, 0, sizeof(*details));
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}

/**
 * i40e_asq_done - check if FW has processed the Admin Send Queue
 * @hw: pointer to the hw struct
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 **/
static bool i40e_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 * i40e_asq_send_command - send command to Admin Queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buff: buffer to use for indirect commands
 * @buff_size: size of buffer for indirect commands
 * @cmd_details: pointer to command details structure
 *
 * This is the main send command driver routine for the Admin Queue send
 * queue. It runs the queue, cleans the queue, etc
 **/
i40e_status i40e_asq_send_command(struct i40e_hw *hw,
				  struct i40e_aq_desc *desc,
				  void *buff, /* can be NULL */
				  u16 buff_size,
				  struct i40e_asq_cmd_details *cmd_details)
{
	i40e_status status = 0;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16 retval = 0;
	u32 val = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		*details = *cmd_details;

		/* If the cmd_details are defined copy the cookie. The
		 * cpu_to_le32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				cpu_to_le32(upper_32_bits(details->cookie));
			desc->cookie_low =
				cpu_to_le32(lower_32_bits(details->cookie));
		}
	} else {
		memset(details, 0, sizeof(struct i40e_asq_cmd_details));
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~cpu_to_le16(details->flags_dis);
	desc->flags |= cpu_to_le16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	*desc_on_ring = *desc;

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		memcpy(dma_buff->va, buff, buff_size);
		desc_on_ring->datalen = cpu_to_le16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
			cpu_to_le32(upper_32_bits(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
			cpu_to_le32(lower_32_bits(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

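		/* total_delay advances in step with udelay(50), so both it
		 * and asq_cmd_timeout are in microseconds
		 */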
		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			udelay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		*desc = *desc_on_ring;
		if (buff != NULL)
			memcpy(buff, dma_buff->va, buff_size);
		retval = le16_to_cpu(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = 0;
		else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
			status = I40E_ERR_NOT_READY;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		*details->wb_desc = *desc_on_ring;

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	mutex_unlock(&hw->aq.asq_mutex);
	return status;
}

/**
 * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
}

/**
 * i40e_clean_arq_element
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'
 **/
i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
				   struct i40e_arq_event_info *e,
				   u16 *pending)
{
	i40e_status ret_code = 0;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	e->desc = *desc;
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
		       e->msg_len);

	i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	memset((void *)desc, 0, sizeof(struct i40e_aq_desc));

	desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16((u16)bi->size);
	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

	i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode), &e->desc);
clean_arq_element_out:
	/* Set pending if needed, unlock and return */
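	/* pending = number of descriptors firmware has written between
	 * next_to_clean and the head snapshot in ntu, with the ring
	 * wraparound folded in when ntc has passed ntu
	 */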
	if (pending)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}

static void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
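	/* re-arm both queues on top of the rings that are still
	 * allocated; used by the retry path in i40e_init_adminq
	 */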
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}