xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/intel/ice/ice_controlq.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /* Copyright (c) 2018, Intel Corporation. */
3*4882a593Smuzhiyun 
4*4882a593Smuzhiyun #include "ice_common.h"
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun #define ICE_CQ_INIT_REGS(qinfo, prefix)				\
7*4882a593Smuzhiyun do {								\
8*4882a593Smuzhiyun 	(qinfo)->sq.head = prefix##_ATQH;			\
9*4882a593Smuzhiyun 	(qinfo)->sq.tail = prefix##_ATQT;			\
10*4882a593Smuzhiyun 	(qinfo)->sq.len = prefix##_ATQLEN;			\
11*4882a593Smuzhiyun 	(qinfo)->sq.bah = prefix##_ATQBAH;			\
12*4882a593Smuzhiyun 	(qinfo)->sq.bal = prefix##_ATQBAL;			\
13*4882a593Smuzhiyun 	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
14*4882a593Smuzhiyun 	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
15*4882a593Smuzhiyun 	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;	\
16*4882a593Smuzhiyun 	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
17*4882a593Smuzhiyun 	(qinfo)->rq.head = prefix##_ARQH;			\
18*4882a593Smuzhiyun 	(qinfo)->rq.tail = prefix##_ARQT;			\
19*4882a593Smuzhiyun 	(qinfo)->rq.len = prefix##_ARQLEN;			\
20*4882a593Smuzhiyun 	(qinfo)->rq.bah = prefix##_ARQBAH;			\
21*4882a593Smuzhiyun 	(qinfo)->rq.bal = prefix##_ARQBAL;			\
22*4882a593Smuzhiyun 	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
23*4882a593Smuzhiyun 	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
24*4882a593Smuzhiyun 	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;	\
25*4882a593Smuzhiyun 	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
26*4882a593Smuzhiyun } while (0)
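
/* Illustrative expansion: for the admin queue, ICE_CQ_INIT_REGS(cq, PF_FW)
 * pastes the prefix onto each register name, so the first few assignments
 * become
 *
 *	cq->sq.head = PF_FW_ATQH;
 *	cq->sq.tail = PF_FW_ATQT;
 *	cq->sq.len  = PF_FW_ATQLEN;
 *	cq->rq.head = PF_FW_ARQH;
 *
 * while the PF_MBX prefix selects the PF-VF mailbox register set instead.
 */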
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun /**
29*4882a593Smuzhiyun  * ice_adminq_init_regs - Initialize AdminQ registers
30*4882a593Smuzhiyun  * @hw: pointer to the hardware structure
31*4882a593Smuzhiyun  *
32*4882a593Smuzhiyun  * This assumes the alloc_sq and alloc_rq functions have already been called
33*4882a593Smuzhiyun  */
34*4882a593Smuzhiyun static void ice_adminq_init_regs(struct ice_hw *hw)
35*4882a593Smuzhiyun {
36*4882a593Smuzhiyun 	struct ice_ctl_q_info *cq = &hw->adminq;
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun 	ICE_CQ_INIT_REGS(cq, PF_FW);
39*4882a593Smuzhiyun }
40*4882a593Smuzhiyun 
41*4882a593Smuzhiyun /**
42*4882a593Smuzhiyun  * ice_mailbox_init_regs - Initialize Mailbox registers
43*4882a593Smuzhiyun  * @hw: pointer to the hardware structure
44*4882a593Smuzhiyun  *
45*4882a593Smuzhiyun  * This assumes the alloc_sq and alloc_rq functions have already been called
46*4882a593Smuzhiyun  */
47*4882a593Smuzhiyun static void ice_mailbox_init_regs(struct ice_hw *hw)
48*4882a593Smuzhiyun {
49*4882a593Smuzhiyun 	struct ice_ctl_q_info *cq = &hw->mailboxq;
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun 	ICE_CQ_INIT_REGS(cq, PF_MBX);
52*4882a593Smuzhiyun }
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun /**
55*4882a593Smuzhiyun  * ice_check_sq_alive
56*4882a593Smuzhiyun  * @hw: pointer to the HW struct
57*4882a593Smuzhiyun  * @cq: pointer to the specific Control queue
58*4882a593Smuzhiyun  *
59*4882a593Smuzhiyun  * Returns true if Queue is enabled else false.
60*4882a593Smuzhiyun  */
61*4882a593Smuzhiyun bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
62*4882a593Smuzhiyun {
63*4882a593Smuzhiyun 	/* check both queue-length and queue-enable fields */
64*4882a593Smuzhiyun 	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
65*4882a593Smuzhiyun 		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
66*4882a593Smuzhiyun 						cq->sq.len_ena_mask)) ==
67*4882a593Smuzhiyun 			(cq->num_sq_entries | cq->sq.len_ena_mask);
68*4882a593Smuzhiyun 
69*4882a593Smuzhiyun 	return false;
70*4882a593Smuzhiyun }
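
/* Reading note (illustrative): for the admin queue this reads PF_FW_ATQLEN
 * and treats the queue as alive only when the length field still equals
 * num_sq_entries and the enable bit is set; an all-zero register, as seen
 * after a reset, therefore reports the queue as dead.
 */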
71*4882a593Smuzhiyun 
72*4882a593Smuzhiyun /**
73*4882a593Smuzhiyun  * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
74*4882a593Smuzhiyun  * @hw: pointer to the hardware structure
75*4882a593Smuzhiyun  * @cq: pointer to the specific Control queue
76*4882a593Smuzhiyun  */
77*4882a593Smuzhiyun static enum ice_status
78*4882a593Smuzhiyun ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
79*4882a593Smuzhiyun {
80*4882a593Smuzhiyun 	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
81*4882a593Smuzhiyun 
82*4882a593Smuzhiyun 	cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
83*4882a593Smuzhiyun 						 &cq->sq.desc_buf.pa,
84*4882a593Smuzhiyun 						 GFP_KERNEL | __GFP_ZERO);
85*4882a593Smuzhiyun 	if (!cq->sq.desc_buf.va)
86*4882a593Smuzhiyun 		return ICE_ERR_NO_MEMORY;
87*4882a593Smuzhiyun 	cq->sq.desc_buf.size = size;
88*4882a593Smuzhiyun 
89*4882a593Smuzhiyun 	cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
90*4882a593Smuzhiyun 				      sizeof(struct ice_sq_cd), GFP_KERNEL);
91*4882a593Smuzhiyun 	if (!cq->sq.cmd_buf) {
92*4882a593Smuzhiyun 		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
93*4882a593Smuzhiyun 				   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
94*4882a593Smuzhiyun 		cq->sq.desc_buf.va = NULL;
95*4882a593Smuzhiyun 		cq->sq.desc_buf.pa = 0;
96*4882a593Smuzhiyun 		cq->sq.desc_buf.size = 0;
97*4882a593Smuzhiyun 		return ICE_ERR_NO_MEMORY;
98*4882a593Smuzhiyun 	}
99*4882a593Smuzhiyun 
100*4882a593Smuzhiyun 	return 0;
101*4882a593Smuzhiyun }
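
/* Sizing note (illustrative, hypothetical entry count): struct ice_aq_desc is
 * a 32-byte descriptor, so a 256-entry send queue would need
 * 256 * 32 = 8 KiB of coherent DMA memory for the ring, plus 256 ice_sq_cd
 * tracking entries from the devm allocation above.
 */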
102*4882a593Smuzhiyun 
103*4882a593Smuzhiyun /**
104*4882a593Smuzhiyun  * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
105*4882a593Smuzhiyun  * @hw: pointer to the hardware structure
106*4882a593Smuzhiyun  * @cq: pointer to the specific Control queue
107*4882a593Smuzhiyun  */
108*4882a593Smuzhiyun static enum ice_status
109*4882a593Smuzhiyun ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
110*4882a593Smuzhiyun {
111*4882a593Smuzhiyun 	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
112*4882a593Smuzhiyun 
113*4882a593Smuzhiyun 	cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
114*4882a593Smuzhiyun 						 &cq->rq.desc_buf.pa,
115*4882a593Smuzhiyun 						 GFP_KERNEL | __GFP_ZERO);
116*4882a593Smuzhiyun 	if (!cq->rq.desc_buf.va)
117*4882a593Smuzhiyun 		return ICE_ERR_NO_MEMORY;
118*4882a593Smuzhiyun 	cq->rq.desc_buf.size = size;
119*4882a593Smuzhiyun 	return 0;
120*4882a593Smuzhiyun }
121*4882a593Smuzhiyun 
122*4882a593Smuzhiyun /**
123*4882a593Smuzhiyun  * ice_free_cq_ring - Free control queue ring
124*4882a593Smuzhiyun  * @hw: pointer to the hardware structure
125*4882a593Smuzhiyun  * @ring: pointer to the specific control queue ring
126*4882a593Smuzhiyun  *
127*4882a593Smuzhiyun  * This assumes the posted buffers have already been cleaned
128*4882a593Smuzhiyun  * and de-allocated
129*4882a593Smuzhiyun  */
130*4882a593Smuzhiyun static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
131*4882a593Smuzhiyun {
132*4882a593Smuzhiyun 	dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
133*4882a593Smuzhiyun 			   ring->desc_buf.va, ring->desc_buf.pa);
134*4882a593Smuzhiyun 	ring->desc_buf.va = NULL;
135*4882a593Smuzhiyun 	ring->desc_buf.pa = 0;
136*4882a593Smuzhiyun 	ring->desc_buf.size = 0;
137*4882a593Smuzhiyun }
138*4882a593Smuzhiyun 
139*4882a593Smuzhiyun /**
140*4882a593Smuzhiyun  * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
141*4882a593Smuzhiyun  * @hw: pointer to the hardware structure
142*4882a593Smuzhiyun  * @cq: pointer to the specific Control queue
143*4882a593Smuzhiyun  */
144*4882a593Smuzhiyun static enum ice_status
145*4882a593Smuzhiyun ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
146*4882a593Smuzhiyun {
147*4882a593Smuzhiyun 	int i;
148*4882a593Smuzhiyun 
149*4882a593Smuzhiyun 	/* We'll be allocating the buffer info memory first, then we can
150*4882a593Smuzhiyun 	 * allocate the mapped buffers for the event processing
151*4882a593Smuzhiyun 	 */
152*4882a593Smuzhiyun 	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
153*4882a593Smuzhiyun 				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
154*4882a593Smuzhiyun 	if (!cq->rq.dma_head)
155*4882a593Smuzhiyun 		return ICE_ERR_NO_MEMORY;
156*4882a593Smuzhiyun 	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;
157*4882a593Smuzhiyun 
158*4882a593Smuzhiyun 	/* allocate the mapped buffers */
159*4882a593Smuzhiyun 	for (i = 0; i < cq->num_rq_entries; i++) {
160*4882a593Smuzhiyun 		struct ice_aq_desc *desc;
161*4882a593Smuzhiyun 		struct ice_dma_mem *bi;
162*4882a593Smuzhiyun 
163*4882a593Smuzhiyun 		bi = &cq->rq.r.rq_bi[i];
164*4882a593Smuzhiyun 		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
165*4882a593Smuzhiyun 					     cq->rq_buf_size, &bi->pa,
166*4882a593Smuzhiyun 					     GFP_KERNEL | __GFP_ZERO);
167*4882a593Smuzhiyun 		if (!bi->va)
168*4882a593Smuzhiyun 			goto unwind_alloc_rq_bufs;
169*4882a593Smuzhiyun 		bi->size = cq->rq_buf_size;
170*4882a593Smuzhiyun 
171*4882a593Smuzhiyun 		/* now configure the descriptors for use */
172*4882a593Smuzhiyun 		desc = ICE_CTL_Q_DESC(cq->rq, i);
173*4882a593Smuzhiyun 
174*4882a593Smuzhiyun 		desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
175*4882a593Smuzhiyun 		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
176*4882a593Smuzhiyun 			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
177*4882a593Smuzhiyun 		desc->opcode = 0;
178*4882a593Smuzhiyun 		/* This is in accordance with Admin queue design, there is no
179*4882a593Smuzhiyun 		 * register for buffer size configuration
180*4882a593Smuzhiyun 		 */
181*4882a593Smuzhiyun 		desc->datalen = cpu_to_le16(bi->size);
182*4882a593Smuzhiyun 		desc->retval = 0;
183*4882a593Smuzhiyun 		desc->cookie_high = 0;
184*4882a593Smuzhiyun 		desc->cookie_low = 0;
185*4882a593Smuzhiyun 		desc->params.generic.addr_high =
186*4882a593Smuzhiyun 			cpu_to_le32(upper_32_bits(bi->pa));
187*4882a593Smuzhiyun 		desc->params.generic.addr_low =
188*4882a593Smuzhiyun 			cpu_to_le32(lower_32_bits(bi->pa));
189*4882a593Smuzhiyun 		desc->params.generic.param0 = 0;
190*4882a593Smuzhiyun 		desc->params.generic.param1 = 0;
191*4882a593Smuzhiyun 	}
192*4882a593Smuzhiyun 	return 0;
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun unwind_alloc_rq_bufs:
195*4882a593Smuzhiyun 	/* don't try to free the one that failed... */
196*4882a593Smuzhiyun 	i--;
197*4882a593Smuzhiyun 	for (; i >= 0; i--) {
198*4882a593Smuzhiyun 		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
199*4882a593Smuzhiyun 				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
200*4882a593Smuzhiyun 		cq->rq.r.rq_bi[i].va = NULL;
201*4882a593Smuzhiyun 		cq->rq.r.rq_bi[i].pa = 0;
202*4882a593Smuzhiyun 		cq->rq.r.rq_bi[i].size = 0;
203*4882a593Smuzhiyun 	}
204*4882a593Smuzhiyun 	cq->rq.r.rq_bi = NULL;
205*4882a593Smuzhiyun 	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
206*4882a593Smuzhiyun 	cq->rq.dma_head = NULL;
207*4882a593Smuzhiyun 
208*4882a593Smuzhiyun 	return ICE_ERR_NO_MEMORY;
209*4882a593Smuzhiyun }
210*4882a593Smuzhiyun 
211*4882a593Smuzhiyun /**
212*4882a593Smuzhiyun  * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
213*4882a593Smuzhiyun  * @hw: pointer to the hardware structure
214*4882a593Smuzhiyun  * @cq: pointer to the specific Control queue
215*4882a593Smuzhiyun  */
216*4882a593Smuzhiyun static enum ice_status
217*4882a593Smuzhiyun ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
218*4882a593Smuzhiyun {
219*4882a593Smuzhiyun 	int i;
220*4882a593Smuzhiyun 
221*4882a593Smuzhiyun 	/* No mapped memory needed yet, just the buffer info structures */
222*4882a593Smuzhiyun 	cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
223*4882a593Smuzhiyun 				       sizeof(cq->sq.desc_buf), GFP_KERNEL);
224*4882a593Smuzhiyun 	if (!cq->sq.dma_head)
225*4882a593Smuzhiyun 		return ICE_ERR_NO_MEMORY;
226*4882a593Smuzhiyun 	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;
227*4882a593Smuzhiyun 
228*4882a593Smuzhiyun 	/* allocate the mapped buffers */
229*4882a593Smuzhiyun 	for (i = 0; i < cq->num_sq_entries; i++) {
230*4882a593Smuzhiyun 		struct ice_dma_mem *bi;
231*4882a593Smuzhiyun 
232*4882a593Smuzhiyun 		bi = &cq->sq.r.sq_bi[i];
233*4882a593Smuzhiyun 		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
234*4882a593Smuzhiyun 					     cq->sq_buf_size, &bi->pa,
235*4882a593Smuzhiyun 					     GFP_KERNEL | __GFP_ZERO);
236*4882a593Smuzhiyun 		if (!bi->va)
237*4882a593Smuzhiyun 			goto unwind_alloc_sq_bufs;
238*4882a593Smuzhiyun 		bi->size = cq->sq_buf_size;
239*4882a593Smuzhiyun 	}
240*4882a593Smuzhiyun 	return 0;
241*4882a593Smuzhiyun 
242*4882a593Smuzhiyun unwind_alloc_sq_bufs:
243*4882a593Smuzhiyun 	/* don't try to free the one that failed... */
244*4882a593Smuzhiyun 	i--;
245*4882a593Smuzhiyun 	for (; i >= 0; i--) {
246*4882a593Smuzhiyun 		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
247*4882a593Smuzhiyun 				   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
248*4882a593Smuzhiyun 		cq->sq.r.sq_bi[i].va = NULL;
249*4882a593Smuzhiyun 		cq->sq.r.sq_bi[i].pa = 0;
250*4882a593Smuzhiyun 		cq->sq.r.sq_bi[i].size = 0;
251*4882a593Smuzhiyun 	}
252*4882a593Smuzhiyun 	cq->sq.r.sq_bi = NULL;
253*4882a593Smuzhiyun 	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
254*4882a593Smuzhiyun 	cq->sq.dma_head = NULL;
255*4882a593Smuzhiyun 
256*4882a593Smuzhiyun 	return ICE_ERR_NO_MEMORY;
257*4882a593Smuzhiyun }
258*4882a593Smuzhiyun 
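/**
 * ice_cfg_cq_regs - setup registers for a control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 * @num_entries: number of descriptors in the ring
 *
 * Program head, tail, length and base address registers for one control
 * queue ring, then read back the base address low register to verify that
 * the configuration reached the hardware.
 */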
259*4882a593Smuzhiyun static enum ice_status
260*4882a593Smuzhiyun ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
261*4882a593Smuzhiyun {
262*4882a593Smuzhiyun 	/* Clear Head and Tail */
263*4882a593Smuzhiyun 	wr32(hw, ring->head, 0);
264*4882a593Smuzhiyun 	wr32(hw, ring->tail, 0);
265*4882a593Smuzhiyun 
266*4882a593Smuzhiyun 	/* set starting point */
267*4882a593Smuzhiyun 	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
268*4882a593Smuzhiyun 	wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
269*4882a593Smuzhiyun 	wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));
270*4882a593Smuzhiyun 
271*4882a593Smuzhiyun 	/* Check one register to verify that config was applied */
272*4882a593Smuzhiyun 	if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
273*4882a593Smuzhiyun 		return ICE_ERR_AQ_ERROR;
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun 	return 0;
276*4882a593Smuzhiyun }
277*4882a593Smuzhiyun 
278*4882a593Smuzhiyun /**
279*4882a593Smuzhiyun  * ice_cfg_sq_regs - configure Control ATQ registers
280*4882a593Smuzhiyun  * @hw: pointer to the hardware structure
281*4882a593Smuzhiyun  * @cq: pointer to the specific Control queue
282*4882a593Smuzhiyun  *
283*4882a593Smuzhiyun  * Configure base address and length registers for the transmit queue
284*4882a593Smuzhiyun  */
285*4882a593Smuzhiyun static enum ice_status
286*4882a593Smuzhiyun ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
287*4882a593Smuzhiyun {
288*4882a593Smuzhiyun 	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
289*4882a593Smuzhiyun }
290*4882a593Smuzhiyun 
291*4882a593Smuzhiyun /**
292*4882a593Smuzhiyun  * ice_cfg_rq_regs - configure Control ARQ register
293*4882a593Smuzhiyun  * @hw: pointer to the hardware structure
294*4882a593Smuzhiyun  * @cq: pointer to the specific Control queue
295*4882a593Smuzhiyun  *
296*4882a593Smuzhiyun  * Configure base address and length registers for the receive (event queue)
297*4882a593Smuzhiyun  */
298*4882a593Smuzhiyun static enum ice_status
299*4882a593Smuzhiyun ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
300*4882a593Smuzhiyun {
301*4882a593Smuzhiyun 	enum ice_status status;
302*4882a593Smuzhiyun 
303*4882a593Smuzhiyun 	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
304*4882a593Smuzhiyun 	if (status)
305*4882a593Smuzhiyun 		return status;
306*4882a593Smuzhiyun 
307*4882a593Smuzhiyun 	/* Update tail in the HW to post pre-allocated buffers */
308*4882a593Smuzhiyun 	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));
309*4882a593Smuzhiyun 
310*4882a593Smuzhiyun 	return 0;
311*4882a593Smuzhiyun }
312*4882a593Smuzhiyun 
313*4882a593Smuzhiyun #define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
314*4882a593Smuzhiyun do {									\
315*4882a593Smuzhiyun 	/* free descriptors */						\
316*4882a593Smuzhiyun 	if ((qi)->ring.r.ring##_bi) {					\
317*4882a593Smuzhiyun 		int i;							\
318*4882a593Smuzhiyun 									\
319*4882a593Smuzhiyun 		for (i = 0; i < (qi)->num_##ring##_entries; i++)	\
320*4882a593Smuzhiyun 			if ((qi)->ring.r.ring##_bi[i].pa) {		\
321*4882a593Smuzhiyun 				dmam_free_coherent(ice_hw_to_dev(hw),	\
322*4882a593Smuzhiyun 					(qi)->ring.r.ring##_bi[i].size,	\
323*4882a593Smuzhiyun 					(qi)->ring.r.ring##_bi[i].va,	\
324*4882a593Smuzhiyun 					(qi)->ring.r.ring##_bi[i].pa);	\
325*4882a593Smuzhiyun 					(qi)->ring.r.ring##_bi[i].va = NULL;\
326*4882a593Smuzhiyun 					(qi)->ring.r.ring##_bi[i].pa = 0;\
327*4882a593Smuzhiyun 					(qi)->ring.r.ring##_bi[i].size = 0;\
328*4882a593Smuzhiyun 		}							\
329*4882a593Smuzhiyun 	}								\
330*4882a593Smuzhiyun 	/* free the buffer info list */					\
331*4882a593Smuzhiyun 	if ((qi)->ring.cmd_buf)						\
332*4882a593Smuzhiyun 		devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);	\
333*4882a593Smuzhiyun 	/* free DMA head */						\
334*4882a593Smuzhiyun 	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
335*4882a593Smuzhiyun } while (0)
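
/* Usage note: the macro is invoked as ICE_FREE_CQ_BUFS(hw, cq, sq) or
 * ICE_FREE_CQ_BUFS(hw, cq, rq); token pasting selects the matching
 * r.sq_bi/r.rq_bi array and num_sq_entries/num_rq_entries count, so the
 * same body tears down either ring's buffers.
 */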
336*4882a593Smuzhiyun 
337*4882a593Smuzhiyun /**
338*4882a593Smuzhiyun  * ice_init_sq - main initialization routine for Control ATQ
339*4882a593Smuzhiyun  * @hw: pointer to the hardware structure
340*4882a593Smuzhiyun  * @cq: pointer to the specific Control queue
341*4882a593Smuzhiyun  *
342*4882a593Smuzhiyun  * This is the main initialization routine for the Control Send Queue
343*4882a593Smuzhiyun  * Prior to calling this function, the driver *MUST* set the following fields
344*4882a593Smuzhiyun  * in the cq->structure:
345*4882a593Smuzhiyun  *     - cq->num_sq_entries
346*4882a593Smuzhiyun  *     - cq->sq_buf_size
347*4882a593Smuzhiyun  *
348*4882a593Smuzhiyun  * Do *NOT* hold the lock when calling this as the memory allocation routines
349*4882a593Smuzhiyun  * called are not going to be atomic context safe
350*4882a593Smuzhiyun  */
351*4882a593Smuzhiyun static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
352*4882a593Smuzhiyun {
353*4882a593Smuzhiyun 	enum ice_status ret_code;
354*4882a593Smuzhiyun 
355*4882a593Smuzhiyun 	if (cq->sq.count > 0) {
356*4882a593Smuzhiyun 		/* queue already initialized */
357*4882a593Smuzhiyun 		ret_code = ICE_ERR_NOT_READY;
358*4882a593Smuzhiyun 		goto init_ctrlq_exit;
359*4882a593Smuzhiyun 	}
360*4882a593Smuzhiyun 
361*4882a593Smuzhiyun 	/* verify input for valid configuration */
362*4882a593Smuzhiyun 	if (!cq->num_sq_entries || !cq->sq_buf_size) {
363*4882a593Smuzhiyun 		ret_code = ICE_ERR_CFG;
364*4882a593Smuzhiyun 		goto init_ctrlq_exit;
365*4882a593Smuzhiyun 	}
366*4882a593Smuzhiyun 
367*4882a593Smuzhiyun 	cq->sq.next_to_use = 0;
368*4882a593Smuzhiyun 	cq->sq.next_to_clean = 0;
369*4882a593Smuzhiyun 
370*4882a593Smuzhiyun 	/* allocate the ring memory */
371*4882a593Smuzhiyun 	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
372*4882a593Smuzhiyun 	if (ret_code)
373*4882a593Smuzhiyun 		goto init_ctrlq_exit;
374*4882a593Smuzhiyun 
375*4882a593Smuzhiyun 	/* allocate buffers in the rings */
376*4882a593Smuzhiyun 	ret_code = ice_alloc_sq_bufs(hw, cq);
377*4882a593Smuzhiyun 	if (ret_code)
378*4882a593Smuzhiyun 		goto init_ctrlq_free_rings;
379*4882a593Smuzhiyun 
380*4882a593Smuzhiyun 	/* initialize base registers */
381*4882a593Smuzhiyun 	ret_code = ice_cfg_sq_regs(hw, cq);
382*4882a593Smuzhiyun 	if (ret_code)
383*4882a593Smuzhiyun 		goto init_ctrlq_free_rings;
384*4882a593Smuzhiyun 
385*4882a593Smuzhiyun 	/* success! */
386*4882a593Smuzhiyun 	cq->sq.count = cq->num_sq_entries;
387*4882a593Smuzhiyun 	goto init_ctrlq_exit;
388*4882a593Smuzhiyun 
389*4882a593Smuzhiyun init_ctrlq_free_rings:
390*4882a593Smuzhiyun 	ICE_FREE_CQ_BUFS(hw, cq, sq);
391*4882a593Smuzhiyun 	ice_free_cq_ring(hw, &cq->sq);
392*4882a593Smuzhiyun 
393*4882a593Smuzhiyun init_ctrlq_exit:
394*4882a593Smuzhiyun 	return ret_code;
395*4882a593Smuzhiyun }
396*4882a593Smuzhiyun 
397*4882a593Smuzhiyun /**
398*4882a593Smuzhiyun  * ice_init_rq - initialize ARQ
399*4882a593Smuzhiyun  * @hw: pointer to the hardware structure
400*4882a593Smuzhiyun  * @cq: pointer to the specific Control queue
401*4882a593Smuzhiyun  *
402*4882a593Smuzhiyun  * The main initialization routine for the Admin Receive (Event) Queue.
403*4882a593Smuzhiyun  * Prior to calling this function, the driver *MUST* set the following fields
404*4882a593Smuzhiyun  * in the cq->structure:
405*4882a593Smuzhiyun  *     - cq->num_rq_entries
406*4882a593Smuzhiyun  *     - cq->rq_buf_size
407*4882a593Smuzhiyun  *
408*4882a593Smuzhiyun  * Do *NOT* hold the lock when calling this as the memory allocation routines
409*4882a593Smuzhiyun  * called are not going to be atomic context safe
410*4882a593Smuzhiyun  */
411*4882a593Smuzhiyun static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
412*4882a593Smuzhiyun {
413*4882a593Smuzhiyun 	enum ice_status ret_code;
414*4882a593Smuzhiyun 
415*4882a593Smuzhiyun 	if (cq->rq.count > 0) {
416*4882a593Smuzhiyun 		/* queue already initialized */
417*4882a593Smuzhiyun 		ret_code = ICE_ERR_NOT_READY;
418*4882a593Smuzhiyun 		goto init_ctrlq_exit;
419*4882a593Smuzhiyun 	}
420*4882a593Smuzhiyun 
421*4882a593Smuzhiyun 	/* verify input for valid configuration */
422*4882a593Smuzhiyun 	if (!cq->num_rq_entries || !cq->rq_buf_size) {
423*4882a593Smuzhiyun 		ret_code = ICE_ERR_CFG;
424*4882a593Smuzhiyun 		goto init_ctrlq_exit;
425*4882a593Smuzhiyun 	}
426*4882a593Smuzhiyun 
427*4882a593Smuzhiyun 	cq->rq.next_to_use = 0;
428*4882a593Smuzhiyun 	cq->rq.next_to_clean = 0;
429*4882a593Smuzhiyun 
430*4882a593Smuzhiyun 	/* allocate the ring memory */
431*4882a593Smuzhiyun 	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
432*4882a593Smuzhiyun 	if (ret_code)
433*4882a593Smuzhiyun 		goto init_ctrlq_exit;
434*4882a593Smuzhiyun 
435*4882a593Smuzhiyun 	/* allocate buffers in the rings */
436*4882a593Smuzhiyun 	ret_code = ice_alloc_rq_bufs(hw, cq);
437*4882a593Smuzhiyun 	if (ret_code)
438*4882a593Smuzhiyun 		goto init_ctrlq_free_rings;
439*4882a593Smuzhiyun 
440*4882a593Smuzhiyun 	/* initialize base registers */
441*4882a593Smuzhiyun 	ret_code = ice_cfg_rq_regs(hw, cq);
442*4882a593Smuzhiyun 	if (ret_code)
443*4882a593Smuzhiyun 		goto init_ctrlq_free_rings;
444*4882a593Smuzhiyun 
445*4882a593Smuzhiyun 	/* success! */
446*4882a593Smuzhiyun 	cq->rq.count = cq->num_rq_entries;
447*4882a593Smuzhiyun 	goto init_ctrlq_exit;
448*4882a593Smuzhiyun 
449*4882a593Smuzhiyun init_ctrlq_free_rings:
450*4882a593Smuzhiyun 	ICE_FREE_CQ_BUFS(hw, cq, rq);
451*4882a593Smuzhiyun 	ice_free_cq_ring(hw, &cq->rq);
452*4882a593Smuzhiyun 
453*4882a593Smuzhiyun init_ctrlq_exit:
454*4882a593Smuzhiyun 	return ret_code;
455*4882a593Smuzhiyun }
456*4882a593Smuzhiyun 
457*4882a593Smuzhiyun /**
458*4882a593Smuzhiyun  * ice_shutdown_sq - shutdown the Control ATQ
459*4882a593Smuzhiyun  * @hw: pointer to the hardware structure
460*4882a593Smuzhiyun  * @cq: pointer to the specific Control queue
461*4882a593Smuzhiyun  *
462*4882a593Smuzhiyun  * The main shutdown routine for the Control Transmit Queue
463*4882a593Smuzhiyun  */
464*4882a593Smuzhiyun static enum ice_status
465*4882a593Smuzhiyun ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
466*4882a593Smuzhiyun {
467*4882a593Smuzhiyun 	enum ice_status ret_code = 0;
468*4882a593Smuzhiyun 
469*4882a593Smuzhiyun 	mutex_lock(&cq->sq_lock);
470*4882a593Smuzhiyun 
471*4882a593Smuzhiyun 	if (!cq->sq.count) {
472*4882a593Smuzhiyun 		ret_code = ICE_ERR_NOT_READY;
473*4882a593Smuzhiyun 		goto shutdown_sq_out;
474*4882a593Smuzhiyun 	}
475*4882a593Smuzhiyun 
476*4882a593Smuzhiyun 	/* Stop firmware AdminQ processing */
477*4882a593Smuzhiyun 	wr32(hw, cq->sq.head, 0);
478*4882a593Smuzhiyun 	wr32(hw, cq->sq.tail, 0);
479*4882a593Smuzhiyun 	wr32(hw, cq->sq.len, 0);
480*4882a593Smuzhiyun 	wr32(hw, cq->sq.bal, 0);
481*4882a593Smuzhiyun 	wr32(hw, cq->sq.bah, 0);
482*4882a593Smuzhiyun 
483*4882a593Smuzhiyun 	cq->sq.count = 0;	/* to indicate uninitialized queue */
484*4882a593Smuzhiyun 
485*4882a593Smuzhiyun 	/* free ring buffers and the ring itself */
486*4882a593Smuzhiyun 	ICE_FREE_CQ_BUFS(hw, cq, sq);
487*4882a593Smuzhiyun 	ice_free_cq_ring(hw, &cq->sq);
488*4882a593Smuzhiyun 
489*4882a593Smuzhiyun shutdown_sq_out:
490*4882a593Smuzhiyun 	mutex_unlock(&cq->sq_lock);
491*4882a593Smuzhiyun 	return ret_code;
492*4882a593Smuzhiyun }
493*4882a593Smuzhiyun 
494*4882a593Smuzhiyun /**
495*4882a593Smuzhiyun  * ice_aq_ver_check - Check the reported AQ API version.
496*4882a593Smuzhiyun  * @hw: pointer to the hardware structure
497*4882a593Smuzhiyun  *
498*4882a593Smuzhiyun  * Checks if the driver should load on a given AQ API version.
499*4882a593Smuzhiyun  *
500*4882a593Smuzhiyun  * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
501*4882a593Smuzhiyun  */
502*4882a593Smuzhiyun static bool ice_aq_ver_check(struct ice_hw *hw)
503*4882a593Smuzhiyun {
504*4882a593Smuzhiyun 	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
505*4882a593Smuzhiyun 		/* Major API version is newer than expected, don't load */
506*4882a593Smuzhiyun 		dev_warn(ice_hw_to_dev(hw),
507*4882a593Smuzhiyun 			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
508*4882a593Smuzhiyun 		return false;
509*4882a593Smuzhiyun 	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
510*4882a593Smuzhiyun 		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
511*4882a593Smuzhiyun 			dev_info(ice_hw_to_dev(hw),
512*4882a593Smuzhiyun 				 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
513*4882a593Smuzhiyun 		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
514*4882a593Smuzhiyun 			dev_info(ice_hw_to_dev(hw),
515*4882a593Smuzhiyun 				 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
516*4882a593Smuzhiyun 	} else {
517*4882a593Smuzhiyun 		/* Major API version is older than expected, log a warning */
518*4882a593Smuzhiyun 		dev_info(ice_hw_to_dev(hw),
519*4882a593Smuzhiyun 			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
520*4882a593Smuzhiyun 	}
521*4882a593Smuzhiyun 	return true;
522*4882a593Smuzhiyun }
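
/* Worked example (hypothetical numbers): with EXP_FW_API_VER_MINOR = 5 and a
 * matching major version, firmware minors 3 through 7 load silently, minors
 * 8 and above log the "newer than expected" message, and minors 2 and below
 * log the "older than expected" message.
 */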
523*4882a593Smuzhiyun 
524*4882a593Smuzhiyun /**
525*4882a593Smuzhiyun  * ice_shutdown_rq - shutdown Control ARQ
526*4882a593Smuzhiyun  * @hw: pointer to the hardware structure
527*4882a593Smuzhiyun  * @cq: pointer to the specific Control queue
528*4882a593Smuzhiyun  *
529*4882a593Smuzhiyun  * The main shutdown routine for the Control Receive Queue
530*4882a593Smuzhiyun  */
531*4882a593Smuzhiyun static enum ice_status
532*4882a593Smuzhiyun ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
533*4882a593Smuzhiyun {
534*4882a593Smuzhiyun 	enum ice_status ret_code = 0;
535*4882a593Smuzhiyun 
536*4882a593Smuzhiyun 	mutex_lock(&cq->rq_lock);
537*4882a593Smuzhiyun 
538*4882a593Smuzhiyun 	if (!cq->rq.count) {
539*4882a593Smuzhiyun 		ret_code = ICE_ERR_NOT_READY;
540*4882a593Smuzhiyun 		goto shutdown_rq_out;
541*4882a593Smuzhiyun 	}
542*4882a593Smuzhiyun 
543*4882a593Smuzhiyun 	/* Stop Control Queue processing */
544*4882a593Smuzhiyun 	wr32(hw, cq->rq.head, 0);
545*4882a593Smuzhiyun 	wr32(hw, cq->rq.tail, 0);
546*4882a593Smuzhiyun 	wr32(hw, cq->rq.len, 0);
547*4882a593Smuzhiyun 	wr32(hw, cq->rq.bal, 0);
548*4882a593Smuzhiyun 	wr32(hw, cq->rq.bah, 0);
549*4882a593Smuzhiyun 
550*4882a593Smuzhiyun 	/* set rq.count to 0 to indicate uninitialized queue */
551*4882a593Smuzhiyun 	cq->rq.count = 0;
552*4882a593Smuzhiyun 
553*4882a593Smuzhiyun 	/* free ring buffers and the ring itself */
554*4882a593Smuzhiyun 	ICE_FREE_CQ_BUFS(hw, cq, rq);
555*4882a593Smuzhiyun 	ice_free_cq_ring(hw, &cq->rq);
556*4882a593Smuzhiyun 
557*4882a593Smuzhiyun shutdown_rq_out:
558*4882a593Smuzhiyun 	mutex_unlock(&cq->rq_lock);
559*4882a593Smuzhiyun 	return ret_code;
560*4882a593Smuzhiyun }
561*4882a593Smuzhiyun 
562*4882a593Smuzhiyun /**
563*4882a593Smuzhiyun  * ice_init_check_adminq - Check version for Admin Queue to know if it is alive
564*4882a593Smuzhiyun  * @hw: pointer to the hardware structure
565*4882a593Smuzhiyun  */
566*4882a593Smuzhiyun static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
567*4882a593Smuzhiyun {
568*4882a593Smuzhiyun 	struct ice_ctl_q_info *cq = &hw->adminq;
569*4882a593Smuzhiyun 	enum ice_status status;
570*4882a593Smuzhiyun 
571*4882a593Smuzhiyun 	status = ice_aq_get_fw_ver(hw, NULL);
572*4882a593Smuzhiyun 	if (status)
573*4882a593Smuzhiyun 		goto init_ctrlq_free_rq;
574*4882a593Smuzhiyun 
575*4882a593Smuzhiyun 	if (!ice_aq_ver_check(hw)) {
576*4882a593Smuzhiyun 		status = ICE_ERR_FW_API_VER;
577*4882a593Smuzhiyun 		goto init_ctrlq_free_rq;
578*4882a593Smuzhiyun 	}
579*4882a593Smuzhiyun 
580*4882a593Smuzhiyun 	return 0;
581*4882a593Smuzhiyun 
582*4882a593Smuzhiyun init_ctrlq_free_rq:
583*4882a593Smuzhiyun 	ice_shutdown_rq(hw, cq);
584*4882a593Smuzhiyun 	ice_shutdown_sq(hw, cq);
585*4882a593Smuzhiyun 	return status;
586*4882a593Smuzhiyun }
587*4882a593Smuzhiyun 
588*4882a593Smuzhiyun /**
589*4882a593Smuzhiyun  * ice_init_ctrlq - main initialization routine for any control Queue
590*4882a593Smuzhiyun  * @hw: pointer to the hardware structure
591*4882a593Smuzhiyun  * @q_type: specific Control queue type
592*4882a593Smuzhiyun  *
593*4882a593Smuzhiyun  * Prior to calling this function, the driver *MUST* set the following fields
594*4882a593Smuzhiyun  * in the cq->structure:
595*4882a593Smuzhiyun  *     - cq->num_sq_entries
596*4882a593Smuzhiyun  *     - cq->num_rq_entries
597*4882a593Smuzhiyun  *     - cq->rq_buf_size
598*4882a593Smuzhiyun  *     - cq->sq_buf_size
599*4882a593Smuzhiyun  *
600*4882a593Smuzhiyun  * NOTE: this function does not initialize the controlq locks
601*4882a593Smuzhiyun  */
602*4882a593Smuzhiyun static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
603*4882a593Smuzhiyun {
604*4882a593Smuzhiyun 	struct ice_ctl_q_info *cq;
605*4882a593Smuzhiyun 	enum ice_status ret_code;
606*4882a593Smuzhiyun 
607*4882a593Smuzhiyun 	switch (q_type) {
608*4882a593Smuzhiyun 	case ICE_CTL_Q_ADMIN:
609*4882a593Smuzhiyun 		ice_adminq_init_regs(hw);
610*4882a593Smuzhiyun 		cq = &hw->adminq;
611*4882a593Smuzhiyun 		break;
612*4882a593Smuzhiyun 	case ICE_CTL_Q_MAILBOX:
613*4882a593Smuzhiyun 		ice_mailbox_init_regs(hw);
614*4882a593Smuzhiyun 		cq = &hw->mailboxq;
615*4882a593Smuzhiyun 		break;
616*4882a593Smuzhiyun 	default:
617*4882a593Smuzhiyun 		return ICE_ERR_PARAM;
618*4882a593Smuzhiyun 	}
619*4882a593Smuzhiyun 	cq->qtype = q_type;
620*4882a593Smuzhiyun 
621*4882a593Smuzhiyun 	/* verify input for valid configuration */
622*4882a593Smuzhiyun 	if (!cq->num_rq_entries || !cq->num_sq_entries ||
623*4882a593Smuzhiyun 	    !cq->rq_buf_size || !cq->sq_buf_size) {
624*4882a593Smuzhiyun 		return ICE_ERR_CFG;
625*4882a593Smuzhiyun 	}
626*4882a593Smuzhiyun 
627*4882a593Smuzhiyun 	/* setup SQ command write back timeout */
628*4882a593Smuzhiyun 	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;
629*4882a593Smuzhiyun 
630*4882a593Smuzhiyun 	/* allocate the ATQ */
631*4882a593Smuzhiyun 	ret_code = ice_init_sq(hw, cq);
632*4882a593Smuzhiyun 	if (ret_code)
633*4882a593Smuzhiyun 		return ret_code;
634*4882a593Smuzhiyun 
635*4882a593Smuzhiyun 	/* allocate the ARQ */
636*4882a593Smuzhiyun 	ret_code = ice_init_rq(hw, cq);
637*4882a593Smuzhiyun 	if (ret_code)
638*4882a593Smuzhiyun 		goto init_ctrlq_free_sq;
639*4882a593Smuzhiyun 
640*4882a593Smuzhiyun 	/* success! */
641*4882a593Smuzhiyun 	return 0;
642*4882a593Smuzhiyun 
643*4882a593Smuzhiyun init_ctrlq_free_sq:
644*4882a593Smuzhiyun 	ice_shutdown_sq(hw, cq);
645*4882a593Smuzhiyun 	return ret_code;
646*4882a593Smuzhiyun }
647*4882a593Smuzhiyun 
648*4882a593Smuzhiyun /**
649*4882a593Smuzhiyun  * ice_shutdown_ctrlq - shutdown routine for any control queue
650*4882a593Smuzhiyun  * @hw: pointer to the hardware structure
651*4882a593Smuzhiyun  * @q_type: specific Control queue type
652*4882a593Smuzhiyun  *
653*4882a593Smuzhiyun  * NOTE: this function does not destroy the control queue locks.
654*4882a593Smuzhiyun  */
655*4882a593Smuzhiyun static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
656*4882a593Smuzhiyun {
657*4882a593Smuzhiyun 	struct ice_ctl_q_info *cq;
658*4882a593Smuzhiyun 
659*4882a593Smuzhiyun 	switch (q_type) {
660*4882a593Smuzhiyun 	case ICE_CTL_Q_ADMIN:
661*4882a593Smuzhiyun 		cq = &hw->adminq;
662*4882a593Smuzhiyun 		if (ice_check_sq_alive(hw, cq))
663*4882a593Smuzhiyun 			ice_aq_q_shutdown(hw, true);
664*4882a593Smuzhiyun 		break;
665*4882a593Smuzhiyun 	case ICE_CTL_Q_MAILBOX:
666*4882a593Smuzhiyun 		cq = &hw->mailboxq;
667*4882a593Smuzhiyun 		break;
668*4882a593Smuzhiyun 	default:
669*4882a593Smuzhiyun 		return;
670*4882a593Smuzhiyun 	}
671*4882a593Smuzhiyun 
672*4882a593Smuzhiyun 	ice_shutdown_sq(hw, cq);
673*4882a593Smuzhiyun 	ice_shutdown_rq(hw, cq);
674*4882a593Smuzhiyun }
675*4882a593Smuzhiyun 
676*4882a593Smuzhiyun /**
677*4882a593Smuzhiyun  * ice_shutdown_all_ctrlq - shutdown routine for all control queues
678*4882a593Smuzhiyun  * @hw: pointer to the hardware structure
679*4882a593Smuzhiyun  *
680*4882a593Smuzhiyun  * NOTE: this function does not destroy the control queue locks. The driver
681*4882a593Smuzhiyun  * may call this at runtime to shutdown and later restart control queues, such
682*4882a593Smuzhiyun  * as in response to a reset event.
683*4882a593Smuzhiyun  */
684*4882a593Smuzhiyun void ice_shutdown_all_ctrlq(struct ice_hw *hw)
685*4882a593Smuzhiyun {
686*4882a593Smuzhiyun 	/* Shutdown FW admin queue */
687*4882a593Smuzhiyun 	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
688*4882a593Smuzhiyun 	/* Shutdown PF-VF Mailbox */
689*4882a593Smuzhiyun 	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
690*4882a593Smuzhiyun }
691*4882a593Smuzhiyun 
692*4882a593Smuzhiyun /**
693*4882a593Smuzhiyun  * ice_init_all_ctrlq - main initialization routine for all control queues
694*4882a593Smuzhiyun  * @hw: pointer to the hardware structure
695*4882a593Smuzhiyun  *
696*4882a593Smuzhiyun  * Prior to calling this function, the driver *MUST* set the following fields
697*4882a593Smuzhiyun  * in the cq->structure for all control queues:
698*4882a593Smuzhiyun  *     - cq->num_sq_entries
699*4882a593Smuzhiyun  *     - cq->num_rq_entries
700*4882a593Smuzhiyun  *     - cq->rq_buf_size
701*4882a593Smuzhiyun  *     - cq->sq_buf_size
702*4882a593Smuzhiyun  *
703*4882a593Smuzhiyun  * NOTE: this function does not initialize the controlq locks.
704*4882a593Smuzhiyun  */
705*4882a593Smuzhiyun enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
706*4882a593Smuzhiyun {
707*4882a593Smuzhiyun 	enum ice_status status;
708*4882a593Smuzhiyun 	u32 retry = 0;
709*4882a593Smuzhiyun 
710*4882a593Smuzhiyun 	/* Init FW admin queue */
711*4882a593Smuzhiyun 	do {
712*4882a593Smuzhiyun 		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
713*4882a593Smuzhiyun 		if (status)
714*4882a593Smuzhiyun 			return status;
715*4882a593Smuzhiyun 
716*4882a593Smuzhiyun 		status = ice_init_check_adminq(hw);
717*4882a593Smuzhiyun 		if (status != ICE_ERR_AQ_FW_CRITICAL)
718*4882a593Smuzhiyun 			break;
719*4882a593Smuzhiyun 
720*4882a593Smuzhiyun 		ice_debug(hw, ICE_DBG_AQ_MSG,
721*4882a593Smuzhiyun 			  "Retry Admin Queue init due to FW critical error\n");
722*4882a593Smuzhiyun 		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
723*4882a593Smuzhiyun 		msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
724*4882a593Smuzhiyun 	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
725*4882a593Smuzhiyun 
726*4882a593Smuzhiyun 	if (status)
727*4882a593Smuzhiyun 		return status;
728*4882a593Smuzhiyun 	/* Init Mailbox queue */
729*4882a593Smuzhiyun 	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
730*4882a593Smuzhiyun }
731*4882a593Smuzhiyun 
732*4882a593Smuzhiyun /**
733*4882a593Smuzhiyun  * ice_init_ctrlq_locks - Initialize locks for a control queue
734*4882a593Smuzhiyun  * @cq: pointer to the control queue
735*4882a593Smuzhiyun  *
736*4882a593Smuzhiyun  * Initializes the send and receive queue locks for a given control queue.
737*4882a593Smuzhiyun  */
738*4882a593Smuzhiyun static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
739*4882a593Smuzhiyun {
740*4882a593Smuzhiyun 	mutex_init(&cq->sq_lock);
741*4882a593Smuzhiyun 	mutex_init(&cq->rq_lock);
742*4882a593Smuzhiyun }
743*4882a593Smuzhiyun 
744*4882a593Smuzhiyun /**
745*4882a593Smuzhiyun  * ice_create_all_ctrlq - main initialization routine for all control queues
746*4882a593Smuzhiyun  * @hw: pointer to the hardware structure
747*4882a593Smuzhiyun  *
748*4882a593Smuzhiyun  * Prior to calling this function, the driver *MUST* set the following fields
749*4882a593Smuzhiyun  * in the cq->structure for all control queues:
750*4882a593Smuzhiyun  *     - cq->num_sq_entries
751*4882a593Smuzhiyun  *     - cq->num_rq_entries
752*4882a593Smuzhiyun  *     - cq->rq_buf_size
753*4882a593Smuzhiyun  *     - cq->sq_buf_size
754*4882a593Smuzhiyun  *
755*4882a593Smuzhiyun  * This function creates all the control queue locks and then calls
756*4882a593Smuzhiyun  * ice_init_all_ctrlq. It should be called once during driver load. If the
757*4882a593Smuzhiyun  * driver needs to re-initialize control queues at run time it should call
758*4882a593Smuzhiyun  * ice_init_all_ctrlq instead.
759*4882a593Smuzhiyun  */
760*4882a593Smuzhiyun enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
761*4882a593Smuzhiyun {
762*4882a593Smuzhiyun 	ice_init_ctrlq_locks(&hw->adminq);
763*4882a593Smuzhiyun 	ice_init_ctrlq_locks(&hw->mailboxq);
764*4882a593Smuzhiyun 
765*4882a593Smuzhiyun 	return ice_init_all_ctrlq(hw);
766*4882a593Smuzhiyun }
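
/* Illustrative lifecycle sketch: example_ctrlq_lifecycle() is a hypothetical
 * caller, not an existing driver function. It assumes num_sq_entries,
 * num_rq_entries, rq_buf_size and sq_buf_size were already filled in for
 * hw->adminq and hw->mailboxq, and it compresses probe, reset and remove
 * into one flow purely to show which entry point belongs to which stage.
 */
static enum ice_status example_ctrlq_lifecycle(struct ice_hw *hw)
{
	enum ice_status status;

	/* driver load: create the locks and bring up both control queues */
	status = ice_create_all_ctrlq(hw);
	if (status)
		return status;

	/* reset handling: bounce the queues but keep the locks */
	ice_shutdown_all_ctrlq(hw);
	status = ice_init_all_ctrlq(hw);
	if (status)
		return status;

	/* driver unload: shut down the queues and destroy the locks */
	ice_destroy_all_ctrlq(hw);
	return 0;
}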
767*4882a593Smuzhiyun 
768*4882a593Smuzhiyun /**
769*4882a593Smuzhiyun  * ice_destroy_ctrlq_locks - Destroy locks for a control queue
770*4882a593Smuzhiyun  * @cq: pointer to the control queue
771*4882a593Smuzhiyun  *
772*4882a593Smuzhiyun  * Destroys the send and receive queue locks for a given control queue.
773*4882a593Smuzhiyun  */
774*4882a593Smuzhiyun static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
775*4882a593Smuzhiyun {
776*4882a593Smuzhiyun 	mutex_destroy(&cq->sq_lock);
777*4882a593Smuzhiyun 	mutex_destroy(&cq->rq_lock);
778*4882a593Smuzhiyun }
779*4882a593Smuzhiyun 
780*4882a593Smuzhiyun /**
781*4882a593Smuzhiyun  * ice_destroy_all_ctrlq - exit routine for all control queues
782*4882a593Smuzhiyun  * @hw: pointer to the hardware structure
783*4882a593Smuzhiyun  *
784*4882a593Smuzhiyun  * This function shuts down all the control queues and then destroys the
785*4882a593Smuzhiyun  * control queue locks. It should be called once during driver unload. The
786*4882a593Smuzhiyun  * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
787*4882a593Smuzhiyun  * reinitialize control queues, such as in response to a reset event.
788*4882a593Smuzhiyun  */
789*4882a593Smuzhiyun void ice_destroy_all_ctrlq(struct ice_hw *hw)
790*4882a593Smuzhiyun {
791*4882a593Smuzhiyun 	/* shut down all the control queues first */
792*4882a593Smuzhiyun 	ice_shutdown_all_ctrlq(hw);
793*4882a593Smuzhiyun 
794*4882a593Smuzhiyun 	ice_destroy_ctrlq_locks(&hw->adminq);
795*4882a593Smuzhiyun 	ice_destroy_ctrlq_locks(&hw->mailboxq);
796*4882a593Smuzhiyun }
797*4882a593Smuzhiyun 
798*4882a593Smuzhiyun /**
799*4882a593Smuzhiyun  * ice_clean_sq - cleans Admin send queue (ATQ)
800*4882a593Smuzhiyun  * @hw: pointer to the hardware structure
801*4882a593Smuzhiyun  * @cq: pointer to the specific Control queue
802*4882a593Smuzhiyun  *
803*4882a593Smuzhiyun  * returns the number of free desc
804*4882a593Smuzhiyun  */
805*4882a593Smuzhiyun static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
806*4882a593Smuzhiyun {
807*4882a593Smuzhiyun 	struct ice_ctl_q_ring *sq = &cq->sq;
808*4882a593Smuzhiyun 	u16 ntc = sq->next_to_clean;
809*4882a593Smuzhiyun 	struct ice_sq_cd *details;
810*4882a593Smuzhiyun 	struct ice_aq_desc *desc;
811*4882a593Smuzhiyun 
812*4882a593Smuzhiyun 	desc = ICE_CTL_Q_DESC(*sq, ntc);
813*4882a593Smuzhiyun 	details = ICE_CTL_Q_DETAILS(*sq, ntc);
814*4882a593Smuzhiyun 
815*4882a593Smuzhiyun 	while (rd32(hw, cq->sq.head) != ntc) {
816*4882a593Smuzhiyun 		ice_debug(hw, ICE_DBG_AQ_MSG,
817*4882a593Smuzhiyun 			  "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
818*4882a593Smuzhiyun 		memset(desc, 0, sizeof(*desc));
819*4882a593Smuzhiyun 		memset(details, 0, sizeof(*details));
820*4882a593Smuzhiyun 		ntc++;
821*4882a593Smuzhiyun 		if (ntc == sq->count)
822*4882a593Smuzhiyun 			ntc = 0;
823*4882a593Smuzhiyun 		desc = ICE_CTL_Q_DESC(*sq, ntc);
824*4882a593Smuzhiyun 		details = ICE_CTL_Q_DETAILS(*sq, ntc);
825*4882a593Smuzhiyun 	}
826*4882a593Smuzhiyun 
827*4882a593Smuzhiyun 	sq->next_to_clean = ntc;
828*4882a593Smuzhiyun 
829*4882a593Smuzhiyun 	return ICE_CTL_Q_DESC_UNUSED(sq);
830*4882a593Smuzhiyun }
831*4882a593Smuzhiyun 
832*4882a593Smuzhiyun /**
833*4882a593Smuzhiyun  * ice_debug_cq
834*4882a593Smuzhiyun  * @hw: pointer to the hardware structure
835*4882a593Smuzhiyun  * @desc: pointer to control queue descriptor
836*4882a593Smuzhiyun  * @buf: pointer to command buffer
837*4882a593Smuzhiyun  * @buf_len: max length of buf
838*4882a593Smuzhiyun  *
839*4882a593Smuzhiyun  * Dumps debug log about control command with descriptor contents.
840*4882a593Smuzhiyun  */
841*4882a593Smuzhiyun static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
842*4882a593Smuzhiyun {
843*4882a593Smuzhiyun 	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
844*4882a593Smuzhiyun 	u16 len;
845*4882a593Smuzhiyun 
846*4882a593Smuzhiyun 	if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
847*4882a593Smuzhiyun 	    !((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
848*4882a593Smuzhiyun 		return;
849*4882a593Smuzhiyun 
850*4882a593Smuzhiyun 	if (!desc)
851*4882a593Smuzhiyun 		return;
852*4882a593Smuzhiyun 
853*4882a593Smuzhiyun 	len = le16_to_cpu(cq_desc->datalen);
854*4882a593Smuzhiyun 
855*4882a593Smuzhiyun 	ice_debug(hw, ICE_DBG_AQ_DESC,
856*4882a593Smuzhiyun 		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
857*4882a593Smuzhiyun 		  le16_to_cpu(cq_desc->opcode),
858*4882a593Smuzhiyun 		  le16_to_cpu(cq_desc->flags),
859*4882a593Smuzhiyun 		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
860*4882a593Smuzhiyun 	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
861*4882a593Smuzhiyun 		  le32_to_cpu(cq_desc->cookie_high),
862*4882a593Smuzhiyun 		  le32_to_cpu(cq_desc->cookie_low));
863*4882a593Smuzhiyun 	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1)  0x%08X 0x%08X\n",
864*4882a593Smuzhiyun 		  le32_to_cpu(cq_desc->params.generic.param0),
865*4882a593Smuzhiyun 		  le32_to_cpu(cq_desc->params.generic.param1));
866*4882a593Smuzhiyun 	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l)   0x%08X 0x%08X\n",
867*4882a593Smuzhiyun 		  le32_to_cpu(cq_desc->params.generic.addr_high),
868*4882a593Smuzhiyun 		  le32_to_cpu(cq_desc->params.generic.addr_low));
869*4882a593Smuzhiyun 	if (buf && cq_desc->datalen != 0) {
870*4882a593Smuzhiyun 		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
871*4882a593Smuzhiyun 		if (buf_len < len)
872*4882a593Smuzhiyun 			len = buf_len;
873*4882a593Smuzhiyun 
874*4882a593Smuzhiyun 		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf, len);
875*4882a593Smuzhiyun 	}
876*4882a593Smuzhiyun }
877*4882a593Smuzhiyun 
878*4882a593Smuzhiyun /**
879*4882a593Smuzhiyun  * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
880*4882a593Smuzhiyun  * @hw: pointer to the HW struct
881*4882a593Smuzhiyun  * @cq: pointer to the specific Control queue
882*4882a593Smuzhiyun  *
883*4882a593Smuzhiyun  * Returns true if the firmware has processed all descriptors on the
884*4882a593Smuzhiyun  * admin send queue. Returns false if there are still requests pending.
885*4882a593Smuzhiyun  */
886*4882a593Smuzhiyun static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
887*4882a593Smuzhiyun {
888*4882a593Smuzhiyun 	/* AQ designers suggest use of head for better
889*4882a593Smuzhiyun 	 * timing reliability than DD bit
890*4882a593Smuzhiyun 	 */
891*4882a593Smuzhiyun 	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
892*4882a593Smuzhiyun }
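
/* For example, after the driver posts the descriptor at index 5 and bumps
 * next_to_use to 6, firmware advances the head register to 6 once it has
 * consumed the command, at which point this helper reports the queue done.
 */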
893*4882a593Smuzhiyun 
894*4882a593Smuzhiyun /**
895*4882a593Smuzhiyun  * ice_sq_send_cmd - send command to Control Queue (ATQ)
896*4882a593Smuzhiyun  * @hw: pointer to the HW struct
897*4882a593Smuzhiyun  * @cq: pointer to the specific Control queue
898*4882a593Smuzhiyun  * @desc: prefilled descriptor describing the command (non DMA mem)
899*4882a593Smuzhiyun  * @buf: buffer to use for indirect commands (or NULL for direct commands)
900*4882a593Smuzhiyun  * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
901*4882a593Smuzhiyun  * @cd: pointer to command details structure
902*4882a593Smuzhiyun  *
903*4882a593Smuzhiyun  * This is the main send command routine for the ATQ. It runs the queue,
904*4882a593Smuzhiyun  * cleans the queue, etc.
905*4882a593Smuzhiyun  */
906*4882a593Smuzhiyun enum ice_status
907*4882a593Smuzhiyun ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
908*4882a593Smuzhiyun 		struct ice_aq_desc *desc, void *buf, u16 buf_size,
909*4882a593Smuzhiyun 		struct ice_sq_cd *cd)
910*4882a593Smuzhiyun {
911*4882a593Smuzhiyun 	struct ice_dma_mem *dma_buf = NULL;
912*4882a593Smuzhiyun 	struct ice_aq_desc *desc_on_ring;
913*4882a593Smuzhiyun 	bool cmd_completed = false;
914*4882a593Smuzhiyun 	enum ice_status status = 0;
915*4882a593Smuzhiyun 	struct ice_sq_cd *details;
916*4882a593Smuzhiyun 	u32 total_delay = 0;
917*4882a593Smuzhiyun 	u16 retval = 0;
918*4882a593Smuzhiyun 	u32 val = 0;
919*4882a593Smuzhiyun 
920*4882a593Smuzhiyun 	/* if reset is in progress return a soft error */
921*4882a593Smuzhiyun 	if (hw->reset_ongoing)
922*4882a593Smuzhiyun 		return ICE_ERR_RESET_ONGOING;
923*4882a593Smuzhiyun 	mutex_lock(&cq->sq_lock);
924*4882a593Smuzhiyun 
925*4882a593Smuzhiyun 	cq->sq_last_status = ICE_AQ_RC_OK;
926*4882a593Smuzhiyun 
927*4882a593Smuzhiyun 	if (!cq->sq.count) {
928*4882a593Smuzhiyun 		ice_debug(hw, ICE_DBG_AQ_MSG,
929*4882a593Smuzhiyun 			  "Control Send queue not initialized.\n");
930*4882a593Smuzhiyun 		status = ICE_ERR_AQ_EMPTY;
931*4882a593Smuzhiyun 		goto sq_send_command_error;
932*4882a593Smuzhiyun 	}
933*4882a593Smuzhiyun 
934*4882a593Smuzhiyun 	if ((buf && !buf_size) || (!buf && buf_size)) {
935*4882a593Smuzhiyun 		status = ICE_ERR_PARAM;
936*4882a593Smuzhiyun 		goto sq_send_command_error;
937*4882a593Smuzhiyun 	}
938*4882a593Smuzhiyun 
939*4882a593Smuzhiyun 	if (buf) {
940*4882a593Smuzhiyun 		if (buf_size > cq->sq_buf_size) {
941*4882a593Smuzhiyun 			ice_debug(hw, ICE_DBG_AQ_MSG,
942*4882a593Smuzhiyun 				  "Invalid buffer size for Control Send queue: %d.\n",
943*4882a593Smuzhiyun 				  buf_size);
944*4882a593Smuzhiyun 			status = ICE_ERR_INVAL_SIZE;
945*4882a593Smuzhiyun 			goto sq_send_command_error;
946*4882a593Smuzhiyun 		}
947*4882a593Smuzhiyun 
948*4882a593Smuzhiyun 		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
949*4882a593Smuzhiyun 		if (buf_size > ICE_AQ_LG_BUF)
950*4882a593Smuzhiyun 			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
951*4882a593Smuzhiyun 	}
952*4882a593Smuzhiyun 
953*4882a593Smuzhiyun 	val = rd32(hw, cq->sq.head);
954*4882a593Smuzhiyun 	if (val >= cq->num_sq_entries) {
955*4882a593Smuzhiyun 		ice_debug(hw, ICE_DBG_AQ_MSG,
956*4882a593Smuzhiyun 			  "head overrun at %d in the Control Send Queue ring\n",
957*4882a593Smuzhiyun 			  val);
958*4882a593Smuzhiyun 		status = ICE_ERR_AQ_EMPTY;
959*4882a593Smuzhiyun 		goto sq_send_command_error;
960*4882a593Smuzhiyun 	}
961*4882a593Smuzhiyun 
962*4882a593Smuzhiyun 	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
963*4882a593Smuzhiyun 	if (cd)
964*4882a593Smuzhiyun 		*details = *cd;
965*4882a593Smuzhiyun 	else
966*4882a593Smuzhiyun 		memset(details, 0, sizeof(*details));
967*4882a593Smuzhiyun 
968*4882a593Smuzhiyun 	/* Call clean and check queue available function to reclaim the
969*4882a593Smuzhiyun 	 * descriptors that were processed by FW/MBX; the function returns the
970*4882a593Smuzhiyun 	 * number of desc available. The clean function called here could be
971*4882a593Smuzhiyun 	 * called in a separate thread in case of asynchronous completions.
972*4882a593Smuzhiyun 	 */
973*4882a593Smuzhiyun 	if (ice_clean_sq(hw, cq) == 0) {
974*4882a593Smuzhiyun 		ice_debug(hw, ICE_DBG_AQ_MSG,
975*4882a593Smuzhiyun 			  "Error: Control Send Queue is full.\n");
976*4882a593Smuzhiyun 		status = ICE_ERR_AQ_FULL;
977*4882a593Smuzhiyun 		goto sq_send_command_error;
978*4882a593Smuzhiyun 	}
979*4882a593Smuzhiyun 
980*4882a593Smuzhiyun 	/* initialize the temp desc pointer with the right desc */
981*4882a593Smuzhiyun 	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);
982*4882a593Smuzhiyun 
983*4882a593Smuzhiyun 	/* if the desc is available copy the temp desc to the right place */
984*4882a593Smuzhiyun 	memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));
985*4882a593Smuzhiyun 
986*4882a593Smuzhiyun 	/* if buf is not NULL assume indirect command */
987*4882a593Smuzhiyun 	if (buf) {
988*4882a593Smuzhiyun 		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
989*4882a593Smuzhiyun 		/* copy the user buf into the respective DMA buf */
990*4882a593Smuzhiyun 		memcpy(dma_buf->va, buf, buf_size);
991*4882a593Smuzhiyun 		desc_on_ring->datalen = cpu_to_le16(buf_size);
992*4882a593Smuzhiyun 
993*4882a593Smuzhiyun 		/* Update the address values in the desc with the pa value
994*4882a593Smuzhiyun 		 * for respective buffer
995*4882a593Smuzhiyun 		 */
996*4882a593Smuzhiyun 		desc_on_ring->params.generic.addr_high =
997*4882a593Smuzhiyun 			cpu_to_le32(upper_32_bits(dma_buf->pa));
998*4882a593Smuzhiyun 		desc_on_ring->params.generic.addr_low =
999*4882a593Smuzhiyun 			cpu_to_le32(lower_32_bits(dma_buf->pa));
1000*4882a593Smuzhiyun 	}
1001*4882a593Smuzhiyun 
1002*4882a593Smuzhiyun 	/* Debug desc and buffer */
1003*4882a593Smuzhiyun 	ice_debug(hw, ICE_DBG_AQ_DESC,
1004*4882a593Smuzhiyun 		  "ATQ: Control Send queue desc and buffer:\n");
1005*4882a593Smuzhiyun 
1006*4882a593Smuzhiyun 	ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);
1007*4882a593Smuzhiyun 
1008*4882a593Smuzhiyun 	(cq->sq.next_to_use)++;
1009*4882a593Smuzhiyun 	if (cq->sq.next_to_use == cq->sq.count)
1010*4882a593Smuzhiyun 		cq->sq.next_to_use = 0;
1011*4882a593Smuzhiyun 	wr32(hw, cq->sq.tail, cq->sq.next_to_use);
1012*4882a593Smuzhiyun 
1013*4882a593Smuzhiyun 	do {
1014*4882a593Smuzhiyun 		if (ice_sq_done(hw, cq))
1015*4882a593Smuzhiyun 			break;
1016*4882a593Smuzhiyun 
1017*4882a593Smuzhiyun 		udelay(ICE_CTL_Q_SQ_CMD_USEC);
1018*4882a593Smuzhiyun 		total_delay++;
1019*4882a593Smuzhiyun 	} while (total_delay < cq->sq_cmd_timeout);
1020*4882a593Smuzhiyun 
1021*4882a593Smuzhiyun 	/* if ready, copy the desc back to temp */
1022*4882a593Smuzhiyun 	if (ice_sq_done(hw, cq)) {
1023*4882a593Smuzhiyun 		memcpy(desc, desc_on_ring, sizeof(*desc));
1024*4882a593Smuzhiyun 		if (buf) {
1025*4882a593Smuzhiyun 			/* get returned length to copy */
1026*4882a593Smuzhiyun 			u16 copy_size = le16_to_cpu(desc->datalen);
1027*4882a593Smuzhiyun 
1028*4882a593Smuzhiyun 			if (copy_size > buf_size) {
1029*4882a593Smuzhiyun 				ice_debug(hw, ICE_DBG_AQ_MSG,
1030*4882a593Smuzhiyun 					  "Return len %d > than buf len %d\n",
1031*4882a593Smuzhiyun 					  copy_size, buf_size);
1032*4882a593Smuzhiyun 				status = ICE_ERR_AQ_ERROR;
1033*4882a593Smuzhiyun 			} else {
1034*4882a593Smuzhiyun 				memcpy(buf, dma_buf->va, copy_size);
1035*4882a593Smuzhiyun 			}
1036*4882a593Smuzhiyun 		}
1037*4882a593Smuzhiyun 		retval = le16_to_cpu(desc->retval);
1038*4882a593Smuzhiyun 		if (retval) {
1039*4882a593Smuzhiyun 			ice_debug(hw, ICE_DBG_AQ_MSG,
1040*4882a593Smuzhiyun 				  "Control Send Queue command 0x%04X completed with error 0x%X\n",
1041*4882a593Smuzhiyun 				  le16_to_cpu(desc->opcode),
1042*4882a593Smuzhiyun 				  retval);
1043*4882a593Smuzhiyun 
1044*4882a593Smuzhiyun 			/* strip off FW internal code */
1045*4882a593Smuzhiyun 			retval &= 0xff;
1046*4882a593Smuzhiyun 		}
1047*4882a593Smuzhiyun 		cmd_completed = true;
1048*4882a593Smuzhiyun 		if (!status && retval != ICE_AQ_RC_OK)
1049*4882a593Smuzhiyun 			status = ICE_ERR_AQ_ERROR;
1050*4882a593Smuzhiyun 		cq->sq_last_status = (enum ice_aq_err)retval;
1051*4882a593Smuzhiyun 	}
1052*4882a593Smuzhiyun 
1053*4882a593Smuzhiyun 	ice_debug(hw, ICE_DBG_AQ_MSG,
1054*4882a593Smuzhiyun 		  "ATQ: desc and buffer writeback:\n");
1055*4882a593Smuzhiyun 
1056*4882a593Smuzhiyun 	ice_debug_cq(hw, (void *)desc, buf, buf_size);
1057*4882a593Smuzhiyun 
1058*4882a593Smuzhiyun 	/* save writeback AQ if requested */
1059*4882a593Smuzhiyun 	if (details->wb_desc)
1060*4882a593Smuzhiyun 		memcpy(details->wb_desc, desc_on_ring,
1061*4882a593Smuzhiyun 		       sizeof(*details->wb_desc));
1062*4882a593Smuzhiyun 
1063*4882a593Smuzhiyun 	/* update the error if time out occurred */
1064*4882a593Smuzhiyun 	if (!cmd_completed) {
1065*4882a593Smuzhiyun 		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
1066*4882a593Smuzhiyun 		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
1067*4882a593Smuzhiyun 			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
1068*4882a593Smuzhiyun 			status = ICE_ERR_AQ_FW_CRITICAL;
1069*4882a593Smuzhiyun 		} else {
1070*4882a593Smuzhiyun 			ice_debug(hw, ICE_DBG_AQ_MSG,
1071*4882a593Smuzhiyun 				  "Control Send Queue Writeback timeout.\n");
1072*4882a593Smuzhiyun 			status = ICE_ERR_AQ_TIMEOUT;
1073*4882a593Smuzhiyun 		}
1074*4882a593Smuzhiyun 	}
1075*4882a593Smuzhiyun 
1076*4882a593Smuzhiyun sq_send_command_error:
1077*4882a593Smuzhiyun 	mutex_unlock(&cq->sq_lock);
1078*4882a593Smuzhiyun 	return status;
1079*4882a593Smuzhiyun }
1080*4882a593Smuzhiyun 
1081*4882a593Smuzhiyun /**
1082*4882a593Smuzhiyun  * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
1083*4882a593Smuzhiyun  * @desc: pointer to the temp descriptor (non DMA mem)
1084*4882a593Smuzhiyun  * @opcode: the opcode can be used to decide which flags to turn off or on
1085*4882a593Smuzhiyun  *
1086*4882a593Smuzhiyun  * Fill the desc with default values
1087*4882a593Smuzhiyun  */
1088*4882a593Smuzhiyun void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
1089*4882a593Smuzhiyun {
1090*4882a593Smuzhiyun 	/* zero out the desc */
1091*4882a593Smuzhiyun 	memset(desc, 0, sizeof(*desc));
1092*4882a593Smuzhiyun 	desc->opcode = cpu_to_le16(opcode);
1093*4882a593Smuzhiyun 	desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
1094*4882a593Smuzhiyun }
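
/* Illustrative direct-command sketch: example_send_get_ver() is a
 * hypothetical helper, not an existing driver function. It shows roughly how
 * callers drive the ATQ for a direct (bufferless) command - fill a default
 * descriptor with an opcode, here the firmware version query
 * ice_aqc_opc_get_ver, then hand it to ice_sq_send_cmd() on the admin queue.
 */
static enum ice_status example_send_get_ver(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	/* direct command: no indirect buffer, so buf is NULL and size is 0 */
	return ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
}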
1095*4882a593Smuzhiyun 
1096*4882a593Smuzhiyun /**
1097*4882a593Smuzhiyun  * ice_clean_rq_elem
1098*4882a593Smuzhiyun  * @hw: pointer to the HW struct
1099*4882a593Smuzhiyun  * @cq: pointer to the specific Control queue
1100*4882a593Smuzhiyun  * @e: event info from the receive descriptor, includes any buffers
1101*4882a593Smuzhiyun  * @pending: number of events that could be left to process
1102*4882a593Smuzhiyun  *
1103*4882a593Smuzhiyun  * This function cleans one Admin Receive Queue element and returns
1104*4882a593Smuzhiyun  * the contents through e. It can also return how many events are
1105*4882a593Smuzhiyun  * left to process through 'pending'.
1106*4882a593Smuzhiyun  */
1107*4882a593Smuzhiyun enum ice_status
1108*4882a593Smuzhiyun ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1109*4882a593Smuzhiyun 		  struct ice_rq_event_info *e, u16 *pending)
1110*4882a593Smuzhiyun {
1111*4882a593Smuzhiyun 	u16 ntc = cq->rq.next_to_clean;
1112*4882a593Smuzhiyun 	enum ice_status ret_code = 0;
1113*4882a593Smuzhiyun 	struct ice_aq_desc *desc;
1114*4882a593Smuzhiyun 	struct ice_dma_mem *bi;
1115*4882a593Smuzhiyun 	u16 desc_idx;
1116*4882a593Smuzhiyun 	u16 datalen;
1117*4882a593Smuzhiyun 	u16 flags;
1118*4882a593Smuzhiyun 	u16 ntu;
1119*4882a593Smuzhiyun 
1120*4882a593Smuzhiyun 	/* pre-clean the event info */
1121*4882a593Smuzhiyun 	memset(&e->desc, 0, sizeof(e->desc));
1122*4882a593Smuzhiyun 
1123*4882a593Smuzhiyun 	/* take the lock before we start messing with the ring */
1124*4882a593Smuzhiyun 	mutex_lock(&cq->rq_lock);
1125*4882a593Smuzhiyun 
1126*4882a593Smuzhiyun 	if (!cq->rq.count) {
1127*4882a593Smuzhiyun 		ice_debug(hw, ICE_DBG_AQ_MSG,
1128*4882a593Smuzhiyun 			  "Control Receive queue not initialized.\n");
1129*4882a593Smuzhiyun 		ret_code = ICE_ERR_AQ_EMPTY;
1130*4882a593Smuzhiyun 		goto clean_rq_elem_err;
1131*4882a593Smuzhiyun 	}
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun 	/* set next_to_use to head */
1134*4882a593Smuzhiyun 	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1135*4882a593Smuzhiyun 
1136*4882a593Smuzhiyun 	if (ntu == ntc) {
1137*4882a593Smuzhiyun 		/* nothing to do - shouldn't need to update ring's values */
1138*4882a593Smuzhiyun 		ret_code = ICE_ERR_AQ_NO_WORK;
1139*4882a593Smuzhiyun 		goto clean_rq_elem_out;
1140*4882a593Smuzhiyun 	}
1141*4882a593Smuzhiyun 
1142*4882a593Smuzhiyun 	/* now clean the next descriptor */
1143*4882a593Smuzhiyun 	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
1144*4882a593Smuzhiyun 	desc_idx = ntc;
1145*4882a593Smuzhiyun 
1146*4882a593Smuzhiyun 	cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
1147*4882a593Smuzhiyun 	flags = le16_to_cpu(desc->flags);
1148*4882a593Smuzhiyun 	if (flags & ICE_AQ_FLAG_ERR) {
1149*4882a593Smuzhiyun 		ret_code = ICE_ERR_AQ_ERROR;
1150*4882a593Smuzhiyun 		ice_debug(hw, ICE_DBG_AQ_MSG,
1151*4882a593Smuzhiyun 			  "Control Receive Queue Event 0x%04X received with error 0x%X\n",
1152*4882a593Smuzhiyun 			  le16_to_cpu(desc->opcode),
1153*4882a593Smuzhiyun 			  cq->rq_last_status);
1154*4882a593Smuzhiyun 	}
1155*4882a593Smuzhiyun 	memcpy(&e->desc, desc, sizeof(e->desc));
1156*4882a593Smuzhiyun 	datalen = le16_to_cpu(desc->datalen);
1157*4882a593Smuzhiyun 	e->msg_len = min_t(u16, datalen, e->buf_len);
1158*4882a593Smuzhiyun 	if (e->msg_buf && e->msg_len)
1159*4882a593Smuzhiyun 		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);
1160*4882a593Smuzhiyun 
1161*4882a593Smuzhiyun 	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");
1162*4882a593Smuzhiyun 
1163*4882a593Smuzhiyun 	ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);
1164*4882a593Smuzhiyun 
1165*4882a593Smuzhiyun 	/* Restore the original datalen and buffer address in the desc,
1166*4882a593Smuzhiyun 	 * FW updates datalen to indicate the event message size
1167*4882a593Smuzhiyun 	 */
1168*4882a593Smuzhiyun 	bi = &cq->rq.r.rq_bi[ntc];
1169*4882a593Smuzhiyun 	memset(desc, 0, sizeof(*desc));
1170*4882a593Smuzhiyun 
1171*4882a593Smuzhiyun 	desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
1172*4882a593Smuzhiyun 	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
1173*4882a593Smuzhiyun 		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
1174*4882a593Smuzhiyun 	desc->datalen = cpu_to_le16(bi->size);
1175*4882a593Smuzhiyun 	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
1176*4882a593Smuzhiyun 	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
1177*4882a593Smuzhiyun 
1178*4882a593Smuzhiyun 	/* set tail = the last cleaned desc index. */
1179*4882a593Smuzhiyun 	wr32(hw, cq->rq.tail, ntc);
1180*4882a593Smuzhiyun 	/* ntc is updated to tail + 1 */
1181*4882a593Smuzhiyun 	ntc++;
1182*4882a593Smuzhiyun 	if (ntc == cq->num_rq_entries)
1183*4882a593Smuzhiyun 		ntc = 0;
1184*4882a593Smuzhiyun 	cq->rq.next_to_clean = ntc;
1185*4882a593Smuzhiyun 	cq->rq.next_to_use = ntu;
1186*4882a593Smuzhiyun 
1187*4882a593Smuzhiyun clean_rq_elem_out:
1188*4882a593Smuzhiyun 	/* Set pending if needed, unlock and return */
1189*4882a593Smuzhiyun 	if (pending) {
1190*4882a593Smuzhiyun 		/* re-read HW head to calculate actual pending messages */
1191*4882a593Smuzhiyun 		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1192*4882a593Smuzhiyun 		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
1193*4882a593Smuzhiyun 	}
1194*4882a593Smuzhiyun clean_rq_elem_err:
1195*4882a593Smuzhiyun 	mutex_unlock(&cq->rq_lock);
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun 	return ret_code;
1198*4882a593Smuzhiyun }
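
/* Illustrative event-drain sketch: example_drain_arq() is a hypothetical
 * helper, not an existing driver function. Service code typically loops on
 * ice_clean_rq_elem() until it returns an error such as ICE_ERR_AQ_NO_WORK
 * or until no events remain pending; the receive buffer is sized from the
 * queue's own rq_buf_size.
 */
static void example_drain_arq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_rq_event_info event = {};
	u16 pending = 0;

	event.buf_len = cq->rq_buf_size;
	event.msg_buf = devm_kzalloc(ice_hw_to_dev(hw), event.buf_len,
				     GFP_KERNEL);
	if (!event.msg_buf)
		return;

	do {
		if (ice_clean_rq_elem(hw, cq, &event, &pending))
			break;
		/* a real handler would dispatch on event.desc.opcode here */
	} while (pending);

	devm_kfree(ice_hw_to_dev(hw), event.msg_buf);
}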
1199