// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <net/xdp_sock_drv.h>
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

/**
 * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM when no space is left in the PF queue bitmap
 */
static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
{
	unsigned int offset, i;

	mutex_lock(qs_cfg->qs_mutex);
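	/* bitmap_find_next_zero_area() searches pf_map for a run of q_count
	 * clear bits starting at bit 0; if no such run exists it returns a
	 * value >= pf_map_size, which the check below treats as "no space
	 * left".
	 */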
	offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
					    0, qs_cfg->q_count, 0);
	if (offset >= qs_cfg->pf_map_size) {
		mutex_unlock(qs_cfg->qs_mutex);
		return -ENOMEM;
	}

	bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
	for (i = 0; i < qs_cfg->q_count; i++)
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)(i + offset);
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
}

/**
 * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
 * @qs_cfg: gathered variables needed for pf->vsi queues assignment
 *
 * Return 0 on success and -ENOMEM when no space is left in the PF queue bitmap
 */
static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
{
	unsigned int i, index = 0;

	mutex_lock(qs_cfg->qs_mutex);
	for (i = 0; i < qs_cfg->q_count; i++) {
		index = find_next_zero_bit(qs_cfg->pf_map,
					   qs_cfg->pf_map_size, index);
		if (index >= qs_cfg->pf_map_size)
			goto err_scatter;
		set_bit(index, qs_cfg->pf_map);
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)index;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
err_scatter:
	for (index = 0; index < i; index++) {
		clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
		qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return -ENOMEM;
}

/**
 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @ena: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT in case of failing to reach the requested state after
 * multiple retries; else will return 0 in case of success.
 */
static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
{
	int i;

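	/* Poll the queue-enable status bit up to ICE_Q_WAIT_MAX_RETRY times,
	 * sleeping 20-40 us between reads, so the worst-case wait is roughly
	 * ICE_Q_WAIT_MAX_RETRY * 40 us.
	 */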
	for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
		if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
			      QRX_CTRL_QENA_STAT_M))
			return 0;

		usleep_range(20, 40);
	}

	return -ETIMEDOUT;
}

/**
 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the VSI struct
 *
 * We allocate one q_vector and set the default ITR values associated
 * with this q_vector. If allocation fails we return -ENOMEM.
 */
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;

	/* allocate q_vector */
	q_vector = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*q_vector),
				GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	q_vector->tx.itr_setting = ICE_DFLT_TX_ITR;
	q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
	if (vsi->type == ICE_VSI_VF)
		goto out;
	/* only set affinity_mask if the CPU is online */
	if (cpu_online(v_idx))
		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);

	/* This will not be called in the driver load path because the netdev
	 * will not be created yet. All other cases will register the NAPI
	 * handler here (i.e. resume, reset/rebuild, etc.)
	 */
	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
			       NAPI_POLL_WEIGHT);

out:
	/* tie q_vector and VSI together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}

/**
 * ice_free_q_vector - Free memory allocated for a specific interrupt vector
 * @vsi: VSI having the memory freed
 * @v_idx: index of the vector to be freed
 */
static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
	struct ice_q_vector *q_vector;
	struct ice_pf *pf = vsi->back;
	struct ice_ring *ring;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	if (!vsi->q_vectors[v_idx]) {
		dev_dbg(dev, "Queue vector at index %d not found\n", v_idx);
		return;
	}
	q_vector = vsi->q_vectors[v_idx];

	ice_for_each_ring(ring, q_vector->tx)
		ring->q_vector = NULL;
	ice_for_each_ring(ring, q_vector->rx)
		ring->q_vector = NULL;

	/* only VSI with an associated netdev is set up with NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	devm_kfree(dev, q_vector);
	vsi->q_vectors[v_idx] = NULL;
}

/**
 * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
 * @hw: board specific structure
 */
static void ice_cfg_itr_gran(struct ice_hw *hw)
{
	u32 regval = rd32(hw, GLINT_CTL);

	/* no need to update global register if ITR gran is already set */
	if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
	    (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
	      GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
	      GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
	      GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
	      GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
		return;

	regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
		  GLINT_CTL_ITR_GRAN_200_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
		  GLINT_CTL_ITR_GRAN_100_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
		  GLINT_CTL_ITR_GRAN_50_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
		  GLINT_CTL_ITR_GRAN_25_M);
	wr32(hw, GLINT_CTL, regval);
}

/**
 * ice_calc_q_handle - calculate the queue handle
 * @vsi: VSI that ring belongs to
 * @ring: ring to get the absolute queue index
 * @tc: traffic class number
 */
static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
{
	WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");

	/* The idea here is that we subtract the queue offset of the TC that
	 * the ring belongs to from the ring's absolute queue index, which
	 * gives us the queue's index within that TC.
	 */
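	/* For example (illustrative numbers only): if TC 2 starts at
	 * qoffset 8 and the ring's absolute q_index is 10, the per-TC
	 * queue handle is 10 - 8 = 2.
	 */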
	return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
}

/**
 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
 * @ring: The Tx ring to configure
 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
 * @pf_q: queue index in the PF space
 *
 * Configure the Tx descriptor ring in TLAN context.
 */
static void
ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
	struct ice_vsi *vsi = ring->vsi;
	struct ice_hw *hw = &vsi->back->hw;

	tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;

	tlan_ctx->port_num = vsi->port_info->lport;

	/* Transmit Queue Length */
	tlan_ctx->qlen = ring->count;

	ice_set_cgd_num(tlan_ctx, ring);

	/* PF number */
	tlan_ctx->pf_num = hw->pf_id;

	/* queue belongs to a specific VSI type
	 * VF / VM index should be programmed per vmvf_type setting:
	 * for vmvf_type = VF, it is VF number between 0-256
	 * for vmvf_type = VM, it is VM number between 0-767
	 * for PF or EMP this field should be set to zero
	 */
	switch (vsi->type) {
	case ICE_VSI_LB:
	case ICE_VSI_CTRL:
	case ICE_VSI_PF:
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* Firmware expects vmvf_num to be absolute VF ID */
		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
		break;
	default:
		return;
	}

	/* make sure the context is associated with the right VSI */
	tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);

	tlan_ctx->tso_ena = ICE_TX_LEGACY;
	tlan_ctx->tso_qnum = pf_q;

	/* Legacy or Advanced Host Interface:
	 * 0: Advanced Host Interface
	 * 1: Legacy Host Interface
	 */
	tlan_ctx->legacy_int = ICE_TX_LEGACY;
}

/**
 * ice_setup_rx_ctx - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in RLAN context.
 */
int ice_setup_rx_ctx(struct ice_ring *ring)
{
	struct device *dev = ice_pf_to_dev(ring->vsi->back);
	int chain_len = ICE_MAX_CHAINED_RX_BUFS;
	u16 num_bufs = ICE_DESC_UNUSED(ring);
	struct ice_vsi *vsi = ring->vsi;
	u32 rxdid = ICE_RXDID_FLEX_NIC;
	struct ice_rlan_ctx rlan_ctx;
	struct ice_hw *hw;
	u16 pf_q;
	int err;

	hw = &vsi->back->hw;

	/* what is Rx queue number in global space of 2K Rx queues */
	pf_q = vsi->rxq_map[ring->q_index];

	/* clear the context structure first */
	memset(&rlan_ctx, 0, sizeof(rlan_ctx));

	ring->rx_buf_len = vsi->rx_buf_len;

	if (ring->vsi->type == ICE_VSI_PF) {
		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
			/* coverity[check_return] */
			xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
					 ring->q_index);

		ring->xsk_pool = ice_xsk_pool(ring);
		if (ring->xsk_pool) {
			xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);

			ring->rx_buf_len =
				xsk_pool_get_rx_frame_size(ring->xsk_pool);
			/* For AF_XDP ZC, we disallow packets to span across
			 * multiple buffers, thus letting us skip that
			 * handling in the fast path.
			 */
			chain_len = 1;
			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_XSK_BUFF_POOL,
							 NULL);
			if (err)
				return err;
			xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);

			dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
				 ring->q_index);
		} else {
			if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
				/* coverity[check_return] */
				xdp_rxq_info_reg(&ring->xdp_rxq,
						 ring->netdev,
						 ring->q_index);

			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
			if (err)
				return err;
		}
	}
	/* Receive Queue Base Address.
	 * Indicates the starting address of the descriptor queue defined in
	 * 128 Byte units.
	 */
	rlan_ctx.base = ring->dma >> 7;

	rlan_ctx.qlen = ring->count;

	/* Receive Packet Data Buffer Size.
	 * The Packet Data Buffer Size is defined in 128 byte units.
	 */
	rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;

	/* use 32 byte descriptors */
	rlan_ctx.dsize = 1;

	/* Strip the Ethernet CRC bytes before the packet is posted to host
	 * memory.
	 */
	rlan_ctx.crcstrip = 1;

	/* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
	rlan_ctx.l2tsel = 1;

	rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
	rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
	rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;

	/* This controls whether VLAN is stripped from inner headers.
	 * The VLAN in the inner L2 header is stripped to the receive
	 * descriptor if enabled by this flag.
	 */
	rlan_ctx.showiv = 0;

	/* Max packet size for this queue - must not be set to a larger value
	 * than 5 x DBUF
	 */
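	/* chain_len is ICE_MAX_CHAINED_RX_BUFS (presumably the "5" in the
	 * 5 x DBUF limit above) on the regular path and 1 for AF_XDP
	 * zero-copy rings, so rxmax never exceeds what one chained receive
	 * can hold.
	 */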
	rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
			       chain_len * ring->rx_buf_len);

	/* Rx queue threshold in units of 64 */
	rlan_ctx.lrxqthresh = 1;

	/* Enable Flexible Descriptors in the queue context, which allows
	 * this driver to select a specific receive descriptor format.
	 * Increase context priority to pick up the profile ID; the default
	 * is 0x01; setting it to 0x03 ensures the profile is programmed
	 * if the previous context is of the same priority.
	 */
	if (vsi->type != ICE_VSI_VF)
		ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3);
	else
		ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3);

	/* Absolute queue number out of 2K needs to be passed */
	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
	if (err) {
		dev_err(dev, "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
			pf_q, err);
		return -EIO;
	}

	if (vsi->type == ICE_VSI_VF)
		return 0;

	/* configure Rx buffer alignment */
	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
		ice_clear_ring_build_skb_ena(ring);
	else
		ice_set_ring_build_skb_ena(ring);

	/* init queue specific tail register */
	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
	writel(0, ring->tail);

	if (ring->xsk_pool) {
		if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
			dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
				 num_bufs, ring->q_index);
			dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");

			return 0;
		}

		err = ice_alloc_rx_bufs_zc(ring, num_bufs);
		if (err)
			dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
				 ring->q_index, pf_q);
		return 0;
	}

	ice_alloc_rx_bufs(ring, num_bufs);

	return 0;
}

/**
 * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
 * @qs_cfg: gathered variables needed for pf->vsi queues assignment
 *
 * This function first tries to find contiguous space. If it is not successful,
 * it tries with the scatter approach.
 *
 * Return 0 on success and -ENOMEM when no space is left in the PF queue bitmap
 */
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
{
	int ret = 0;

	ret = __ice_vsi_get_qs_contig(qs_cfg);
	if (ret) {
		/* contig failed, so try with scatter approach */
		qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
		qs_cfg->q_count = min_t(unsigned int, qs_cfg->q_count,
					qs_cfg->scatter_count);
		ret = __ice_vsi_get_qs_sc(qs_cfg);
	}
	return ret;
}

/**
 * ice_vsi_ctrl_one_rx_ring - start/stop VSI's Rx ring with no busy wait
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx ring
 * @rxq_idx: 0-based Rx queue index for the VSI passed in
 * @wait: wait or don't wait for configuration to finish in hardware
 *
 * Return 0 on success and negative on error.
 */
int
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait)
{
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 rx_reg;

	rx_reg = rd32(hw, QRX_CTRL(pf_q));

	/* Skip if the queue is already in the requested state */
	if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
		return 0;

	/* turn on/off the queue */
	if (ena)
		rx_reg |= QRX_CTRL_QENA_REQ_M;
	else
		rx_reg &= ~QRX_CTRL_QENA_REQ_M;
	wr32(hw, QRX_CTRL(pf_q), rx_reg);

	if (!wait)
		return 0;

	ice_flush(hw);
	return ice_pf_rxq_wait(pf, pf_q, ena);
}

/**
 * ice_vsi_wait_one_rx_ring - wait for a VSI's Rx ring to be stopped/started
 * @vsi: the VSI being configured
 * @ena: true/false to verify Rx ring has been enabled/disabled respectively
 * @rxq_idx: 0-based Rx queue index for the VSI passed in
 *
 * This routine will wait for the given Rx queue of the VSI to reach the
 * enabled or disabled state. Returns -ETIMEDOUT in case of failing to reach
 * the requested state after multiple retries; else will return 0 in case of
 * success.
 */
int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
{
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;

	return ice_pf_rxq_wait(pf, pf_q, ena);
}

/**
 * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 */
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	u16 v_idx;
	int err;

	if (vsi->q_vectors[0]) {
		dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
		return -EEXIST;
	}

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
		err = ice_vsi_alloc_q_vector(vsi, v_idx);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	while (v_idx--)
		ice_free_q_vector(vsi, v_idx);

	dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
		vsi->num_q_vectors, vsi->vsi_num, err);
	vsi->num_q_vectors = 0;
	return err;
}

/**
 * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors allotted
 * through the MSI-X enabling code. On a constrained vector budget, we map Tx
 * and Rx rings to the vector as "efficiently" as possible.
 */
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
{
	int q_vectors = vsi->num_q_vectors;
	u16 tx_rings_rem, rx_rings_rem;
	int v_id;

	/* initially assign the remaining ring counts to the VSI's queue counts */
	tx_rings_rem = vsi->num_txq;
	rx_rings_rem = vsi->num_rxq;

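	/* Rings are spread with DIV_ROUND_UP so the earlier vectors absorb
	 * any remainder. For example (illustrative numbers only): 5 Tx rings
	 * over 2 vectors gives vector 0 three rings and vector 1 two rings.
	 */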
	for (v_id = 0; v_id < q_vectors; v_id++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
		u8 tx_rings_per_v, rx_rings_per_v;
		u16 q_id, q_base;

		/* Tx rings mapping to vector */
		tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_tx = tx_rings_per_v;
		q_vector->tx.ring = NULL;
		q_vector->tx.itr_idx = ICE_TX_ITR;
		q_base = vsi->num_txq - tx_rings_rem;

		for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
			struct ice_ring *tx_ring = vsi->tx_rings[q_id];

			tx_ring->q_vector = q_vector;
			tx_ring->next = q_vector->tx.ring;
			q_vector->tx.ring = tx_ring;
		}
		tx_rings_rem -= tx_rings_per_v;

		/* Rx rings mapping to vector */
		rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_rx = rx_rings_per_v;
		q_vector->rx.ring = NULL;
		q_vector->rx.itr_idx = ICE_RX_ITR;
		q_base = vsi->num_rxq - rx_rings_rem;

		for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
			struct ice_ring *rx_ring = vsi->rx_rings[q_id];

			rx_ring->q_vector = q_vector;
			rx_ring->next = q_vector->rx.ring;
			q_vector->rx.ring = rx_ring;
		}
		rx_rings_rem -= rx_rings_per_v;
	}
}

/**
 * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI having memory freed
 */
void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
{
	int v_idx;

	ice_for_each_q_vector(vsi, v_idx)
		ice_free_q_vector(vsi, v_idx);
}

/**
 * ice_vsi_cfg_txq - Configure single Tx queue
 * @vsi: the VSI that queue belongs to
 * @ring: Tx ring to be configured
 * @qg_buf: queue group buffer
 */
int
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
		struct ice_aqc_add_tx_qgrp *qg_buf)
{
	u8 buf_len = struct_size(qg_buf, txqs, 1);
	struct ice_tlan_ctx tlan_ctx = { 0 };
	struct ice_aqc_add_txqs_perq *txq;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	u16 pf_q;
	u8 tc;

	pf_q = ring->reg_idx;
	ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
	/* copy context contents into the qg_buf */
	qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
	ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
		    ice_tlan_ctx_info);

	/* init queue specific tail reg. It is referred to as the
	 * transmit comm scheduler queue doorbell.
	 */
	ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q);

	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
	else
		tc = 0;

	/* Add unique software queue handle of the Tx queue per
	 * TC into the VSI Tx ring
	 */
	ring->q_handle = ice_calc_q_handle(vsi, ring, tc);

	status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
				 1, qg_buf, buf_len, NULL);
	if (status) {
		dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n",
			ice_stat_str(status));
		return -ENODEV;
	}

	/* Add Tx Queue TEID into the VSI Tx ring from the
	 * response. This will complete configuring and
	 * enabling the queue.
	 */
	txq = &qg_buf->txqs[0];
	if (pf_q == le16_to_cpu(txq->txq_id))
		ring->txq_teid = le32_to_cpu(txq->q_teid);

	return 0;
}

/**
 * ice_cfg_itr - configure the initial interrupt throttle values
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector that's being configured
 *
 * Configure interrupt throttling values for the ring containers that are
 * associated with the interrupt vector passed in.
 */
void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
	ice_cfg_itr_gran(hw);

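	/* GLINT_ITR is programmed in units of the ITR granularity configured
	 * above (nominally 2 usecs), which is presumably why the
	 * microsecond-based ITR setting is shifted right by ICE_ITR_GRAN_S
	 * before being written below.
	 */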
	if (q_vector->num_ring_rx) {
		struct ice_ring_container *rc = &q_vector->rx;

		rc->target_itr = ITR_TO_REG(rc->itr_setting);
		rc->next_update = jiffies + 1;
		rc->current_itr = rc->target_itr;
		wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
		     ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
	}

	if (q_vector->num_ring_tx) {
		struct ice_ring_container *rc = &q_vector->tx;

		rc->target_itr = ITR_TO_REG(rc->itr_setting);
		rc->next_update = jiffies + 1;
		rc->current_itr = rc->target_itr;
		wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
		     ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
	}
}

/**
 * ice_cfg_txq_interrupt - configure interrupt on Tx queue
 * @vsi: the VSI being configured
 * @txq: Tx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
 * within the function space.
 */
void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;

	val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
	      ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);

	wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
	if (ice_is_xdp_ena_vsi(vsi)) {
		u32 xdp_txq = txq + vsi->num_xdp_txq;

		wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]),
		     val);
	}
	ice_flush(hw);
}

/**
 * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
 * @vsi: the VSI being configured
 * @rxq: Rx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
 * within the function space.
 */
void
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;

	val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
	      ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);

	wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);

	ice_flush(hw);
}

/**
 * ice_trigger_sw_intr - trigger a software interrupt
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector to trigger the software interrupt for
 */
void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
	     (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
	     GLINT_DYN_CTL_SWINT_TRIG_M |
	     GLINT_DYN_CTL_INTENA_M);
}

/**
 * ice_vsi_stop_tx_ring - Disable single Tx ring
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 * @ring: Tx ring to be stopped
 * @txq_meta: Meta data of Tx ring to be stopped
 */
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
		     u16 rel_vmvf_num, struct ice_ring *ring,
		     struct ice_txq_meta *txq_meta)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	u32 val;

	/* clear cause_ena bit for disabled queues */
	val = rd32(hw, QINT_TQCTL(ring->reg_idx));
	val &= ~QINT_TQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_TQCTL(ring->reg_idx), val);

	/* software is expected to wait for 100 ns */
	ndelay(100);

	/* trigger a software interrupt for the vector
	 * associated to the queue to schedule NAPI handler
	 */
	q_vector = ring->q_vector;
	if (q_vector)
		ice_trigger_sw_intr(hw, q_vector);

	status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
				 txq_meta->tc, 1, &txq_meta->q_handle,
				 &txq_meta->q_id, &txq_meta->q_teid, rst_src,
				 rel_vmvf_num, NULL);

	/* if the disable queue command was exercised during an
	 * active reset flow, ICE_ERR_RESET_ONGOING is returned.
	 * This is not an error as the reset operation disables
	 * queues at the hardware level anyway.
	 */
	if (status == ICE_ERR_RESET_ONGOING) {
		dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
	} else if (status == ICE_ERR_DOES_NOT_EXIST) {
		dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
	} else if (status) {
		dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %s\n",
			ice_stat_str(status));
		return -ENODEV;
	}

	return 0;
}

/**
 * ice_fill_txq_meta - Prepare the Tx queue's meta data
 * @vsi: VSI that ring belongs to
 * @ring: ring that txq_meta will be based on
 * @txq_meta: a helper struct that wraps Tx queue's information
 *
 * Set up a helper struct that will contain all the necessary fields
 * needed for stopping the Tx queue.
 */
void
ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
		  struct ice_txq_meta *txq_meta)
{
	u8 tc;

	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
	else
		tc = 0;

	txq_meta->q_id = ring->reg_idx;
	txq_meta->q_teid = ring->txq_teid;
	txq_meta->q_handle = ring->q_handle;
	txq_meta->vsi_idx = vsi->idx;
	txq_meta->tc = tc;
}