// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"

static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
                               u8 opcode,
                               __le16 echo,
                               union event_ring_data *data, u8 fw_return_code);
static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid);

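/**
 * qed_vf_calculate_legacy() - Compute the legacy-mode flags for a VF.
 *
 * @p_vf: VF info data.
 *
 * Derives the legacy queue-CID behavior from the fastpath HSI version
 * and the capabilities the VF reported in its ACQUIRE message.
 *
 * Return: Bitmask of QED_QCID_LEGACY_VF_* flags.
 */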
static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
{
        u8 legacy = 0;

        if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
            ETH_HSI_VER_NO_PKT_LEN_TUNN)
                legacy |= QED_QCID_LEGACY_VF_RX_PROD;

        if (!(p_vf->acquire.vfdev_info.capabilities &
              VFPF_ACQUIRE_CAP_QUEUE_QIDS))
                legacy |= QED_QCID_LEGACY_VF_CID;

        return legacy;
}

/* IOV ramrods */
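/**
 * qed_sp_vf_start() - Post a VF_START ramrod on the slowpath queue.
 *
 * @p_hwfn: HW device data.
 * @p_vf: VF info data.
 *
 * Configures the VF's personality and fastpath HSI version; if the VF
 * requested a minor HSI version newer than the PF supports, the PF's
 * version is configured instead.
 *
 * Return: 0 on success, negative errno otherwise.
 */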
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
{
        struct vf_start_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = -EINVAL;
        u8 fp_minor;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_vf->opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_VF_START,
                                 PROTOCOLID_COMMON, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.vf_start;

        p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
        p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);

        switch (p_hwfn->hw_info.personality) {
        case QED_PCI_ETH:
                p_ramrod->personality = PERSONALITY_ETH;
                break;
        case QED_PCI_ETH_ROCE:
        case QED_PCI_ETH_IWARP:
                p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
                          p_hwfn->hw_info.personality);
                qed_sp_destroy_request(p_hwfn, p_ent);
                return -EINVAL;
        }

        fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
        if (fp_minor > ETH_HSI_VER_MINOR &&
            fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PF's version\n",
                           p_vf->abs_vf_id,
                           ETH_HSI_VER_MAJOR,
                           fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
                fp_minor = ETH_HSI_VER_MINOR;
        }

        p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
        p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "VF[%d] - Starting using HSI %02x.%02x\n",
                   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

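/**
 * qed_sp_vf_stop() - Post a VF_STOP ramrod on the slowpath queue.
 *
 * @p_hwfn: HW device data.
 * @concrete_vfid: Concrete FID of the VF being stopped.
 * @opaque_vfid: Opaque FID of the VF being stopped.
 *
 * Return: 0 on success, negative errno otherwise.
 */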
static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
                          u32 concrete_vfid, u16 opaque_vfid)
{
        struct vf_stop_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
        int rc = -EINVAL;

        /* Get SPQ entry */
        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = opaque_vfid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_VF_STOP,
                                 PROTOCOLID_COMMON, &init_data);
        if (rc)
                return rc;

        p_ramrod = &p_ent->ramrod.vf_stop;

        p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

        return qed_spq_post(p_hwfn, p_ent, NULL);
}

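/**
 * qed_iov_is_valid_vfid() - Check that a relative VF index is usable.
 *
 * @p_hwfn: HW device data.
 * @rel_vf_id: Relative VF index to validate.
 * @b_enabled_only: Reject VFs that were never initialized.
 * @b_non_malicious: Reject VFs previously flagged as malicious.
 *
 * Return: true if the index refers to a usable VF, false otherwise.
 */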
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
                           int rel_vf_id,
                           bool b_enabled_only, bool b_non_malicious)
{
        if (!p_hwfn->pf_iov_info) {
                DP_NOTICE(p_hwfn->cdev, "No iov info\n");
                return false;
        }

        if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
            (rel_vf_id < 0))
                return false;

        if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
            b_enabled_only)
                return false;

        if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
            b_non_malicious)
                return false;

        return true;
}

static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
                                               u16 relative_vf_id,
                                               bool b_enabled_only)
{
        struct qed_vf_info *vf = NULL;

        if (!p_hwfn->pf_iov_info) {
                DP_NOTICE(p_hwfn->cdev, "No iov info\n");
                return NULL;
        }

        if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id,
                                  b_enabled_only, false))
                vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
        else
                DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
                       relative_vf_id);

        return vf;
}

static struct qed_queue_cid *
qed_iov_get_vf_rx_queue_cid(struct qed_vf_queue *p_queue)
{
        int i;

        for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
                if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx)
                        return p_queue->cids[i].p_cid;
        }

        return NULL;
}

enum qed_iov_validate_q_mode {
        QED_IOV_VALIDATE_Q_NA,
        QED_IOV_VALIDATE_Q_ENABLE,
        QED_IOV_VALIDATE_Q_DISABLE,
};

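/* Check whether a given VF queue matches the requested state. A queue
 * counts as enabled if any of its queue-zone CIDs in the requested
 * direction (Rx or Tx) is configured, and as disabled if none is;
 * QED_IOV_VALIDATE_Q_NA accepts either state.
 */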
static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn,
                                        struct qed_vf_info *p_vf,
                                        u16 qid,
                                        enum qed_iov_validate_q_mode mode,
                                        bool b_is_tx)
{
        int i;

        if (mode == QED_IOV_VALIDATE_Q_NA)
                return true;

        for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
                struct qed_vf_queue_cid *p_qcid;

                p_qcid = &p_vf->vf_queues[qid].cids[i];

                if (!p_qcid->p_cid)
                        continue;

                if (p_qcid->b_is_tx != b_is_tx)
                        continue;

                return mode == QED_IOV_VALIDATE_Q_ENABLE;
        }

        /* If no valid CID was found, the queue is disabled */
        return mode == QED_IOV_VALIDATE_Q_DISABLE;
}

static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
                                 struct qed_vf_info *p_vf,
                                 u16 rx_qid,
                                 enum qed_iov_validate_q_mode mode)
{
        if (rx_qid >= p_vf->num_rxqs) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
                           p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
                return false;
        }

        return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false);
}

static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
                                 struct qed_vf_info *p_vf,
                                 u16 tx_qid,
                                 enum qed_iov_validate_q_mode mode)
{
        if (tx_qid >= p_vf->num_txqs) {
                DP_VERBOSE(p_hwfn,
                           QED_MSG_IOV,
                           "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
                           p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
                return false;
        }

        return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true);
}

static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
                                struct qed_vf_info *p_vf, u16 sb_idx)
{
        int i;

        for (i = 0; i < p_vf->num_sbs; i++)
                if (p_vf->igu_sbs[i] == sb_idx)
                        return true;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
                   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

        return false;
}

static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn,
                                        struct qed_vf_info *p_vf)
{
        u8 i;

        for (i = 0; i < p_vf->num_rxqs; i++)
                if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
                                                QED_IOV_VALIDATE_Q_ENABLE,
                                                false))
                        return true;

        return false;
}

static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn,
                                        struct qed_vf_info *p_vf)
{
        u8 i;

        for (i = 0; i < p_vf->num_txqs; i++)
                if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
                                                QED_IOV_VALIDATE_Q_ENABLE,
                                                true))
                        return true;

        return false;
}

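/**
 * qed_iov_post_vf_bulletin() - DMA the PF's bulletin board copy to a VF.
 *
 * @p_hwfn: HW device data.
 * @vfid: Relative index of the target VF.
 * @p_ptt: PTT window for the DMA engine.
 *
 * Bumps the bulletin version, recomputes the CRC over everything past
 * the CRC field itself, and copies the board into VF memory via DMAE.
 *
 * Return: 0 on success, negative errno otherwise.
 */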
static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
                                    int vfid, struct qed_ptt *p_ptt)
{
        struct qed_bulletin_content *p_bulletin;
        int crc_size = sizeof(p_bulletin->crc);
        struct qed_dmae_params params;
        struct qed_vf_info *p_vf;

        p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
        if (!p_vf)
                return -EINVAL;

        if (!p_vf->vf_bulletin)
                return -EINVAL;

        p_bulletin = p_vf->bulletin.p_virt;

        /* Increment bulletin board version and compute crc */
        p_bulletin->version++;
        p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
                                p_vf->bulletin.size - crc_size);

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
                   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

        /* propagate bulletin board via dmae to vm memory */
        memset(&params, 0, sizeof(params));
        SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
        params.dst_vfid = p_vf->abs_vf_id;
        return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
                                  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
                                  &params);
}

static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
        struct qed_hw_sriov_info *iov = cdev->p_iov_info;
        int pos = iov->pos;

        DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
        pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

        pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
        if (iov->num_vfs) {
                DP_VERBOSE(cdev,
                           QED_MSG_IOV,
                           "Number of VFs is already set to a non-zero value. Ignoring PCI configuration value\n");
                iov->num_vfs = 0;
        }

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

        pci_read_config_word(cdev->pdev,
                             pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

        pci_read_config_dword(cdev->pdev,
                              pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

        pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

        pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

        DP_VERBOSE(cdev,
                   QED_MSG_IOV,
                   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
                   iov->nres,
                   iov->cap,
                   iov->ctrl,
                   iov->total_vfs,
                   iov->initial_vfs,
                   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

        /* Some sanity checks */
        if (iov->num_vfs > NUM_OF_VFS(cdev) ||
            iov->total_vfs > NUM_OF_VFS(cdev)) {
                /* This can happen only due to a bug. In this case we set
                 * num_vfs to zero to avoid memory corruption in the code that
                 * assumes the max number of vfs
                 */
                DP_NOTICE(cdev,
                          "IOV: Unexpected number of vfs set: %d; setting num_vfs to zero\n",
                          iov->num_vfs);

                iov->num_vfs = 0;
                iov->total_vfs = 0;
        }

        return 0;
}

static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
        struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
        struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
        struct qed_bulletin_content *p_bulletin_virt;
        dma_addr_t req_p, rply_p, bulletin_p;
        union pfvf_tlvs *p_reply_virt_addr;
        union vfpf_tlvs *p_req_virt_addr;
        u8 idx = 0;

        memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

        p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
        req_p = p_iov_info->mbx_msg_phys_addr;
        p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
        rply_p = p_iov_info->mbx_reply_phys_addr;
        p_bulletin_virt = p_iov_info->p_bulletins;
        bulletin_p = p_iov_info->bulletins_phys;
        if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
                DP_ERR(p_hwfn,
                       "qed_iov_setup_vfdb called without allocating mem first\n");
                return;
        }

        for (idx = 0; idx < p_iov->total_vfs; idx++) {
                struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
                u32 concrete;

                vf->vf_mbx.req_virt = p_req_virt_addr + idx;
                vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
                vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
                vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

                vf->state = VF_STOPPED;
                vf->b_init = false;

                vf->bulletin.phys = idx *
                                    sizeof(struct qed_bulletin_content) +
                                    bulletin_p;
                vf->bulletin.p_virt = p_bulletin_virt + idx;
                vf->bulletin.size = sizeof(struct qed_bulletin_content);

                vf->relative_vf_id = idx;
                vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
                concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
                vf->concrete_fid = concrete;
                vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
                                 (vf->abs_vf_id << 8);
                vf->vport_id = idx + 1;

                vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
                vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
        }
}

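/**
 * qed_iov_allocate_vfdb() - Allocate the PF's per-VF IOV database.
 *
 * @p_hwfn: HW device data.
 *
 * Allocates DMA-coherent buffers for the per-VF mailbox requests,
 * mailbox replies and bulletin boards.
 *
 * Return: 0 on success, -ENOMEM if any allocation fails.
 */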
static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
        struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
        void **p_v_addr;
        u16 num_vfs = 0;

        num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);

        /* Allocate PF Mailbox buffer (per-VF) */
        p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
        p_v_addr = &p_iov_info->mbx_msg_virt_addr;
        *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                       p_iov_info->mbx_msg_size,
                                       &p_iov_info->mbx_msg_phys_addr,
                                       GFP_KERNEL);
        if (!*p_v_addr)
                return -ENOMEM;

        /* Allocate PF Mailbox Reply buffer (per-VF) */
        p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
        p_v_addr = &p_iov_info->mbx_reply_virt_addr;
        *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                       p_iov_info->mbx_reply_size,
                                       &p_iov_info->mbx_reply_phys_addr,
                                       GFP_KERNEL);
        if (!*p_v_addr)
                return -ENOMEM;

        p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
                                     num_vfs;
        p_v_addr = &p_iov_info->p_bulletins;
        *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                       p_iov_info->bulletins_size,
                                       &p_iov_info->bulletins_phys,
                                       GFP_KERNEL);
        if (!*p_v_addr)
                return -ENOMEM;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
                   p_iov_info->mbx_msg_virt_addr,
                   (u64) p_iov_info->mbx_msg_phys_addr,
                   p_iov_info->mbx_reply_virt_addr,
                   (u64) p_iov_info->mbx_reply_phys_addr,
                   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);

        return 0;
}

static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
        struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

        if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_iov_info->mbx_msg_size,
                                  p_iov_info->mbx_msg_virt_addr,
                                  p_iov_info->mbx_msg_phys_addr);

        if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_iov_info->mbx_reply_size,
                                  p_iov_info->mbx_reply_virt_addr,
                                  p_iov_info->mbx_reply_phys_addr);

        if (p_iov_info->p_bulletins)
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  p_iov_info->bulletins_size,
                                  p_iov_info->p_bulletins,
                                  p_iov_info->bulletins_phys);
}

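/**
 * qed_iov_alloc() - Allocate SR-IOV related resources for a PF.
 *
 * @p_hwfn: HW device data.
 *
 * Registers the common-protocol async event callback and allocates the
 * per-VF database.
 *
 * Return: 0 on success (including when SR-IOV is not supported),
 *         negative errno otherwise.
 */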
int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_pf_iov *p_sriov;

        if (!IS_PF_SRIOV(p_hwfn)) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "No SR-IOV - no need for IOV db\n");
                return 0;
        }

        p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
        if (!p_sriov)
                return -ENOMEM;

        p_hwfn->pf_iov_info = p_sriov;

        qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
                                  qed_sriov_eqe_event);

        return qed_iov_allocate_vfdb(p_hwfn);
}

void qed_iov_setup(struct qed_hwfn *p_hwfn)
{
        if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
                return;

        qed_iov_setup_vfdb(p_hwfn);
}

void qed_iov_free(struct qed_hwfn *p_hwfn)
{
        qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);

        if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
                qed_iov_free_vfdb(p_hwfn);
                kfree(p_hwfn->pf_iov_info);
        }
}

void qed_iov_free_hw_info(struct qed_dev *cdev)
{
        kfree(cdev->p_iov_info);
        cdev->p_iov_info = NULL;
}

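/**
 * qed_iov_hw_info() - Learn the device's SR-IOV configuration.
 *
 * @p_hwfn: HW device data.
 *
 * Reads the SR-IOV PCI capability, caches it in cdev->p_iov_info and
 * computes the index of the first VF that belongs to this PF. The
 * structure is freed again if the capability exists but publishes no
 * VFs, so that p_iov_info remains synonymous with SR-IOV support.
 *
 * Return: 0 on success, negative errno otherwise.
 */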
int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
        struct qed_dev *cdev = p_hwfn->cdev;
        int pos;
        int rc;

        if (is_kdump_kernel())
                return 0;

        if (IS_VF(p_hwfn->cdev))
                return 0;

        /* Learn the PCI configuration */
        pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
                                      PCI_EXT_CAP_ID_SRIOV);
        if (!pos) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
                return 0;
        }

        /* Allocate a new struct for IOV information */
        cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
        if (!cdev->p_iov_info)
                return -ENOMEM;

        cdev->p_iov_info->pos = pos;

        rc = qed_iov_pci_cfg_info(cdev);
        if (rc)
                return rc;

        /* We want PF IOV to be synonymous with the existence of p_iov_info;
         * in case the capability is published but there are no VFs, simply
         * de-allocate the struct.
         */
        if (!cdev->p_iov_info->total_vfs) {
                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "IOV capabilities, but no VFs are published\n");
                kfree(cdev->p_iov_info);
                cdev->p_iov_info = NULL;
                return 0;
        }

        /* First VF index based on offset is tricky:
         * - If ARI is supported [likely], offset - (16 - pf_id) would
         *   provide the number for eng0. 2nd engine VFs would begin
         *   after the first engine's VFs.
         * - If !ARI, VFs would start on the next device,
         *   so offset - (256 - pf_id) would provide the number.
         * Utilize the fact that (256 - pf_id) is achieved only by later
         * hwfns to differentiate between the two.
         */

        if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
                u32 first = p_hwfn->cdev->p_iov_info->offset +
                            p_hwfn->abs_pf_id - 16;

                cdev->p_iov_info->first_vf_in_pf = first;

                if (QED_PATH_ID(p_hwfn))
                        cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
        } else {
                u32 first = p_hwfn->cdev->p_iov_info->offset +
                            p_hwfn->abs_pf_id - 256;

                cdev->p_iov_info->first_vf_in_pf = first;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "First VF in hwfn 0x%08x\n",
                   cdev->p_iov_info->first_vf_in_pf);

        return 0;
}

static bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
                                     int vfid, bool b_fail_malicious)
{
        /* Check PF supports sriov */
        if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
            !IS_PF_SRIOV_ALLOC(p_hwfn))
                return false;

        /* Check VF validity */
        if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
                return false;

        return true;
}

static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
        return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
}

static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
                                      u16 rel_vf_id, u8 to_disable)
{
        struct qed_vf_info *vf;
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
                if (!vf)
                        continue;

                vf->to_disable = to_disable;
        }
}

static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
        u16 i;

        if (!IS_QED_SRIOV(cdev))
                return;

        for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
                qed_iov_set_vf_to_disable(cdev, i, to_disable);
}

static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
                                       struct qed_ptt *p_ptt, u8 abs_vfid)
{
        qed_wr(p_hwfn, p_ptt,
               PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
               1 << (abs_vfid & 0x1f));
}

static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
        int i;

        /* Set VF masks and configuration - pretend */
        qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

        qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

        /* unpretend */
        qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

        /* iterate over all queues, clear sb consumer */
        for (i = 0; i < vf->num_sbs; i++)
                qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
                                                vf->igu_sbs[i],
                                                vf->opaque_fid, true);
}

static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt,
                                   struct qed_vf_info *vf, bool enable)
{
        u32 igu_vf_conf;

        qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

        igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

        if (enable)
                igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
        else
                igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

        qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

        /* unpretend */
        qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
}

static int
qed_iov_enable_vf_access_msix(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt, u8 abs_vf_id, u8 num_sbs)
{
        u8 current_max = 0;
        int i;

        /* For AH onward, configuration is per-PF. Find maximum of all
         * the currently enabled child VFs, and set the number to be that.
         */
        if (!QED_IS_BB(p_hwfn->cdev)) {
                qed_for_each_vf(p_hwfn, i) {
                        struct qed_vf_info *p_vf;

                        p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true);
                        if (!p_vf)
                                continue;

                        current_max = max_t(u8, current_max, p_vf->num_sbs);
                }
        }

        if (num_sbs > current_max)
                return qed_mcp_config_vf_msix(p_hwfn, p_ptt,
                                              abs_vf_id, num_sbs);

        return 0;
}

static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
                                    struct qed_ptt *p_ptt,
                                    struct qed_vf_info *vf)
{
        u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
        int rc;

        /* It's possible VF was previously considered malicious -
         * clear the indication even if we're only going to disable VF.
         */
        vf->b_malicious = false;

        if (vf->to_disable)
                return 0;

        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "Enable internal access for vf %x [abs %x]\n",
                   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));

        qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

        qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

        rc = qed_iov_enable_vf_access_msix(p_hwfn, p_ptt,
                                           vf->abs_vf_id, vf->num_sbs);
        if (rc)
                return rc;

        qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

        SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
        STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

        qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
                     p_hwfn->hw_info.hw_mode);

        /* unpretend */
        qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

        vf->state = VF_FREE;

        return rc;
}

/**
 * qed_iov_config_perm_table() - Configure the permission zone table.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window for writing the registers.
 * @vf: VF info data.
 * @enable: The actual permission for this VF.
 *
 * In E4, the queue zone permission table size is 320x9. There
 * are 320 VF queues for a single-engine device (256 for a dual-
 * engine device), and each entry has the following format:
 * {Valid, VF[7:0]}
 */
static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
                                      struct qed_vf_info *vf, u8 enable)
{
        u32 reg_addr, val;
        u16 qzone_id = 0;
        int qid;

        for (qid = 0; qid < vf->num_rxqs; qid++) {
                qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
                                &qzone_id);

                reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
                val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
                qed_wr(p_hwfn, p_ptt, reg_addr, val);
        }
}

static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
                                      struct qed_vf_info *vf)
{
        /* Reset vf in IGU - interrupts are still disabled */
        qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

        qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

        /* Permission Table */
        qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}

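/**
 * qed_iov_alloc_vf_igu_sbs() - Assign free IGU status blocks to a VF.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window for writing the registers.
 * @vf: VF info data.
 * @num_rx_queues: Number of SBs requested; capped by the free IOV SBs.
 *
 * Marks the chosen IGU CAM lines as VF-owned and initializes the
 * matching CAU entries.
 *
 * Return: Number of status blocks actually assigned to the VF.
 */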
static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
                                   struct qed_ptt *p_ptt,
                                   struct qed_vf_info *vf, u16 num_rx_queues)
{
        struct qed_igu_block *p_block;
        struct cau_sb_entry sb_entry;
        int qid = 0;
        u32 val = 0;

        if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
                num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
        p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;

        SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
        SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
        SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

        for (qid = 0; qid < num_rx_queues; qid++) {
                p_block = qed_get_igu_free_sb(p_hwfn, false);
                vf->igu_sbs[qid] = p_block->igu_sb_id;
                p_block->status &= ~QED_IGU_STATUS_FREE;
                SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

                qed_wr(p_hwfn, p_ptt,
                       IGU_REG_MAPPING_MEMORY +
                       sizeof(u32) * p_block->igu_sb_id, val);

                /* Configure igu sb in CAU which were marked valid */
                qed_init_cau_sb_entry(p_hwfn, &sb_entry,
                                      p_hwfn->rel_pf_id, vf->abs_vf_id, 1);

                qed_dmae_host2grc(p_hwfn, p_ptt,
                                  (u64)(uintptr_t)&sb_entry,
                                  CAU_REG_SB_VAR_MEMORY +
                                  p_block->igu_sb_id * sizeof(u64), 2, NULL);
        }

        vf->num_sbs = (u8) num_rx_queues;

        return vf->num_sbs;
}

static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
                                    struct qed_ptt *p_ptt,
                                    struct qed_vf_info *vf)
{
        struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
        int idx, igu_id;
        u32 addr, val;

        /* Invalidate igu CAM lines and mark them as free */
        for (idx = 0; idx < vf->num_sbs; idx++) {
                igu_id = vf->igu_sbs[idx];
                addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

                val = qed_rd(p_hwfn, p_ptt, addr);
                SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
                qed_wr(p_hwfn, p_ptt, addr, val);

                p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE;
                p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
        }

        vf->num_sbs = 0;
}

static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
                             u16 vfid,
                             struct qed_mcp_link_params *params,
                             struct qed_mcp_link_state *link,
                             struct qed_mcp_link_capabilities *p_caps)
{
        struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
                                                       vfid,
                                                       false);
        struct qed_bulletin_content *p_bulletin;

        if (!p_vf)
                return;

        p_bulletin = p_vf->bulletin.p_virt;
        p_bulletin->req_autoneg = params->speed.autoneg;
        p_bulletin->req_adv_speed = params->speed.advertised_speeds;
        p_bulletin->req_forced_speed = params->speed.forced_speed;
        p_bulletin->req_autoneg_pause = params->pause.autoneg;
        p_bulletin->req_forced_rx = params->pause.forced_rx;
        p_bulletin->req_forced_tx = params->pause.forced_tx;
        p_bulletin->req_loopback = params->loopback_mode;

        p_bulletin->link_up = link->link_up;
        p_bulletin->speed = link->speed;
        p_bulletin->full_duplex = link->full_duplex;
        p_bulletin->autoneg = link->an;
        p_bulletin->autoneg_complete = link->an_complete;
        p_bulletin->parallel_detection = link->parallel_detection;
        p_bulletin->pfc_enabled = link->pfc_enabled;
        p_bulletin->partner_adv_speed = link->partner_adv_speed;
        p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
        p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
        p_bulletin->partner_adv_pause = link->partner_adv_pause;
        p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

        p_bulletin->capability_speed = p_caps->speed_capabilities;
}

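/**
 * qed_iov_init_hw_for_vf() - Prepare HW resources for a VF being enabled.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window for writing the registers.
 * @p_params: Requested VF index and queue placement.
 *
 * Validates the requested queue zones, limits the queue count by the
 * available CIDs and IGU SBs, snapshots the current link configuration
 * into the VF's bulletin and enables the VF's internal access.
 *
 * Return: 0 on success, negative errno otherwise.
 */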
static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt,
                                  struct qed_iov_vf_init_params *p_params)
{
        struct qed_mcp_link_capabilities link_caps;
        struct qed_mcp_link_params link_params;
        struct qed_mcp_link_state link_state;
        u8 num_of_vf_available_chains = 0;
        struct qed_vf_info *vf = NULL;
        u16 qid, num_irqs;
        int rc = 0;
        u32 cids;
        u8 i;

        vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
        if (!vf) {
                DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf: vf is NULL\n");
                return -EINVAL;
        }

        if (vf->b_init) {
                DP_NOTICE(p_hwfn, "VF[%d] is already active.\n",
                          p_params->rel_vf_id);
                return -EINVAL;
        }

        /* Perform sanity checking on the requested queue_id */
        for (i = 0; i < p_params->num_queues; i++) {
                u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
                u16 max_vf_qzone = min_vf_qzone +
                                   FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1;

                qid = p_params->req_rx_queue[i];
                if (qid < min_vf_qzone || qid > max_vf_qzone) {
                        DP_NOTICE(p_hwfn,
                                  "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
                                  qid,
                                  p_params->rel_vf_id,
                                  min_vf_qzone, max_vf_qzone);
                        return -EINVAL;
                }

                qid = p_params->req_tx_queue[i];
                if (qid > max_vf_qzone) {
                        DP_NOTICE(p_hwfn,
                                  "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
                                  qid, p_params->rel_vf_id, max_vf_qzone);
                        return -EINVAL;
                }

                /* If client *really* wants, Tx qid can be shared with PF */
                if (qid < min_vf_qzone)
                        DP_VERBOSE(p_hwfn,
                                   QED_MSG_IOV,
                                   "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
                                   p_params->rel_vf_id, qid, i);
        }

        /* Limit number of queues according to number of CIDs */
        qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
        DP_VERBOSE(p_hwfn,
                   QED_MSG_IOV,
                   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
                   vf->relative_vf_id, p_params->num_queues, (u16)cids);
        num_irqs = min_t(u16, p_params->num_queues, ((u16)cids));

        num_of_vf_available_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
                                                              p_ptt,
                                                              vf, num_irqs);
        if (!num_of_vf_available_chains) {
                DP_ERR(p_hwfn, "no available igu sbs\n");
                return -ENOMEM;
        }

        /* Choose queue number and index ranges */
        vf->num_rxqs = num_of_vf_available_chains;
        vf->num_txqs = num_of_vf_available_chains;

        for (i = 0; i < vf->num_rxqs; i++) {
                struct qed_vf_queue *p_queue = &vf->vf_queues[i];

                p_queue->fw_rx_qid = p_params->req_rx_queue[i];
                p_queue->fw_tx_qid = p_params->req_tx_queue[i];

                DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                           "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
                           vf->relative_vf_id, i, vf->igu_sbs[i],
                           p_queue->fw_rx_qid, p_queue->fw_tx_qid);
        }

        /* Update the link configuration in bulletin */
        memcpy(&link_params, qed_mcp_get_link_params(p_hwfn),
               sizeof(link_params));
        memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state));
        memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn),
               sizeof(link_caps));
        qed_iov_set_link(p_hwfn, p_params->rel_vf_id,
                         &link_params, &link_state, &link_caps);

        rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
        if (!rc) {
                vf->b_init = true;

                if (IS_LEAD_HWFN(p_hwfn))
                        p_hwfn->cdev->p_iov_info->num_vfs++;
        }

        return rc;
}

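/**
 * qed_iov_release_hw_for_vf() - Tear down HW resources of a VF.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window for writing the registers.
 * @rel_vf_id: Relative index of the VF being released.
 *
 * Return: 0 on success, negative errno otherwise.
 */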
qed_iov_release_hw_for_vf(struct qed_hwfn * p_hwfn,struct qed_ptt * p_ptt,u16 rel_vf_id)1085*4882a593Smuzhiyun static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
1086*4882a593Smuzhiyun struct qed_ptt *p_ptt, u16 rel_vf_id)
1087*4882a593Smuzhiyun {
1088*4882a593Smuzhiyun struct qed_mcp_link_capabilities caps;
1089*4882a593Smuzhiyun struct qed_mcp_link_params params;
1090*4882a593Smuzhiyun struct qed_mcp_link_state link;
1091*4882a593Smuzhiyun struct qed_vf_info *vf = NULL;
1092*4882a593Smuzhiyun
1093*4882a593Smuzhiyun vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
1094*4882a593Smuzhiyun if (!vf) {
1095*4882a593Smuzhiyun DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
1096*4882a593Smuzhiyun return -EINVAL;
1097*4882a593Smuzhiyun }
1098*4882a593Smuzhiyun
1099*4882a593Smuzhiyun if (vf->bulletin.p_virt)
1100*4882a593Smuzhiyun memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));
1101*4882a593Smuzhiyun
1102*4882a593Smuzhiyun memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
1103*4882a593Smuzhiyun
1104*4882a593Smuzhiyun /* Get the link configuration back in bulletin so
1105*4882a593Smuzhiyun * that when VFs are re-enabled they get the actual
1106*4882a593Smuzhiyun * link configuration.
1107*4882a593Smuzhiyun */
1108*4882a593Smuzhiyun memcpy(¶ms, qed_mcp_get_link_params(p_hwfn), sizeof(params));
1109*4882a593Smuzhiyun memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
1110*4882a593Smuzhiyun memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
1111*4882a593Smuzhiyun qed_iov_set_link(p_hwfn, rel_vf_id, ¶ms, &link, &caps);
1112*4882a593Smuzhiyun
1113*4882a593Smuzhiyun /* Forget the VF's acquisition message */
1114*4882a593Smuzhiyun memset(&vf->acquire, 0, sizeof(vf->acquire));
1115*4882a593Smuzhiyun
1116*4882a593Smuzhiyun /* disablng interrupts and resetting permission table was done during
1117*4882a593Smuzhiyun * vf-close, however, we could get here without going through vf_close
1118*4882a593Smuzhiyun */
1119*4882a593Smuzhiyun /* Disable Interrupts for VF */
1120*4882a593Smuzhiyun qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
1121*4882a593Smuzhiyun
1122*4882a593Smuzhiyun /* Reset Permission table */
1123*4882a593Smuzhiyun qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
1124*4882a593Smuzhiyun
1125*4882a593Smuzhiyun vf->num_rxqs = 0;
1126*4882a593Smuzhiyun vf->num_txqs = 0;
1127*4882a593Smuzhiyun qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
1128*4882a593Smuzhiyun
1129*4882a593Smuzhiyun if (vf->b_init) {
1130*4882a593Smuzhiyun vf->b_init = false;
1131*4882a593Smuzhiyun
1132*4882a593Smuzhiyun if (IS_LEAD_HWFN(p_hwfn))
1133*4882a593Smuzhiyun p_hwfn->cdev->p_iov_info->num_vfs--;
1134*4882a593Smuzhiyun }
1135*4882a593Smuzhiyun
1136*4882a593Smuzhiyun return 0;
1137*4882a593Smuzhiyun }

static bool qed_iov_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}
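
/* Illustrative usage (mirrors qed_iov_prepare_resp() below): a reply is a
 * chain of TLVs terminated by a list-end marker, e.g.
 *
 *	mbx->offset = (u8 *)mbx->reply_virt;
 *	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
 *	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
 *		    sizeof(struct channel_list_end_tlv));
 */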

/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* output tlv */
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "TLV number %d: type %d, length %d\n",
			   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
			return;
		}

		total_length += tlv->length;

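		/* The mailbox request/reply buffers are
		 * sizeof(struct tlv_buffer_size) bytes; a list extending
		 * past that would overrun the buffer.
		 */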
		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}

static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_vf_info *p_vf,
				  u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct qed_dmae_params params;
	u8 eng_vf_id;

	mbx->reply_virt->default_resp.hdr.status = status;

	qed_dp_tlv_list(p_hwfn, mbx->reply_virt);

	eng_vf_id = p_vf->abs_vf_id;

	memset(&params, 0, sizeof(params));
	SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
	params.dst_vfid = eng_vf_id;
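
	/* Copy the reply body first, skipping the leading quad-word; that
	 * quad-word carries the status header the VF waits on, and is DMAed
	 * only after the channel is marked ready below.
	 */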
	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			   mbx->req_virt->first_tlv.reply_address +
			   sizeof(u64),
			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			   &params);

	/* Once PF copies the rc to the VF, the latter can continue
	 * and send an additional message. So we have to make sure the
	 * channel would be re-set to ready prior to that.
	 */
	REG_WR(p_hwfn,
	       GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			   mbx->req_virt->first_tlv.reply_address,
			   sizeof(u64) / 4, &params);
}

static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
				enum qed_iov_vport_update_flag flag)
{
	switch (flag) {
	case QED_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case QED_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case QED_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case QED_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case QED_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case QED_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
	default:
		return 0;
	}
}

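/* Build the vport-update reply: a header TLV, then one default-resp TLV per
 * extended TLV the PF recognized in the request (carrying the given status
 * if accepted, PFVF_STATUS_NOT_SUPPORTED otherwise), terminated by a
 * list-end TLV.
 */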
static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf,
					    struct qed_iov_vf_mbx *p_mbx,
					    u8 status,
					    u16 tlvs_mask, u16 tlvs_accepted)
{
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);
	total_len = size;

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & BIT(i)))
			continue;

		resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
				   qed_iov_vport_to_tlv(p_hwfn, i), size);

		if (tlvs_accepted & BIT(i))
			resp->hdr.status = status;
		else
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - vport_update response: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

		total_len += size;
	}

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	return total_len;
}

static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *vf_info,
				 u16 type, u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}

static struct
qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
	if (!vf)
		return NULL;

	return &vf->p_vf_info;
}

static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
{
	struct qed_public_vf_info *vf_info;

	vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);

	if (!vf_info)
		return;

	/* Clear the VF mac */
	eth_zero_addr(vf_info->mac);

	vf_info->rx_accept_mode = 0;
	vf_info->tx_accept_mode = 0;
}

static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
			       struct qed_vf_info *p_vf)
{
	u32 i, j;

	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested fewer resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
		struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];

		for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
			if (!p_queue->cids[j].p_cid)
				continue;

			qed_eth_queue_cid_release(p_hwfn,
						  p_queue->cids[j].p_cid);
			p_queue->cids[j].p_cid = NULL;
		}
	}

	memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
	qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}

/* Returns either 0, or log2 of the VF doorbell BAR size in bytes */
static u32 qed_iov_vf_db_bar_size(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);

	if (val)
		return val + 11;
	return 0;
}

static void
qed_iov_vf_mbx_acquire_resc_cids(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *p_vf,
				 struct vf_pf_resc_request *p_req,
				 struct pf_vf_resc *p_resp)
{
	u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
	u8 db_size = qed_db_addr_vf(1, DQ_DEMS_LEGACY) -
		     qed_db_addr_vf(0, DQ_DEMS_LEGACY);
	u32 bar_size;

	p_resp->num_cids = min_t(u8, p_req->num_cids, num_vf_cons);

	/* If the VF didn't bother asking for QIDs, then don't bother
	 * limiting the number of CIDs. The VF doesn't care about the number,
	 * and this has the likely result of causing an additional
	 * acquisition.
	 */
	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
		return;

	/* If the doorbell bar was mapped by the VF, limit the VF CIDs to an
	 * amount that would make sure doorbells for all CIDs fall within the
	 * bar. If it doesn't, make sure the regview window is sufficient.
	 */
	if (p_vf->acquire.vfdev_info.capabilities &
	    VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
		bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);
		if (bar_size)
			bar_size = 1 << bar_size;

		if (p_hwfn->cdev->num_hwfns > 1)
			bar_size /= 2;
	} else {
		bar_size = PXP_VF_BAR0_DQ_LENGTH;
	}

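	/* Each CID consumes one doorbell of db_size bytes; if fewer than 256
	 * doorbells fit in the usable window, shrink the CID count to what
	 * actually fits.
	 */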
	if (bar_size / db_size < 256)
		p_resp->num_cids = min_t(u8, p_resp->num_cids,
					 (u8)(bar_size / db_size));
}

static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *p_vf,
				      struct vf_pf_resc_request *p_req,
				      struct pf_vf_resc *p_resp)
{
	u8 i;

	/* Queue related information */
	p_resp->num_rxqs = p_vf->num_rxqs;
	p_resp->num_txqs = p_vf->num_txqs;
	p_resp->num_sbs = p_vf->num_sbs;

	for (i = 0; i < p_resp->num_sbs; i++) {
		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
		p_resp->hw_sbs[i].sb_qid = 0;
	}

	/* These fields are filled for backward compatibility.
	 * Unused by modern vfs.
	 */
	for (i = 0; i < p_resp->num_rxqs; i++) {
		qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
				(u16 *)&p_resp->hw_qid[i]);
		p_resp->cid[i] = i;
	}

	/* Filter related information */
	p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
					p_req->num_mac_filters);
	p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
					 p_req->num_vlan_filters);

	qed_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);

	/* This isn't really needed/enforced, but some legacy VFs might depend
	 * on the correct filling of this field.
	 */
	p_resp->num_mc_filters = QED_MAX_MC_ADDRS;

	/* Validate sufficient resources for VF */
	if (p_resp->num_rxqs < p_req->num_rxqs ||
	    p_resp->num_txqs < p_req->num_txqs ||
	    p_resp->num_sbs < p_req->num_sbs ||
	    p_resp->num_mac_filters < p_req->num_mac_filters ||
	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
	    p_resp->num_mc_filters < p_req->num_mc_filters ||
	    p_resp->num_cids < p_req->num_cids) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
			   p_vf->abs_vf_id,
			   p_req->num_rxqs,
			   p_resp->num_rxqs,
			   p_req->num_txqs,
			   p_resp->num_txqs,
			   p_req->num_sbs,
			   p_resp->num_sbs,
			   p_req->num_mac_filters,
			   p_resp->num_mac_filters,
			   p_req->num_vlan_filters,
			   p_resp->num_vlan_filters,
			   p_req->num_mc_filters,
			   p_resp->num_mc_filters,
			   p_req->num_cids, p_resp->num_cids);

		/* Some legacy OSes are incapable of correctly handling this
		 * failure.
		 */
		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
		    (p_vf->acquire.vfdev_info.os_type ==
		     VFPF_ACQUIRE_OS_WINDOWS))
			return PFVF_STATUS_SUCCESS;

		return PFVF_STATUS_NO_RESOURCE;
	}

	return PFVF_STATUS_SUCCESS;
}

static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
					 struct pfvf_stats_info *p_stats)
{
	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
				  offsetof(struct mstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
				  offsetof(struct ustorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
				  offsetof(struct pstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
	p_stats->tstats.address = 0;
	p_stats->tstats.len = 0;
}

static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
	struct pf_vf_resc *resc = &resp->resc;
	int rc;

	memset(resp, 0, sizeof(*resp));

	/* Write the PF version so that the VF knows which version is
	 * supported - it might be overridden later. This guarantees that
	 * the VF can recognize a legacy PF by the lack of versions in the
	 * reply.
	 */
	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;

	if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
			   vf->abs_vf_id, vf->state);
		goto out;
	}

	/* Validate FW compatibility */
	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
		if (req->vfdev_info.capabilities &
		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;

			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] is pre-fastpath HSI\n",
				   vf->abs_vf_id);
			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
		} else {
			DP_INFO(p_hwfn,
				"VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
				vf->abs_vf_id,
				req->vfdev_info.eth_fp_hsi_major,
				req->vfdev_info.eth_fp_hsi_minor,
				ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);

			goto out;
		}
	}

	/* On 100g PFs, prevent old VFs from loading */
	if ((p_hwfn->cdev->num_hwfns > 1) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an old driver that doesn't support 100g\n",
			vf->abs_vf_id);
		goto out;
	}

	/* Store the acquire message */
	memcpy(&vf->acquire, req, sizeof(vf->acquire));

	vf->opaque_fid = req->vfdev_info.opaque_fid;

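	/* Keep the smaller of the PF-allocated and VF-reported bulletin
	 * sizes, so that bulletin posts can't overrun the VF's buffer.
	 */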
	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
			    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->cdev->chip_num;
	pfdev_info->db_size = 0;
	pfdev_info->indices_per_sb = PIS_PER_SB_E4;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (p_hwfn->cdev->num_hwfns > 1)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	/* Share our ability to use multiple queue-ids only with VFs
	 * that request it.
	 */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;

	/* Share the sizes of the bars with VF */
	resp->pfdev_info.bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);

	qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);

	memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;

	/* Incorrect when legacy, but doesn't matter as legacy isn't reading
	 * this field.
	 */
	pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
					 req->vfdev_info.eth_fp_hsi_minor);
	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
	qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);

	pfdev_info->dev_type = p_hwfn->cdev->type;
	pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;

	/* Fill resources available to VF; Make sure there are enough to
	 * satisfy the VF's request.
	 */
	vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
						  &req->resc_request, resc);
	if (vfpf_status != PFVF_STATUS_SUCCESS)
		goto out;

	/* Start the VF in FW */
	rc = qed_sp_vf_start(p_hwfn, vf);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
		vfpf_status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Fill agreed size of bulletin board in response */
	resp->bulletin_size = vf->bulletin.size;
	qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
		   vf->abs_vf_id,
		   resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size,
		   resp->pfdev_info.indices_per_sb,
		   resp->pfdev_info.capabilities,
		   resc->num_rxqs,
		   resc->num_txqs,
		   resc->num_sbs,
		   resc->num_mac_filters,
		   resc->num_vlan_filters);
	vf->state = VF_ACQUIRED;

	/* Prepare Response */
out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			     sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
}

static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
				  struct qed_vf_info *p_vf, bool val)
{
	struct qed_sp_vport_update_params params;
	int rc;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n", val);
		return 0;
	}

	memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
	if (!rc) {
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);
	}

	return rc;
}

static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf)
{
	struct qed_filter_ucast filter;
	int rc = 0;
	int i;

	memset(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = QED_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (!p_vf->shadow_config.vlans[i].used)
			continue;

		filter.type = QED_FILTER_VLAN;
		filter.vlan = p_vf->shadow_config.vlans[i].vid;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
			   filter.vlan, p_vf->relative_vf_id);
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to configure VLAN [%04x] to VF [%04x]\n",
				  filter.vlan, p_vf->relative_vf_id);
			break;
		}
	}

	return rc;
}

static int
qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
				   struct qed_vf_info *p_vf, u64 events)
{
	int rc = 0;

	if ((events & BIT(VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);

	return rc;
}

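/* Apply hypervisor-forced settings (MAC and/or PVID, as flagged in events)
 * to the VF vport, then restore any shadowed unicast configuration that the
 * forced features had displaced.
 */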
static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
					  struct qed_vf_info *p_vf, u64 events)
{
	int rc = 0;
	struct qed_filter_ucast filter;

	if (!p_vf->vport_instance)
		return -EINVAL;

	if ((events & BIT(MAC_ADDR_FORCED)) ||
	    p_vf->p_vf_info.is_trusted_configured) {
		/* Since there's no way [currently] of removing the MAC,
		 * we can always assume this means we need to force it.
		 */
		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_MAC;
		filter.opcode = QED_FILTER_REPLACE;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);

		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure MAC for VF\n");
			return rc;
		}
		if (p_vf->p_vf_info.is_trusted_configured)
			p_vf->configured_features |=
				BIT(VFPF_BULLETIN_MAC_ADDR);
		else
			p_vf->configured_features |=
				BIT(MAC_ADDR_FORCED);
	}

	if (events & BIT(VLAN_ADDR_FORCED)) {
		struct qed_sp_vport_update_params vport_update;
		u8 removal;
		int i;

		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_VLAN;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		filter.vlan = p_vf->bulletin.p_virt->pvid;
		filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
					      QED_FILTER_FLUSH;

		/* Send the ramrod */
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VLAN for VF\n");
			return rc;
		}

		/* Update the default-vlan & silent vlan stripping */
		memset(&vport_update, 0, sizeof(vport_update));
		vport_update.opaque_fid = p_vf->opaque_fid;
		vport_update.vport_id = p_vf->vport_id;
		vport_update.update_default_vlan_enable_flg = 1;
		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
		vport_update.update_default_vlan_flg = 1;
		vport_update.default_vlan = filter.vlan;

		vport_update.update_inner_vlan_removal_flg = 1;
		removal = filter.vlan ? 1
				      : p_vf->shadow_config.inner_vlan_removal;
		vport_update.inner_vlan_removal_flg = removal;
		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
		rc = qed_sp_vport_update(p_hwfn,
					 &vport_update,
					 QED_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VF vport for vlan\n");
			return rc;
		}

		/* Update all the Rx queues */
		for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
			struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];
			struct qed_queue_cid *p_cid = NULL;

			/* There can be at most 1 Rx queue on qzone. Find it */
			p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
			if (!p_cid)
				continue;

			rc = qed_sp_eth_rx_queues_update(p_hwfn,
							 (void **)&p_cid,
							 1, 0, 1,
							 QED_SPQ_MODE_EBLOCK,
							 NULL);
			if (rc) {
				DP_NOTICE(p_hwfn,
					  "Failed to send Rx update for queue[0x%04x]\n",
					  p_cid->rel.queue_id);
				return rc;
			}
		}

		if (filter.vlan)
			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
		else
			p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
	}

	/* If forced features are terminated, we need to configure the shadow
	 * configuration back again.
	 */
	if (events)
		qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);

	return rc;
}

static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	struct qed_sp_vport_start_params params = { 0 };
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_vport_start_tlv *start;
	u8 status = PFVF_STATUS_SUCCESS;
	struct qed_vf_info *vf_info;
	u64 *p_bitmap;
	int sb_id;
	int rc;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Failed to get VF info, invalid vfid [%d]\n",
			  vf->relative_vf_id);
		return;
	}

	vf->state = VF_ENABLED;
	start = &mbx->req_virt->start_vport;

	qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);

	/* Initialize Status block in CAU */
	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
		if (!start->sb_addr[sb_id]) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] did not fill the address of SB %d\n",
				   vf->relative_vf_id, sb_id);
			break;
		}

		qed_int_cau_conf_sb(p_hwfn, p_ptt,
				    start->sb_addr[sb_id],
				    vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
	}

	vf->mtu = start->mtu;
	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;

	/* Take into consideration configuration forced by hypervisor;
	 * If none is configured, use the supplied VF values [for old
	 * vfs that would still be fine, since they passed '0' as padding].
	 */
	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
	if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
		u8 vf_req = start->only_untagged;

		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
	}

	params.tpa_mode = start->tpa_mode;
	params.remove_inner_vlan = start->inner_vlan_removal;
	params.tx_switching = true;

	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
	params.drop_ttl0 = false;
	params.concrete_fid = vf->concrete_fid;
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
	params.mtu = vf->mtu;

	/* Non trusted VFs should enable control frame filtering */
	params.check_mac = !vf->p_vf_info.is_trusted_configured;

	rc = qed_sp_eth_vport_start(p_hwfn, &params);
	if (rc) {
		DP_ERR(p_hwfn,
		       "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
		status = PFVF_STATUS_FAILURE;
	} else {
		vf->vport_instance++;

		/* Force configuration if needed on the newly opened vport */
		qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);

		__qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
	}
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
			     sizeof(struct pfvf_def_resp_tlv), status);
}

static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	u8 status = PFVF_STATUS_SUCCESS;
	int rc;

	vf->vport_instance--;
	vf->spoof_chk = false;

	if ((qed_iov_validate_active_rxq(p_hwfn, vf)) ||
	    (qed_iov_validate_active_txq(p_hwfn, vf))) {
		vf->b_malicious = true;
		DP_NOTICE(p_hwfn,
			  "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
			  vf->abs_vf_id);
		status = PFVF_STATUS_MALICIOUS;
		goto out;
	}

	rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
	if (rc) {
		DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
		       rc);
		status = PFVF_STATUS_FAILURE;
	}

	/* Forget the configuration on the vport */
	vf->configured_features = 0;
	memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));

out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
			     sizeof(struct pfvf_def_resp_tlv), status);
}

static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  struct qed_vf_info *vf,
					  u8 status, bool b_legacy)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_start_queue_resp_tlv *p_tlv;
	struct vfpf_start_rxq_tlv *req;
	u16 length;

	mbx->offset = (u8 *)mbx->reply_virt;

	/* Taking a bigger struct instead of adding a TLV to list was a
	 * mistake, but one which we're now stuck with, as some older
	 * clients assume the size of the previous response.
	 */
	if (!b_legacy)
		length = sizeof(*p_tlv);
	else
		length = sizeof(struct pfvf_def_resp_tlv);

	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
			    length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	/* Update the TLV with the response */
	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
		req = &mbx->req_virt->start_rxq;
		p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
				offsetof(struct mstorm_vf_zone,
					 non_trigger.eth_rx_queue_producers) +
				sizeof(struct eth_rx_prod_data) * req->rx_qid;
	}

	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}

static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn,
			     struct qed_vf_info *p_vf, bool b_is_tx)
{
	struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
	struct vfpf_qid_tlv *p_qid_tlv;

	/* Search for the qid only if the VF published that it's going to
	 * provide it.
	 */
	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
		if (b_is_tx)
			return QED_IOV_LEGACY_QID_TX;
		else
			return QED_IOV_LEGACY_QID_RX;
	}

	p_qid_tlv = (struct vfpf_qid_tlv *)
		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
					     CHANNEL_TLV_QID);
	if (!p_qid_tlv) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%2x]: Failed to provide qid\n",
			   p_vf->relative_vf_id);

		return QED_IOV_QID_INVALID;
	}

	if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%02x]: Provided qid out-of-bounds %02x\n",
			   p_vf->relative_vf_id, p_qid_tlv->qid);
		return QED_IOV_QID_INVALID;
	}

	return p_qid_tlv->qid;
}

static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	struct qed_queue_start_common_params params;
	struct qed_queue_cid_vf_params vf_params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_NO_RESOURCE;
	u8 qid_usage_idx, vf_legacy = 0;
	struct vfpf_start_rxq_tlv *req;
	struct qed_vf_queue *p_queue;
	struct qed_queue_cid *p_cid;
	struct qed_sb_info sb_dummy;
	int rc;

	req = &mbx->req_virt->start_rxq;

	if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
				  QED_IOV_VALIDATE_Q_DISABLE) ||
	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
		goto out;

	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
	if (qid_usage_idx == QED_IOV_QID_INVALID)
		goto out;

	p_queue = &vf->vf_queues[req->rx_qid];
	if (p_queue->cids[qid_usage_idx].p_cid)
		goto out;

	vf_legacy = qed_vf_calculate_legacy(vf);

	/* Acquire a new queue-cid */
	memset(&params, 0, sizeof(params));
	params.queue_id = p_queue->fw_rx_qid;
	params.vport_id = vf->vport_id;
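	/* The 0x10 bias below presumably places VF statistics IDs after the
	 * 16 PF ones; this is an assumption inferred from the fixed offset
	 * used here, not something stated elsewhere in this flow.
	 */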
	params.stats_id = vf->abs_vf_id + 0x10;
	/* Since IGU index is passed via sb_info, construct a dummy one */
	memset(&sb_dummy, 0, sizeof(sb_dummy));
	sb_dummy.igu_sb_id = req->hw_sb;
	params.p_sb = &sb_dummy;
	params.sb_idx = req->sb_index;

	memset(&vf_params, 0, sizeof(vf_params));
	vf_params.vfid = vf->relative_vf_id;
	vf_params.vf_qid = (u8)req->rx_qid;
	vf_params.vf_legacy = vf_legacy;
	vf_params.qid_usage_idx = qid_usage_idx;
	p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
				     &params, true, &vf_params);
	if (!p_cid)
		goto out;

	/* Legacy VFs have their Producers in a different location, which they
	 * calculate on their own and clean the producer prior to this.
	 */
	if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD))
		REG_WR(p_hwfn,
		       GTT_BAR0_MAP_REG_MSDM_RAM +
		       MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
		       0);

	rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
				      req->bd_max_bytes,
				      req->rxq_addr,
				      req->cqe_pbl_addr, req->cqe_pbl_size);
	if (rc) {
		status = PFVF_STATUS_FAILURE;
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	} else {
		p_queue->cids[qid_usage_idx].p_cid = p_cid;
		p_queue->cids[qid_usage_idx].b_is_tx = false;
		status = PFVF_STATUS_SUCCESS;
		vf->num_active_rxqs++;
	}

out:
	qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
				      !!(vf_legacy &
					 QED_QCID_LEGACY_VF_RX_PROD));
}

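/* Mirror the PF's current tunnel configuration back to the VF reply. */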
static void
qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
			       struct qed_tunnel_info *p_tun,
			       u16 tunn_feature_mask)
{
	p_resp->tunn_feature_mask = tunn_feature_mask;
	p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
	p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
	p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
	p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
	p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
	p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
	p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
	p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
	p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
	p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
	p_resp->geneve_udp_port = p_tun->geneve_port.port;
	p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
}

static void
__qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
			      struct qed_tunn_update_type *p_tun,
			      enum qed_tunn_mode mask, u8 tun_cls)
{
	if (p_req->tun_mode_update_mask & BIT(mask)) {
		p_tun->b_update_mode = true;

		if (p_req->tunn_mode & BIT(mask))
			p_tun->b_mode_enabled = true;
	}

	p_tun->tun_cls = tun_cls;
}

static void
qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
			    struct qed_tunn_update_type *p_tun,
			    struct qed_tunn_update_udp_port *p_port,
			    enum qed_tunn_mode mask,
			    u8 tun_cls, u8 update_port, u16 port)
{
	if (update_port) {
		p_port->b_update_port = true;
		p_port->port = port;
	}

	__qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
}
2215*4882a593Smuzhiyun
static bool
qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
{
	bool b_update_requested = false;

	if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
	    p_req->update_geneve_port || p_req->update_vxlan_port)
		b_update_requested = true;

	return b_update_requested;
}

static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc)
{
	if (tun->b_update_mode && !tun->b_mode_enabled) {
		tun->b_update_mode = false;
		*rc = -EINVAL;
	}
}

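/* Sanitize a VF tunnel-update request against the PF's current state.
 * Updates that would disable a mode, or that use a non-MAC/VLAN
 * classification, are stripped and reported back as -EINVAL; UDP port
 * changes are compared against the currently configured ports and, when
 * they differ, propagated to all VF bulletins.
 */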
static int
qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn,
				   u16 *tun_features, bool *update,
				   struct qed_tunnel_info *tun_src)
{
	struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth;
	struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel;
	u16 bultn_vxlan_port, bultn_geneve_port;
	void *cookie = p_hwfn->cdev->ops_cookie;
	int i, rc = 0;

	*tun_features = p_hwfn->cdev->tunn_feature_mask;
	bultn_vxlan_port = tun->vxlan_port.port;
	bultn_geneve_port = tun->geneve_port.port;
	qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc);
	qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc);
	qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc);
	qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc);
	qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc);

	if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) &&
	    (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
	     tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
	     tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
	     tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
	     tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) {
		tun_src->b_update_rx_cls = false;
		tun_src->b_update_tx_cls = false;
		rc = -EINVAL;
	}

	if (tun_src->vxlan_port.b_update_port) {
		if (tun_src->vxlan_port.port == tun->vxlan_port.port) {
			tun_src->vxlan_port.b_update_port = false;
		} else {
			*update = true;
			bultn_vxlan_port = tun_src->vxlan_port.port;
		}
	}

	if (tun_src->geneve_port.b_update_port) {
		if (tun_src->geneve_port.port == tun->geneve_port.port) {
			tun_src->geneve_port.b_update_port = false;
		} else {
			*update = true;
			bultn_geneve_port = tun_src->geneve_port.port;
		}
	}

	qed_for_each_vf(p_hwfn, i) {
		qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port,
					       bultn_geneve_port);
	}

	qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
	ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port);

	return rc;
}

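/* Handler for the VF's CHANNEL_TLV_UPDATE_TUNN_PARAM mailbox request.
 * The request is validated and possibly trimmed by the PF, applied via
 * a tunnel-configuration ramrod if anything remains to change, and the
 * resulting PF-wide tunnel state is echoed back to the VF.
 */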
static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn,
					     struct qed_ptt *p_ptt,
					     struct qed_vf_info *p_vf)
{
	struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct pfvf_update_tunn_param_tlv *p_resp;
	struct vfpf_update_tunn_param_tlv *p_req;
	u8 status = PFVF_STATUS_SUCCESS;
	bool b_update_required = false;
	struct qed_tunnel_info tunn;
	u16 tunn_feature_mask = 0;
	int i, rc = 0;

	mbx->offset = (u8 *)mbx->reply_virt;

	memset(&tunn, 0, sizeof(tunn));
	p_req = &mbx->req_virt->tunn_param_update;

	if (!qed_iov_pf_validate_tunn_param(p_req)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "No tunnel update requested by VF\n");
		status = PFVF_STATUS_FAILURE;
		goto send_resp;
	}

	tunn.b_update_rx_cls = p_req->update_tun_cls;
	tunn.b_update_tx_cls = p_req->update_tun_cls;

	qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
				    QED_MODE_VXLAN_TUNN, p_req->vxlan_clss,
				    p_req->update_vxlan_port,
				    p_req->vxlan_port);
	qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
				    QED_MODE_L2GENEVE_TUNN,
				    p_req->l2geneve_clss,
				    p_req->update_geneve_port,
				    p_req->geneve_port);
	__qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
				      QED_MODE_IPGENEVE_TUNN,
				      p_req->ipgeneve_clss);
	__qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
				      QED_MODE_L2GRE_TUNN, p_req->l2gre_clss);
	__qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
				      QED_MODE_IPGRE_TUNN, p_req->ipgre_clss);

	/* If the PF modifies the VF's request, it should still return an
	 * error for a partial or modified configuration, as opposed to the
	 * requested one.
	 */
	rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask,
						&b_update_required, &tunn);

	if (rc)
		status = PFVF_STATUS_FAILURE;

	/* Check whether anything is left for the QED client to update */
	if (b_update_required) {
		u16 geneve_port;

		rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
					       QED_SPQ_MODE_EBLOCK, NULL);
		if (rc)
			status = PFVF_STATUS_FAILURE;

		geneve_port = p_tun->geneve_port.port;
		qed_for_each_vf(p_hwfn, i) {
			qed_iov_bulletin_set_udp_ports(p_hwfn, i,
						       p_tun->vxlan_port.port,
						       geneve_port);
		}
	}

send_resp:
	p_resp = qed_add_tlv(p_hwfn, &mbx->offset,
			     CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));

	qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
}

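/* Send the response for a START_TXQ request. Modern VFs receive the
 * doorbell offset of the new Tx queue; legacy VFs (pre-packet-length/
 * tunnelling HSI) only expect the smaller default response.
 */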
static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  struct qed_vf_info *p_vf,
					  u32 cid, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct pfvf_start_queue_resp_tlv *p_tlv;
	bool b_legacy = false;
	u16 length;

	mbx->offset = (u8 *)mbx->reply_virt;

	/* Taking a bigger struct instead of adding a TLV to list was a
	 * mistake, but one which we're now stuck with, as some older
	 * clients assume the size of the previous response.
	 */
	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
		b_legacy = true;

	if (!b_legacy)
		length = sizeof(*p_tlv);
	else
		length = sizeof(struct pfvf_def_resp_tlv);

	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
			    length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	/* Update the TLV with the response */
	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
		p_tlv->offset = qed_db_addr_vf(cid, DQ_DEMS_LEGACY);

	qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
}

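/* Handler for the VF's START_TXQ mailbox request: validate the queue
 * and status-block indices, acquire a queue-cid, fire the Tx-queue
 * start ramrod, and reply with the doorbell offset on success.
 */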
static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	struct qed_queue_start_common_params params;
	struct qed_queue_cid_vf_params vf_params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_NO_RESOURCE;
	struct vfpf_start_txq_tlv *req;
	struct qed_vf_queue *p_queue;
	struct qed_queue_cid *p_cid;
	struct qed_sb_info sb_dummy;
	u8 qid_usage_idx, vf_legacy;
	u32 cid = 0;
	int rc;
	u16 pq;

	memset(&params, 0, sizeof(params));
	req = &mbx->req_virt->start_txq;

	if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
				  QED_IOV_VALIDATE_Q_NA) ||
	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
		goto out;

	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
	if (qid_usage_idx == QED_IOV_QID_INVALID)
		goto out;

	p_queue = &vf->vf_queues[req->tx_qid];
	if (p_queue->cids[qid_usage_idx].p_cid)
		goto out;

	vf_legacy = qed_vf_calculate_legacy(vf);

	/* Acquire a new queue-cid */
	params.queue_id = p_queue->fw_tx_qid;
	params.vport_id = vf->vport_id;
	params.stats_id = vf->abs_vf_id + 0x10;

	/* Since IGU index is passed via sb_info, construct a dummy one */
	memset(&sb_dummy, 0, sizeof(sb_dummy));
	sb_dummy.igu_sb_id = req->hw_sb;
	params.p_sb = &sb_dummy;
	params.sb_idx = req->sb_index;

	memset(&vf_params, 0, sizeof(vf_params));
	vf_params.vfid = vf->relative_vf_id;
	vf_params.vf_qid = (u8)req->tx_qid;
	vf_params.vf_legacy = vf_legacy;
	vf_params.qid_usage_idx = qid_usage_idx;

	p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
				     &params, false, &vf_params);
	if (!p_cid)
		goto out;

	pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id);
	rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
				      req->pbl_addr, req->pbl_size, pq);
	if (rc) {
		status = PFVF_STATUS_FAILURE;
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	} else {
		status = PFVF_STATUS_SUCCESS;
		p_queue->cids[qid_usage_idx].p_cid = p_cid;
		p_queue->cids[qid_usage_idx].b_is_tx = true;
		cid = p_cid->cid;
	}

out:
	qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, cid, status);
}

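/* Close a single VF Rx queue after checking that the queue index is
 * valid, active and really holds an Rx cid at the given qid-usage
 * index.
 */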
static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *vf,
				u16 rxq_id,
				u8 qid_usage_idx, bool cqe_completion)
{
	struct qed_vf_queue *p_queue;
	int rc = 0;

	if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, QED_IOV_VALIDATE_Q_NA)) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
			   vf->relative_vf_id, rxq_id, qid_usage_idx);
		return -EINVAL;
	}

	p_queue = &vf->vf_queues[rxq_id];

	/* We've validated the index and the existence of the active RXQ -
	 * now we need to make sure that it's using the correct qid.
	 */
	if (!p_queue->cids[qid_usage_idx].p_cid ||
	    p_queue->cids[qid_usage_idx].b_is_tx) {
		struct qed_queue_cid *p_cid;

		p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
			   vf->relative_vf_id,
			   rxq_id, qid_usage_idx, rxq_id, p_cid->qid_usage_idx);
		return -EINVAL;
	}

	/* Now that we know we have a valid Rx-queue - close it */
	rc = qed_eth_rx_queue_stop(p_hwfn,
				   p_queue->cids[qid_usage_idx].p_cid,
				   false, cqe_completion);
	if (rc)
		return rc;

	p_queue->cids[qid_usage_idx].p_cid = NULL;
	vf->num_active_rxqs--;

	return 0;
}

static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *vf,
				u16 txq_id, u8 qid_usage_idx)
{
	struct qed_vf_queue *p_queue;
	int rc = 0;

	if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, QED_IOV_VALIDATE_Q_NA))
		return -EINVAL;

	p_queue = &vf->vf_queues[txq_id];
	if (!p_queue->cids[qid_usage_idx].p_cid ||
	    !p_queue->cids[qid_usage_idx].b_is_tx)
		return -EINVAL;

	rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid);
	if (rc)
		return rc;

	p_queue->cids[qid_usage_idx].p_cid = NULL;
	return 0;
}

static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_FAILURE;
	struct vfpf_stop_rxqs_tlv *req;
	u8 qid_usage_idx;
	int rc;

	/* There has never been an official driver that used this interface
	 * for stopping multiple queues, and it is now considered deprecated.
	 * Validate this isn't used here.
	 */
	req = &mbx->req_virt->stop_rxqs;
	if (req->num_rxqs != 1) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Odd; VF[%d] tried stopping multiple Rx queues\n",
			   vf->relative_vf_id);
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	/* Find which qid-index is associated with the queue */
	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
	if (qid_usage_idx == QED_IOV_QID_INVALID)
		goto out;

	rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
				  qid_usage_idx, req->cqe_completion);
	if (!rc)
		status = PFVF_STATUS_SUCCESS;
out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
			     length, status);
}

static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_FAILURE;
	struct vfpf_stop_txqs_tlv *req;
	u8 qid_usage_idx;
	int rc;

	/* There has never been an official driver that used this interface
	 * for stopping multiple queues, and it is now considered deprecated.
	 * Validate this isn't used here.
	 */
	req = &mbx->req_virt->stop_txqs;
	if (req->num_txqs != 1) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Odd; VF[%d] tried stopping multiple Tx queues\n",
			   vf->relative_vf_id);
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	/* Find which qid-index is associated with the queue */
	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
	if (qid_usage_idx == QED_IOV_QID_INVALID)
		goto out;

	rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx);
	if (!rc)
		status = PFVF_STATUS_SUCCESS;

out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
			     length, status);
}

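/* Handler for the VF's UPDATE_RXQ mailbox request: validate the
 * Rx-queue range, collect the matching queue-cids and update them all
 * with a single ramrod.
 */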
static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF];
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_update_rxq_tlv *req;
	u8 status = PFVF_STATUS_FAILURE;
	u8 complete_event_flg;
	u8 complete_cqe_flg;
	u8 qid_usage_idx;
	int rc;
	u8 i;

	req = &mbx->req_virt->update_rxq;
	complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);

	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
	if (qid_usage_idx == QED_IOV_QID_INVALID)
		goto out;

	/* There shouldn't exist a VF that uses queue-qids yet uses this
	 * API with multiple Rx queues. Validate this.
	 */
	if ((vf->acquire.vfdev_info.capabilities &
	     VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] supports QIDs but sends multiple queues\n",
			   vf->relative_vf_id);
		goto out;
	}

	/* Validate inputs - for the legacy case this is still true since
	 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
	 */
	for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
		if (!qed_iov_validate_rxq(p_hwfn, vf, i,
					  QED_IOV_VALIDATE_Q_NA) ||
		    !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
		    vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
				   vf->relative_vf_id, req->rx_qid,
				   req->num_rxqs);
			goto out;
		}
	}

	/* Prepare the handlers */
	for (i = 0; i < req->num_rxqs; i++) {
		u16 qid = req->rx_qid + i;

		handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
	}

	rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
					 req->num_rxqs,
					 complete_cqe_flg,
					 complete_event_flg,
					 QED_SPQ_MODE_EBLOCK, NULL);
	if (rc)
		goto out;

	status = PFVF_STATUS_SUCCESS;
out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
			     length, status);
}

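/* Walk a TLV list and return the first TLV of the requested type, or
 * NULL if the list ends (or is malformed) before such a TLV is found.
 */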
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
			       void *p_tlvs_list, u16 req_type)
{
	struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
	int len = 0;

	do {
		if (!p_tlv->length) {
			DP_NOTICE(p_hwfn, "Zero length TLV found\n");
			return NULL;
		}

		if (p_tlv->type == req_type) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "Extended tlv type %d, length %d found\n",
				   p_tlv->type, p_tlv->length);
			return p_tlv;
		}

		len += p_tlv->length;
		p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);

		if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
			DP_NOTICE(p_hwfn, "TLVs have overrun the buffer size\n");
			return NULL;
		}
	} while (p_tlv->type != CHANNEL_TLV_LIST_END);

	return NULL;
}

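/* The qed_iov_vp_update_*() helpers below each look for one extended
 * VPORT_UPDATE TLV in the VF request, copy its settings into the
 * qed_sp_vport_update_params structure, and mark the corresponding bit
 * in tlvs_mask so the PF can report which TLVs it recognized.
 */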
static void
qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_activate_tlv *p_act_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;

	p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_act_tlv)
		return;

	p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
	p_data->vport_active_rx_flg = p_act_tlv->active_rx;
	p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
	p_data->vport_active_tx_flg = p_act_tlv->active_tx;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
}

static void
qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
			     struct qed_sp_vport_update_params *p_data,
			     struct qed_vf_info *p_vf,
			     struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;

	p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_vlan_tlv)
		return;

	p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;

	/* Ignore the VF request if we're forcing a vlan */
	if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
		p_data->update_inner_vlan_removal_flg = 1;
		p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
	}

	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
}

static void
qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;

	p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
				     tlv);
	if (!p_tx_switch_tlv)
		return;

	p_data->update_tx_switching_flg = 1;
	p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
}

static void
qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;

	p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_mcast_tlv)
		return;

	p_data->update_approx_mcast_flg = 1;
	memcpy(p_data->bins, p_mcast_tlv->bins,
	       sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
}

static void
qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
			      struct qed_sp_vport_update_params *p_data,
			      struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
	struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;

	p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_accept_tlv)
		return;

	p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
	p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
	p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
	p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
}

static void
qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
				  struct qed_sp_vport_update_params *p_data,
				  struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;

	p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
				     tlv);
	if (!p_accept_any_vlan)
		return;

	p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
	p_data->update_accept_any_vlan_flg =
	    p_accept_any_vlan->update_accept_any_vlan_flg;
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
}

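/* RSS is special-cased: the indirection table sent by the VF must be
 * translated into queue-cids, and every referenced Rx queue has to be
 * valid and enabled, otherwise the whole TLV is rejected (masked but
 * not accepted).
 */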
static void
qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
			    struct qed_vf_info *vf,
			    struct qed_sp_vport_update_params *p_data,
			    struct qed_rss_params *p_rss,
			    struct qed_iov_vf_mbx *p_mbx,
			    u16 *tlvs_mask, u16 *tlvs_accepted)
{
	struct vfpf_vport_update_rss_tlv *p_rss_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
	bool b_reject = false;
	u16 table_size;
	u16 i, q_idx;

	p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
	if (!p_rss_tlv) {
		p_data->rss_params = NULL;
		return;
	}

	memset(p_rss, 0, sizeof(struct qed_rss_params));

	p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
				      VFPF_UPDATE_RSS_CONFIG_FLAG);
	p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
					    VFPF_UPDATE_RSS_CAPS_FLAG);
	p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
					 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
	p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
				   VFPF_UPDATE_RSS_KEY_FLAG);

	p_rss->rss_enable = p_rss_tlv->rss_enable;
	p_rss->rss_eng_id = vf->relative_vf_id + 1;
	p_rss->rss_caps = p_rss_tlv->rss_caps;
	p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
	memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));

	table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
			   (1 << p_rss_tlv->rss_table_size_log));

	for (i = 0; i < table_size; i++) {
		struct qed_queue_cid *p_cid;

		q_idx = p_rss_tlv->rss_ind_table[i];
		if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx,
					  QED_IOV_VALIDATE_Q_ENABLE)) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF[%d]: Omitting RSS due to wrong queue %04x\n",
				   vf->relative_vf_id, q_idx);
			b_reject = true;
			goto out;
		}

		p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
		p_rss->rss_ind_table[i] = p_cid;
	}

	p_data->rss_params = p_rss;
out:
	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
	if (!b_reject)
		*tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS;
}

static void
qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *vf,
				struct qed_sp_vport_update_params *p_data,
				struct qed_sge_tpa_params *p_sge_tpa,
				struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
	struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;

	p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
	    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);

	if (!p_sge_tpa_tlv) {
		p_data->sge_tpa_params = NULL;
		return;
	}

	memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));

	p_sge_tpa->update_tpa_en_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
	p_sge_tpa->update_tpa_param_flg =
	    !!(p_sge_tpa_tlv->update_sge_tpa_flags &
	       VFPF_UPDATE_TPA_PARAM_FLAG);

	p_sge_tpa->tpa_ipv4_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
	p_sge_tpa->tpa_ipv6_en_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
	p_sge_tpa->tpa_pkt_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
	p_sge_tpa->tpa_hdr_data_split_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
	p_sge_tpa->tpa_gro_consistent_flg =
	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);

	p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
	p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
	p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
	p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
	p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;

	p_data->sge_tpa_params = p_sge_tpa;

	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
}

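/* Last chance for the PF to sanitize a vport-update before it reaches
 * the device. For untrusted VFs the promiscuous accept flags and the
 * accept-any-vlan setting are silently stripped, while the requested
 * values are traced in the public VF info.
 */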
static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
				    u8 vfid,
				    struct qed_sp_vport_update_params *params,
				    u16 *tlvs)
{
	u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
	struct qed_filter_accept_flags *flags = &params->accept_flags;
	struct qed_public_vf_info *vf_info;
	u16 tlv_mask;

	tlv_mask = BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM) |
		   BIT(QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN);

	/* Untrusted VFs can't even be trusted to know that fact.
	 * Simply indicate everything is configured fine, and trace
	 * configuration 'behind their back'.
	 */
	if (!(*tlvs & tlv_mask))
		return 0;

	vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);

	if (flags->update_rx_mode_config) {
		vf_info->rx_accept_mode = flags->rx_accept_filter;
		if (!vf_info->is_trusted_configured)
			flags->rx_accept_filter &= ~mask;
	}

	if (flags->update_tx_mode_config) {
		vf_info->tx_accept_mode = flags->tx_accept_filter;
		if (!vf_info->is_trusted_configured)
			flags->tx_accept_filter &= ~mask;
	}

	if (params->update_accept_any_vlan_flg) {
		vf_info->accept_any_vlan = params->accept_any_vlan;

		if (vf_info->forced_vlan && !vf_info->is_trusted_configured)
			params->accept_any_vlan = false;
	}

	return 0;
}

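/* Handler for the VF's VPORT_UPDATE mailbox request: parse all extended
 * TLVs into a single qed_sp_vport_update_params, let the upper layer
 * veto or trim the request, execute the vport-update ramrod and report
 * back which TLVs were recognized and which were accepted.
 */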
static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					struct qed_vf_info *vf)
{
	struct qed_rss_params *p_rss_params = NULL;
	struct qed_sp_vport_update_params params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct qed_sge_tpa_params sge_tpa_params;
	u16 tlvs_mask = 0, tlvs_accepted = 0;
	u8 status = PFVF_STATUS_SUCCESS;
	u16 length;
	int rc;

	/* Validate that the VF can send such a request */
	if (!vf->vport_instance) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "No VPORT instance available for VF[%d], failing vport update\n",
			   vf->abs_vf_id);
		status = PFVF_STATUS_FAILURE;
		goto out;
	}
	p_rss_params = vzalloc(sizeof(*p_rss_params));
	if (!p_rss_params) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	memset(&params, 0, sizeof(params));
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.rss_params = NULL;

	/* Search for extended tlvs list and update values
	 * from VF in struct qed_sp_vport_update_params.
	 */
	qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
	qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
	qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
					&sge_tpa_params, mbx, &tlvs_mask);

	tlvs_accepted = tlvs_mask;

	/* Some of the extended TLVs need to be validated first; In that case,
	 * they can update the mask without updating the accepted [so that
	 * PF could communicate to VF it has rejected request].
	 */
	qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
				    mbx, &tlvs_mask, &tlvs_accepted);

	if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id,
				     &params, &tlvs_accepted)) {
		tlvs_accepted = 0;
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	if (!tlvs_accepted) {
		if (tlvs_mask)
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "Upper-layer prevents VF vport configuration\n");
		else
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "No feature tlvs found for vport update\n");
		status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}

	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);

	if (rc)
		status = PFVF_STATUS_FAILURE;

out:
	vfree(p_rss_params);
	length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
						  tlvs_mask, tlvs_accepted);
	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}

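/* Keep the shadow VLAN-filter configuration of a VF in sync with a
 * unicast filter request, so the PF can later re-apply the filters.
 * Additions are refused while a VLAN is being forced on the VF.
 */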
static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
					 struct qed_vf_info *p_vf,
					 struct qed_filter_ucast *p_params)
{
	int i;

	/* First remove entries and then add new ones */
	if (p_params->opcode == QED_FILTER_REMOVE) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			if (p_vf->shadow_config.vlans[i].used &&
			    p_vf->shadow_config.vlans[i].vid ==
			    p_params->vlan) {
				p_vf->shadow_config.vlans[i].used = false;
				break;
			}
		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF [%d] - Tries to remove a non-existing vlan\n",
				   p_vf->relative_vf_id);
			return -EINVAL;
		}
	} else if (p_params->opcode == QED_FILTER_REPLACE ||
		   p_params->opcode == QED_FILTER_FLUSH) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
			p_vf->shadow_config.vlans[i].used = false;
	}

	/* In forced mode, we're willing to remove entries - but we don't add
	 * new ones.
	 */
	if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
		return 0;

	if (p_params->opcode == QED_FILTER_ADD ||
	    p_params->opcode == QED_FILTER_REPLACE) {
		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
			if (p_vf->shadow_config.vlans[i].used)
				continue;

			p_vf->shadow_config.vlans[i].used = true;
			p_vf->shadow_config.vlans[i].vid = p_params->vlan;
			break;
		}

		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF [%d] - Tries to configure more than %d vlan filters\n",
				   p_vf->relative_vf_id,
				   QED_ETH_VF_NUM_VLAN_FILTERS + 1);
			return -EINVAL;
		}
	}

	return 0;
}

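/* Same idea for MAC filters: mirror the VF's unicast MAC requests in
 * the shadow configuration, unless a MAC is forced or the VF is in the
 * trusted flow, where no restore is intended.
 */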
static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf,
					struct qed_filter_ucast *p_params)
{
	int i;

	/* If we're in forced-mode, we don't allow any change */
	if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
		return 0;

	/* Don't keep track of shadow copy since we don't intend to restore. */
	if (p_vf->p_vf_info.is_trusted_configured)
		return 0;

	/* First remove entries and then add new ones */
	if (p_params->opcode == QED_FILTER_REMOVE) {
		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
			if (ether_addr_equal(p_vf->shadow_config.macs[i],
					     p_params->mac)) {
				eth_zero_addr(p_vf->shadow_config.macs[i]);
				break;
			}
		}

		if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "MAC isn't configured\n");
			return -EINVAL;
		}
	} else if (p_params->opcode == QED_FILTER_REPLACE ||
		   p_params->opcode == QED_FILTER_FLUSH) {
		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
			eth_zero_addr(p_vf->shadow_config.macs[i]);
	}

	/* List the new MAC address */
	if (p_params->opcode != QED_FILTER_ADD &&
	    p_params->opcode != QED_FILTER_REPLACE)
		return 0;

	for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
		if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
			ether_addr_copy(p_vf->shadow_config.macs[i],
					p_params->mac);
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "Added MAC at %d entry in shadow\n", i);
			break;
		}
	}

	if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
		return -EINVAL;
	}

	return 0;
}

static int
qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 struct qed_filter_ucast *p_params)
{
	int rc = 0;

	if (p_params->type == QED_FILTER_MAC) {
		rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
		if (rc)
			return rc;
	}

	if (p_params->type == QED_FILTER_VLAN)
		rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);

	return rc;
}

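/* Record the MAC a VF has configured in its public info; for trusted
 * VFs the new MAC is also published immediately through the bulletin
 * board.
 */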
static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
			     int vfid, struct qed_filter_ucast *params)
{
	struct qed_public_vf_info *vf;

	vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
	if (!vf)
		return -EINVAL;

	/* No real decision to make; Store the configured MAC */
	if (params->type == QED_FILTER_MAC ||
	    params->type == QED_FILTER_MAC_VLAN) {
		ether_addr_copy(vf->mac, params->mac);

		if (vf->is_trusted_configured) {
			qed_iov_bulletin_set_mac(hwfn, vf->mac, vfid);

			/* Update and post the bulletin again */
			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
		}
	}

	return 0;
}

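/* Handler for the VF's UCAST_FILTER mailbox request: update the shadow
 * configuration, enforce any forced MAC/VLAN from the bulletin board,
 * and only then pass the unicast filter to the device.
 */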
static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					struct qed_vf_info *vf)
{
	struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_ucast_filter_tlv *req;
	u8 status = PFVF_STATUS_SUCCESS;
	struct qed_filter_ucast params;
	int rc;

	/* Prepare the unicast filter params */
	memset(&params, 0, sizeof(struct qed_filter_ucast));
	req = &mbx->req_virt->ucast_filter;
	params.opcode = (enum qed_filter_opcode)req->opcode;
	params.type = (enum qed_filter_ucast_type)req->type;

	params.is_rx_filter = 1;
	params.is_tx_filter = 1;
	params.vport_to_remove_from = vf->vport_id;
	params.vport_to_add_to = vf->vport_id;
	memcpy(params.mac, req->mac, ETH_ALEN);
	params.vlan = req->vlan;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %pM, vlan 0x%04x\n",
		   vf->abs_vf_id, params.opcode, params.type,
		   params.is_rx_filter ? "RX" : "",
		   params.is_tx_filter ? "TX" : "",
		   params.vport_to_add_to,
		   params.mac, params.vlan);

	if (!vf->vport_instance) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
			   vf->abs_vf_id);
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Update shadow copy of the VF configuration */
	if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Determine if the unicast filtering is acceptable by PF */
	if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
	    (params.type == QED_FILTER_VLAN ||
	     params.type == QED_FILTER_MAC_VLAN)) {
		/* Once VLAN is forced or PVID is set, do not allow
		 * to add/replace any further VLANs.
		 */
		if (params.opcode == QED_FILTER_ADD ||
		    params.opcode == QED_FILTER_REPLACE)
			status = PFVF_STATUS_FORCED;
		goto out;
	}

	if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
	    (params.type == QED_FILTER_MAC ||
	     params.type == QED_FILTER_MAC_VLAN)) {
		if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
		    (params.opcode != QED_FILTER_ADD &&
		     params.opcode != QED_FILTER_REPLACE))
			status = PFVF_STATUS_FORCED;
		goto out;
	}

	rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
	if (rc) {
		status = PFVF_STATUS_FAILURE;
		goto out;
	}

	rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
				     QED_SPQ_MODE_CB, NULL);
	if (rc)
		status = PFVF_STATUS_FAILURE;

out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
			     sizeof(struct pfvf_def_resp_tlv), status);
}

static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	int i;

	/* Reset the SBs */
	for (i = 0; i < vf->num_sbs; i++)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						vf->igu_sbs[i],
						vf->opaque_fid, false);

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
			     sizeof(struct pfvf_def_resp_tlv),
			     PFVF_STATUS_SUCCESS);
}

static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	u8 status = PFVF_STATUS_SUCCESS;

	/* Disable Interrupts for VF */
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
			     length, status);
}

static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *p_vf)
{
	u16 length = sizeof(struct pfvf_def_resp_tlv);
	u8 status = PFVF_STATUS_SUCCESS;
	int rc = 0;

	qed_iov_vf_cleanup(p_hwfn, p_vf);

	if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
		/* Stopping the VF */
		rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
				    p_vf->opaque_fid);

		if (rc) {
			DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
			       rc);
			status = PFVF_STATUS_FAILURE;
		}

		p_vf->state = VF_STOPPED;
	}

	qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
			     length, status);
}

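/* Mailbox handler for CHANNEL_TLV_COALESCE_READ: validate the queue id,
 * read the current Rx or Tx interrupt coalescing value for it, and
 * return the value to the VF in a pfvf_read_coal_resp_tlv.
 */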
static void qed_iov_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *p_vf)
{
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct pfvf_read_coal_resp_tlv *p_resp;
	struct vfpf_read_coal_req_tlv *req;
	u8 status = PFVF_STATUS_FAILURE;
	struct qed_vf_queue *p_queue;
	struct qed_queue_cid *p_cid;
	u16 coal = 0, qid, i;
	bool b_is_rx;
	int rc = 0;

	mbx->offset = (u8 *)mbx->reply_virt;
	req = &mbx->req_virt->read_coal_req;

	qid = req->qid;
	b_is_rx = req->is_rx ? true : false;

	if (b_is_rx) {
		if (!qed_iov_validate_rxq(p_hwfn, p_vf, qid,
					  QED_IOV_VALIDATE_Q_ENABLE)) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d]: Invalid Rx queue_id = %d\n",
				   p_vf->abs_vf_id, qid);
			goto send_resp;
		}

		p_cid = qed_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
		rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
		if (rc)
			goto send_resp;
	} else {
		if (!qed_iov_validate_txq(p_hwfn, p_vf, qid,
					  QED_IOV_VALIDATE_Q_ENABLE)) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d]: Invalid Tx queue_id = %d\n",
				   p_vf->abs_vf_id, qid);
			goto send_resp;
		}
		for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
			p_queue = &p_vf->vf_queues[qid];
			if ((!p_queue->cids[i].p_cid) ||
			    (!p_queue->cids[i].b_is_tx))
				continue;

			p_cid = p_queue->cids[i].p_cid;

			rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
			if (rc)
				goto send_resp;
			break;
		}
	}

	status = PFVF_STATUS_SUCCESS;

send_resp:
	p_resp = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_COALESCE_READ,
			     sizeof(*p_resp));
	p_resp->coal = coal;

	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
}

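/* Mailbox handler for CHANNEL_TLV_COALESCE_UPDATE. A coalescing value of
 * zero means "leave this direction unchanged", so each queue id is only
 * validated for a direction whose new value is non-zero. A Tx update is
 * applied to every Tx cid sharing the queue-zone of the requested queue.
 */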
static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_update_coalesce *req;
	u8 status = PFVF_STATUS_FAILURE;
	struct qed_queue_cid *p_cid;
	u16 rx_coal, tx_coal;
	int rc = 0, i;
	u16 qid;

	req = &mbx->req_virt->update_coalesce;

	rx_coal = req->rx_coal;
	tx_coal = req->tx_coal;
	qid = req->qid;

	if (!qed_iov_validate_rxq(p_hwfn, vf, qid,
				  QED_IOV_VALIDATE_Q_ENABLE) && rx_coal) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d]: Invalid Rx queue_id = %d\n",
			   vf->abs_vf_id, qid);
		goto out;
	}

	if (!qed_iov_validate_txq(p_hwfn, vf, qid,
				  QED_IOV_VALIDATE_Q_ENABLE) && tx_coal) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d]: Invalid Tx queue_id = %d\n",
			   vf->abs_vf_id, qid);
		goto out;
	}

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
		   vf->abs_vf_id, rx_coal, tx_coal, qid);

	if (rx_coal) {
		p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);

		rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
		if (rc) {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF[%d]: Unable to set rx queue = %d coalesce\n",
				   vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
			goto out;
		}
		vf->rx_coal = rx_coal;
	}

	if (tx_coal) {
		struct qed_vf_queue *p_queue = &vf->vf_queues[qid];

		for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
			if (!p_queue->cids[i].p_cid)
				continue;

			if (!p_queue->cids[i].b_is_tx)
				continue;

			rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
						  p_queue->cids[i].p_cid);

			if (rc) {
				DP_VERBOSE(p_hwfn,
					   QED_MSG_IOV,
					   "VF[%d]: Unable to set tx queue coalesce\n",
					   vf->abs_vf_id);
				goto out;
			}
		}
		vf->tx_coal = tx_coal;
	}

	status = PFVF_STATUS_SUCCESS;
out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
			     sizeof(struct pfvf_def_resp_tlv), status);
}

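/* FLR cleanup step 1: pretend to the VF's function id and poll the DORQ
 * usage counter until all of the VF's doorbells have drained (up to
 * ~1 second: 50 iterations x 20ms), then restore the PF's own fid.
 */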
static int
qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
			 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
	int cnt;
	u32 val;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);

	for (cnt = 0; cnt < 50; cnt++) {
		val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
		if (!val)
			break;
		msleep(20);
	}
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	if (cnt == 50) {
		DP_ERR(p_hwfn,
		       "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
		       p_vf->abs_vf_id, val);
		return -EBUSY;
	}

	return 0;
}

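/* FLR cleanup step 2: snapshot the producer/consumer distance of every
 * PBF virtual output queue, then poll until each consumer has advanced
 * at least that distance, i.e. all Tx data that was in flight before
 * the FLR has drained out of the PBF.
 */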
static int
qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
			struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
	u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
	int i, cnt;

	/* Read initial consumers & producers */
	for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
		u32 prod;

		cons[i] = qed_rd(p_hwfn, p_ptt,
				 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
				 i * 0x40);
		prod = qed_rd(p_hwfn, p_ptt,
			      PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
			      i * 0x40);
		distance[i] = prod - cons[i];
	}

	/* Wait for consumers to pass the producers */
	i = 0;
	for (cnt = 0; cnt < 50; cnt++) {
		for (; i < MAX_NUM_VOQS_E4; i++) {
			u32 tmp;

			tmp = qed_rd(p_hwfn, p_ptt,
				     PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
				     i * 0x40);
			if (distance[i] > tmp - cons[i])
				break;
		}

		if (i == MAX_NUM_VOQS_E4)
			break;

		msleep(20);
	}

	if (cnt == 50) {
		DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
		       p_vf->abs_vf_id, i);
		return -EBUSY;
	}

	return 0;
}

static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
			       struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
	int rc;

	rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
	if (rc)
		return rc;

	rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
	if (rc)
		return rc;

	return 0;
}

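/* Handle FLR for a single VF: clean its SW state, drain DORQ/PBF, run
 * the firmware's final cleanup, re-open the VF-PF channel, re-enable
 * the VF's access, and mark the VF in ack_vfs so that all FLR-ed VFs
 * can later be ACKed to the MFW in one shot.
 */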
static int
qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt,
			       u16 rel_vf_id, u32 *ack_vfs)
{
	struct qed_vf_info *p_vf;
	int rc = 0;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
	if (!p_vf)
		return 0;

	if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
	    (1ULL << (rel_vf_id % 64))) {
		u16 vfid = p_vf->abs_vf_id;

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - Handling FLR\n", vfid);

		qed_iov_vf_cleanup(p_hwfn, p_vf);

		/* If VF isn't active, no need for anything but SW */
		if (!p_vf->b_init)
			goto cleanup;

		rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
		if (rc)
			goto cleanup;

		rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
		if (rc) {
			DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n",
			       vfid);
			return rc;
		}

		/* Workaround to make VF-PF channel ready, as FW
		 * doesn't do that as a part of FLR.
		 */
		REG_WR(p_hwfn,
		       GTT_BAR0_MAP_REG_USDM_RAM +
		       USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);

		/* VF_STOPPED has to be set only after final cleanup
		 * but prior to re-enabling the VF.
		 */
		p_vf->state = VF_STOPPED;

		rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
		if (rc) {
			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
			       vfid);
			return rc;
		}
cleanup:
		/* Mark VF for ack and clean pending state */
		if (p_vf->state == VF_RESET)
			p_vf->state = VF_STOPPED;
		ack_vfs[vfid / 32] |= BIT((vfid % 32));
		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
		    ~(1ULL << (rel_vf_id % 64));
		p_vf->vf_mbx.b_pending_msg = false;
	}

	return rc;
}

static int
qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 ack_vfs[VF_MAX_STATIC / 32];
	int rc = 0;
	u16 i;

	memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));

	/* Since BRB <-> PRS interface can't be tested as part of the flr
	 * polling due to HW limitations, simply sleep a bit. And since
	 * there's no need to wait per-vf, do it before looping.
	 */
	msleep(100);

	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
		qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);

	rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
	return rc;
}

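/* Translate the MFW's disabled-VFs bitmap into per-VF state: every VF
 * found in the bitmap is moved to VF_RESET and marked in pending_flr so
 * the IOV worker will run the actual cleanup. Returns true if at least
 * one VF was marked.
 */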
bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
{
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "[%08x,...,%08x]: %08x\n",
			   i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);

	if (!p_hwfn->cdev->p_iov_info) {
		DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
		return false;
	}

	/* Mark VFs */
	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
		struct qed_vf_info *p_vf;
		u8 vfid;

		p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
		if (!p_vf)
			continue;

		vfid = p_vf->abs_vf_id;
		if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) {
			u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
			u16 rel_vf_id = p_vf->relative_vf_id;

			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] [rel %d] got FLR-ed\n",
				   vfid, rel_vf_id);

			p_vf->state = VF_RESET;

			/* No need to lock here, since pending_flr should
			 * only change here and before ACKing the MFW. Since
			 * the MFW will not trigger an additional attention
			 * for VF FLR until the previous one is ACKed, we're
			 * safe.
			 */
			p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
			found = true;
		}
	}

	return found;
}

static int qed_iov_get_link(struct qed_hwfn *p_hwfn,
			    u16 vfid,
			    struct qed_mcp_link_params *p_params,
			    struct qed_mcp_link_state *p_link,
			    struct qed_mcp_link_capabilities *p_caps)
{
	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
						       vfid,
						       false);
	struct qed_bulletin_content *p_bulletin;

	if (!p_vf)
		return -EINVAL;

	p_bulletin = p_vf->bulletin.p_virt;

	if (p_params)
		__qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
	if (p_link)
		__qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
	if (p_caps)
		__qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
	return 0;
}

static int
qed_iov_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_vf_info *p_vf)
{
	struct qed_bulletin_content *p_bulletin = p_vf->bulletin.p_virt;
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct vfpf_bulletin_update_mac_tlv *p_req;
	u8 status = PFVF_STATUS_SUCCESS;
	int rc = 0;

	if (!p_vf->p_vf_info.is_trusted_configured) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "Blocking bulletin update request from untrusted VF[%d]\n",
			   p_vf->abs_vf_id);
		status = PFVF_STATUS_NOT_SUPPORTED;
		rc = -EINVAL;
		goto send_status;
	}

	p_req = &mbx->req_virt->bulletin_update_mac;
	ether_addr_copy(p_bulletin->mac, p_req->mac);
	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Updated bulletin of VF[%d] with requested MAC[%pM]\n",
		   p_vf->abs_vf_id, p_req->mac);

send_status:
	qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
			     CHANNEL_TLV_BULLETIN_UPDATE_MAC,
			     sizeof(struct pfvf_def_resp_tlv), status);
	return rc;
}

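/* Main dispatcher for VF->PF mailbox requests. A known TLV from a
 * well-behaved VF is routed to its handler; a known TLV from a VF that
 * was flagged malicious is answered with PFVF_STATUS_MALICIOUS; an
 * unknown TLV is answered with PFVF_STATUS_NOT_SUPPORTED when a valid
 * reply address is available.
 */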
static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt, int vfid)
{
	struct qed_iov_vf_mbx *mbx;
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf)
		return;

	mbx = &p_vf->vf_mbx;

	/* qed_iov_process_mbx_request */
	if (!mbx->b_pending_msg) {
		DP_NOTICE(p_hwfn,
			  "VF[%02x]: Trying to process mailbox message when none is pending\n",
			  p_vf->abs_vf_id);
		return;
	}
	mbx->b_pending_msg = false;

	mbx->first_tlv = mbx->req_virt->first_tlv;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF[%02x]: Processing mailbox message [type %04x]\n",
		   p_vf->abs_vf_id, mbx->first_tlv.tl.type);

	/* check if tlv type is known */
	if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
	    !p_vf->b_malicious) {
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_START:
			qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_TEARDOWN:
			qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_RXQ:
			qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_START_TXQ:
			qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_RXQS:
			qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_STOP_TXQS:
			qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UPDATE_RXQ:
			qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_VPORT_UPDATE:
			qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UCAST_FILTER:
			qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_CLOSE:
			qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_INT_CLEANUP:
			qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_RELEASE:
			qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_UPDATE_TUNN_PARAM:
			qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_COALESCE_UPDATE:
			qed_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_COALESCE_READ:
			qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
			break;
		case CHANNEL_TLV_BULLETIN_UPDATE_MAC:
			qed_iov_vf_pf_bulletin_update_mac(p_hwfn, p_ptt, p_vf);
			break;
		}
	} else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
			   p_vf->abs_vf_id, mbx->first_tlv.tl.type);

		qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
				     mbx->first_tlv.tl.type,
				     sizeof(struct pfvf_def_resp_tlv),
				     PFVF_STATUS_MALICIOUS);
	} else {
		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver was written, which
		 * supports features unknown as of yet. Too bad since we don't
		 * support them. Or this may be because someone wrote a crappy
		 * VF driver and is sending garbage over the channel.
		 */
		DP_NOTICE(p_hwfn,
			  "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
			  p_vf->abs_vf_id,
			  mbx->first_tlv.tl.type,
			  mbx->first_tlv.tl.length,
			  mbx->first_tlv.padding, mbx->first_tlv.reply_address);

		/* Try replying in case reply address matches the acquisition's
		 * posted address.
		 */
		if (p_vf->acquire.first_tlv.reply_address &&
		    (mbx->first_tlv.reply_address ==
		     p_vf->acquire.first_tlv.reply_address)) {
			qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
					     mbx->first_tlv.tl.type,
					     sizeof(struct pfvf_def_resp_tlv),
					     PFVF_STATUS_NOT_SUPPORTED);
		} else {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF[%02x]: Can't respond to TLV - no valid reply address\n",
				   p_vf->abs_vf_id);
		}
	}
}

static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
{
	int i;

	memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);

	qed_for_each_vf(p_hwfn, i) {
		struct qed_vf_info *p_vf;

		p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
		if (p_vf->vf_mbx.b_pending_msg)
			events[i / 64] |= 1ULL << (i % 64);
	}
}

static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
						       u16 abs_vfid)
{
	u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf;

	if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
			   abs_vfid);
		return NULL;
	}

	return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min];
}

static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
			      u16 abs_vfid, struct regpair *vf_msg)
{
	struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn,
							       abs_vfid);

	if (!p_vf)
		return 0;

	/* List the physical address of the request so that handler
	 * could later on copy the message from it.
	 */
	p_vf->vf_mbx.pending_req = HILO_64(vf_msg->hi, vf_msg->lo);

	/* Mark the event and schedule the workqueue */
	p_vf->vf_mbx.b_pending_msg = true;
	qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);

	return 0;
}

static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
				     struct malicious_vf_eqe_data *p_data)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);

	if (!p_vf)
		return;

	if (!p_vf->b_malicious) {
		DP_NOTICE(p_hwfn,
			  "VF [%d] - Malicious behavior [%02x]\n",
			  p_vf->abs_vf_id, p_data->err_id);

		p_vf->b_malicious = true;
	} else {
		DP_INFO(p_hwfn,
			"VF [%d] - Malicious behavior [%02x]\n",
			p_vf->abs_vf_id, p_data->err_id);
	}
}

static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
			       union event_ring_data *data, u8 fw_return_code)
{
	switch (opcode) {
	case COMMON_EVENT_VF_PF_CHANNEL:
		return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
					  &data->vf_pf_channel.msg_addr);
	case COMMON_EVENT_MALICIOUS_VF:
		qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
		return 0;
	default:
		DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
			opcode);
		return -EINVAL;
	}
}

u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	u16 i;

	if (!p_iov)
		goto out;

	/* Validate each candidate vfid in turn, not just rel_vf_id */
	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
		if (qed_iov_is_valid_vfid(p_hwfn, i, true, false))
			return i;

out:
	return MAX_NUM_VFS;
}

static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
			       int vfid)
{
	struct qed_dmae_params params;
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return -EINVAL;

	memset(&params, 0, sizeof(params));
	SET_FIELD(params.flags, QED_DMAE_PARAMS_SRC_VF_VALID, 0x1);
	SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 0x1);
	params.src_vfid = vf_info->abs_vf_id;

	if (qed_dmae_host2host(p_hwfn, ptt,
			       vf_info->vf_mbx.pending_req,
			       vf_info->vf_mbx.req_phys,
			       sizeof(union vfpf_tlvs) / 4, &params)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Failed to copy message from VF 0x%02x\n", vfid);

		return -EIO;
	}

	return 0;
}

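/* Publish a MAC for a VF through its bulletin board. In trust mode the
 * address is advertised as a plain (changeable) MAC and the forced-MAC
 * bit is cleared; otherwise it is advertised as a forced MAC, which the
 * VF must adopt and cannot override.
 */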
static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
					    u8 *mac, int vfid)
{
	struct qed_vf_info *vf_info;
	u64 feature;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can not set forced MAC, invalid vfid [%d]\n", vfid);
		return;
	}

	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can't set forced MAC to malicious VF [%d]\n", vfid);
		return;
	}

	if (vf_info->p_vf_info.is_trusted_configured) {
		feature = BIT(VFPF_BULLETIN_MAC_ADDR);
		/* Trust mode will disable Forced MAC */
		vf_info->bulletin.p_virt->valid_bitmap &=
			~BIT(MAC_ADDR_FORCED);
	} else {
		feature = BIT(MAC_ADDR_FORCED);
		/* Forced MAC will disable MAC_ADDR */
		vf_info->bulletin.p_virt->valid_bitmap &=
			~BIT(VFPF_BULLETIN_MAC_ADDR);
	}

	memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid)
{
	struct qed_vf_info *vf_info;
	u64 feature;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev, "Can not set MAC, invalid vfid [%d]\n",
			  vfid);
		return -EINVAL;
	}

	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->cdev, "Can't set MAC to malicious VF [%d]\n",
			  vfid);
		return -EINVAL;
	}

	if (vf_info->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Can not set MAC, Forced MAC is configured\n");
		return -EINVAL;
	}

	feature = BIT(VFPF_BULLETIN_MAC_ADDR);
	ether_addr_copy(vf_info->bulletin.p_virt->mac, mac);

	vf_info->bulletin.p_virt->valid_bitmap |= feature;

	if (vf_info->p_vf_info.is_trusted_configured)
		qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);

	return 0;
}

static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
					     u16 pvid, int vfid)
{
	struct qed_vf_info *vf_info;
	u64 feature;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can not set forced VLAN, invalid vfid [%d]\n", vfid);
		return;
	}

	if (vf_info->b_malicious) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can't set forced vlan to malicious VF [%d]\n", vfid);
		return;
	}

	feature = 1 << VLAN_ADDR_FORCED;
	vf_info->bulletin.p_virt->pvid = pvid;
	if (pvid)
		vf_info->bulletin.p_virt->valid_bitmap |= feature;
	else
		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;

	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}

void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
				    int vfid, u16 vxlan_port, u16 geneve_port)
{
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Can not set udp ports, invalid vfid [%d]\n", vfid);
		return;
	}

	if (vf_info->b_malicious) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Can not set udp ports to malicious VF [%d]\n",
			   vfid);
		return;
	}

	vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
	vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
}

static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *p_vf_info;

	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf_info)
		return false;

	return !!p_vf_info->vport_instance;
}

static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *p_vf_info;

	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf_info)
		return true;

	return p_vf_info->state == VF_STOPPED;
}

static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return false;

	return vf_info->spoof_chk;
}

static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
{
	struct qed_vf_info *vf;
	int rc = -EINVAL;

	if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
		DP_NOTICE(p_hwfn,
			  "SR-IOV sanity check failed, can't set spoofchk\n");
		goto out;
	}

	vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf)
		goto out;

	if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
		/* After VF VPORT start PF will configure spoof check */
		vf->req_spoofchk_val = val;
		rc = 0;
		goto out;
	}

	rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);

out:
	return rc;
}

static u8 *qed_iov_bulletin_get_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap &
	      BIT(VFPF_BULLETIN_MAC_ADDR)))
		return NULL;

	return p_vf->bulletin.p_virt->mac;
}

static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
					   u16 rel_vf_id)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return NULL;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
		return NULL;

	return p_vf->bulletin.p_virt->mac;
}

static u16
qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
{
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!p_vf || !p_vf->bulletin.p_virt)
		return 0;

	if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
		return 0;

	return p_vf->bulletin.p_virt->pvid;
}

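/* Set a VF's maximum Tx rate: resolve the VF vport's absolute id and
 * program the matching global rate limiter with the requested value.
 */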
static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, int vfid, int val)
{
	struct qed_vf_info *vf;
	u8 abs_vp_id = 0;
	u16 rl_id;
	int rc;

	vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!vf)
		return -EINVAL;

	rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
	if (rc)
		return rc;

	rl_id = abs_vp_id;	/* The "rl_id" is set as the "vport_id" */
	return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);
}

static int
qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
{
	struct qed_vf_info *vf;
	u8 vport_id;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn,
				  "SR-IOV sanity check failed, can't set min rate\n");
			return -EINVAL;
		}
	}

	vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
	vport_id = vf->vport_id;

	return qed_configure_vport_wfq(cdev, vport_id, rate);
}

static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
{
	struct qed_wfq_data *vf_vp_wfq;
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!vf_info)
		return 0;

	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];

	if (vf_vp_wfq->configured)
		return vf_vp_wfq->min_speed;
	else
		return 0;
}

/**
 * qed_schedule_iov - schedules IOV task for VF and PF
 * @hwfn: hardware function pointer
 * @flag: IOV flag for VF/PF
 */
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
{
	smp_mb__before_atomic();
	set_bit(flag, &hwfn->iov_task_flags);
	smp_mb__after_atomic();
	DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
}

qed_vf_start_iov_wq(struct qed_dev * cdev)4417*4882a593Smuzhiyun void qed_vf_start_iov_wq(struct qed_dev *cdev)
4418*4882a593Smuzhiyun {
4419*4882a593Smuzhiyun int i;
4420*4882a593Smuzhiyun
4421*4882a593Smuzhiyun for_each_hwfn(cdev, i)
4422*4882a593Smuzhiyun queue_delayed_work(cdev->hwfns[i].iov_wq,
4423*4882a593Smuzhiyun &cdev->hwfns[i].iov_task, 0);
4424*4882a593Smuzhiyun }
4425*4882a593Smuzhiyun
qed_sriov_disable(struct qed_dev * cdev,bool pci_enabled)4426*4882a593Smuzhiyun int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
4427*4882a593Smuzhiyun {
4428*4882a593Smuzhiyun int i, j;
4429*4882a593Smuzhiyun
4430*4882a593Smuzhiyun for_each_hwfn(cdev, i)
4431*4882a593Smuzhiyun if (cdev->hwfns[i].iov_wq)
4432*4882a593Smuzhiyun flush_workqueue(cdev->hwfns[i].iov_wq);
4433*4882a593Smuzhiyun
4434*4882a593Smuzhiyun /* Mark VFs for disablement */
4435*4882a593Smuzhiyun qed_iov_set_vfs_to_disable(cdev, true);
4436*4882a593Smuzhiyun
4437*4882a593Smuzhiyun if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
4438*4882a593Smuzhiyun pci_disable_sriov(cdev->pdev);
4439*4882a593Smuzhiyun
4440*4882a593Smuzhiyun if (cdev->recov_in_prog) {
4441*4882a593Smuzhiyun DP_VERBOSE(cdev,
4442*4882a593Smuzhiyun QED_MSG_IOV,
4443*4882a593Smuzhiyun "Skip SRIOV disable operations in the device since a recovery is in progress\n");
4444*4882a593Smuzhiyun goto out;
4445*4882a593Smuzhiyun }
4446*4882a593Smuzhiyun
4447*4882a593Smuzhiyun for_each_hwfn(cdev, i) {
4448*4882a593Smuzhiyun struct qed_hwfn *hwfn = &cdev->hwfns[i];
4449*4882a593Smuzhiyun struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
4450*4882a593Smuzhiyun
4451*4882a593Smuzhiyun /* Failure to acquire the ptt in 100G creates an odd error
4452*4882a593Smuzhiyun * where the first engine has already released IOV.
4453*4882a593Smuzhiyun */
4454*4882a593Smuzhiyun if (!ptt) {
4455*4882a593Smuzhiyun DP_ERR(hwfn, "Failed to acquire ptt\n");
4456*4882a593Smuzhiyun return -EBUSY;
4457*4882a593Smuzhiyun }
4458*4882a593Smuzhiyun
4459*4882a593Smuzhiyun /* Clean WFQ db and configure equal weight for all vports */
4460*4882a593Smuzhiyun qed_clean_wfq_db(hwfn, ptt);
4461*4882a593Smuzhiyun
4462*4882a593Smuzhiyun qed_for_each_vf(hwfn, j) {
4463*4882a593Smuzhiyun int k;
4464*4882a593Smuzhiyun
4465*4882a593Smuzhiyun if (!qed_iov_is_valid_vfid(hwfn, j, true, false))
4466*4882a593Smuzhiyun continue;
4467*4882a593Smuzhiyun
4468*4882a593Smuzhiyun /* Wait until VF is disabled before releasing */
4469*4882a593Smuzhiyun for (k = 0; k < 100; k++) {
4470*4882a593Smuzhiyun if (!qed_iov_is_vf_stopped(hwfn, j))
4471*4882a593Smuzhiyun msleep(20);
4472*4882a593Smuzhiyun else
4473*4882a593Smuzhiyun break;
4474*4882a593Smuzhiyun }
4475*4882a593Smuzhiyun
4476*4882a593Smuzhiyun if (k < 100)
4477*4882a593Smuzhiyun qed_iov_release_hw_for_vf(&cdev->hwfns[i],
4478*4882a593Smuzhiyun ptt, j);
4479*4882a593Smuzhiyun else
4480*4882a593Smuzhiyun DP_ERR(hwfn,
4481*4882a593Smuzhiyun "Timeout waiting for VF's FLR to end\n");
4482*4882a593Smuzhiyun }
4483*4882a593Smuzhiyun
4484*4882a593Smuzhiyun qed_ptt_release(hwfn, ptt);
4485*4882a593Smuzhiyun }
4486*4882a593Smuzhiyun out:
4487*4882a593Smuzhiyun qed_iov_set_vfs_to_disable(cdev, false);
4488*4882a593Smuzhiyun
4489*4882a593Smuzhiyun return 0;
4490*4882a593Smuzhiyun }
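
/* Timing note: the FLR wait loop above polls qed_iov_is_vf_stopped() up to
 * 100 times with msleep(20) between attempts, i.e. roughly a two second
 * budget per VF before logging the timeout and skipping the HW release.
 */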
4491*4882a593Smuzhiyun
4492*4882a593Smuzhiyun static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
4493*4882a593Smuzhiyun u16 vfid,
4494*4882a593Smuzhiyun struct qed_iov_vf_init_params *params)
4495*4882a593Smuzhiyun {
4496*4882a593Smuzhiyun u16 base, i;
4497*4882a593Smuzhiyun
4498*4882a593Smuzhiyun /* Since we have an equal resource distribution per-VF, and we assume
4499*4882a593Smuzhiyun * the PF has acquired the first QED_PF_L2_QUE queues, we start setting
4500*4882a593Smuzhiyun * sequentially from there.
4501*4882a593Smuzhiyun */
4502*4882a593Smuzhiyun base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues;
4503*4882a593Smuzhiyun
4504*4882a593Smuzhiyun params->rel_vf_id = vfid;
4505*4882a593Smuzhiyun for (i = 0; i < params->num_queues; i++) {
4506*4882a593Smuzhiyun params->req_rx_queue[i] = base + i;
4507*4882a593Smuzhiyun params->req_tx_queue[i] = base + i;
4508*4882a593Smuzhiyun }
4509*4882a593Smuzhiyun }
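
/* Worked example (hypothetical values): with FEAT_NUM(hwfn, QED_PF_L2_QUE)
 * == 16 and params->num_queues == 4, base = 16 + vfid * 4, so VF 0 gets
 * queues 16..19, VF 1 gets 20..23, VF 2 gets 24..27, with the rx and tx
 * queue requests kept identical.
 */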
4510*4882a593Smuzhiyun
4511*4882a593Smuzhiyun static int qed_sriov_enable(struct qed_dev *cdev, int num)
4512*4882a593Smuzhiyun {
4513*4882a593Smuzhiyun struct qed_iov_vf_init_params params;
4514*4882a593Smuzhiyun struct qed_hwfn *hwfn;
4515*4882a593Smuzhiyun struct qed_ptt *ptt;
4516*4882a593Smuzhiyun int i, j, rc;
4517*4882a593Smuzhiyun
4518*4882a593Smuzhiyun if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
4519*4882a593Smuzhiyun DP_NOTICE(cdev, "Can start at most %d VFs\n",
4520*4882a593Smuzhiyun RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
4521*4882a593Smuzhiyun return -EINVAL;
4522*4882a593Smuzhiyun }
4523*4882a593Smuzhiyun
4524*4882a593Smuzhiyun memset(&params, 0, sizeof(params));
4525*4882a593Smuzhiyun
4526*4882a593Smuzhiyun /* Initialize HW for VF access */
4527*4882a593Smuzhiyun for_each_hwfn(cdev, j) {
4528*4882a593Smuzhiyun hwfn = &cdev->hwfns[j];
4529*4882a593Smuzhiyun ptt = qed_ptt_acquire(hwfn);
4530*4882a593Smuzhiyun
4531*4882a593Smuzhiyun /* Make sure not to use more than 16 queues per VF */
4532*4882a593Smuzhiyun params.num_queues = min_t(int,
4533*4882a593Smuzhiyun FEAT_NUM(hwfn, QED_VF_L2_QUE) / num,
4534*4882a593Smuzhiyun 16);
4535*4882a593Smuzhiyun
4536*4882a593Smuzhiyun if (!ptt) {
4537*4882a593Smuzhiyun DP_ERR(hwfn, "Failed to acquire ptt\n");
4538*4882a593Smuzhiyun rc = -EBUSY;
4539*4882a593Smuzhiyun goto err;
4540*4882a593Smuzhiyun }
4541*4882a593Smuzhiyun
4542*4882a593Smuzhiyun for (i = 0; i < num; i++) {
4543*4882a593Smuzhiyun if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
4544*4882a593Smuzhiyun continue;
4545*4882a593Smuzhiyun
4546*4882a593Smuzhiyun qed_sriov_enable_qid_config(hwfn, i, &params);
4547*4882a593Smuzhiyun rc = qed_iov_init_hw_for_vf(hwfn, ptt, &params);
4548*4882a593Smuzhiyun if (rc) {
4549*4882a593Smuzhiyun DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
4550*4882a593Smuzhiyun qed_ptt_release(hwfn, ptt);
4551*4882a593Smuzhiyun goto err;
4552*4882a593Smuzhiyun }
4553*4882a593Smuzhiyun }
4554*4882a593Smuzhiyun
4555*4882a593Smuzhiyun qed_ptt_release(hwfn, ptt);
4556*4882a593Smuzhiyun }
4557*4882a593Smuzhiyun
4558*4882a593Smuzhiyun /* Enable SRIOV PCIe functions */
4559*4882a593Smuzhiyun rc = pci_enable_sriov(cdev->pdev, num);
4560*4882a593Smuzhiyun if (rc) {
4561*4882a593Smuzhiyun DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
4562*4882a593Smuzhiyun goto err;
4563*4882a593Smuzhiyun }
4564*4882a593Smuzhiyun
4565*4882a593Smuzhiyun hwfn = QED_LEADING_HWFN(cdev);
4566*4882a593Smuzhiyun ptt = qed_ptt_acquire(hwfn);
4567*4882a593Smuzhiyun if (!ptt) {
4568*4882a593Smuzhiyun DP_ERR(hwfn, "Failed to acquire ptt\n");
4569*4882a593Smuzhiyun rc = -EBUSY;
4570*4882a593Smuzhiyun goto err;
4571*4882a593Smuzhiyun }
4572*4882a593Smuzhiyun
4573*4882a593Smuzhiyun rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
4574*4882a593Smuzhiyun if (rc)
4575*4882a593Smuzhiyun DP_INFO(cdev, "Failed to update eswitch mode\n");
4576*4882a593Smuzhiyun qed_ptt_release(hwfn, ptt);
4577*4882a593Smuzhiyun
4578*4882a593Smuzhiyun return num;
4579*4882a593Smuzhiyun
4580*4882a593Smuzhiyun err:
4581*4882a593Smuzhiyun qed_sriov_disable(cdev, false);
4582*4882a593Smuzhiyun return rc;
4583*4882a593Smuzhiyun }
4584*4882a593Smuzhiyun
4585*4882a593Smuzhiyun static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
4586*4882a593Smuzhiyun {
4587*4882a593Smuzhiyun if (!IS_QED_SRIOV(cdev)) {
4588*4882a593Smuzhiyun DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
4589*4882a593Smuzhiyun return -EOPNOTSUPP;
4590*4882a593Smuzhiyun }
4591*4882a593Smuzhiyun
4592*4882a593Smuzhiyun if (num_vfs_param)
4593*4882a593Smuzhiyun return qed_sriov_enable(cdev, num_vfs_param);
4594*4882a593Smuzhiyun else
4595*4882a593Smuzhiyun return qed_sriov_disable(cdev, true);
4596*4882a593Smuzhiyun }
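
/* Usage note: qed_sriov_configure() is exported through the
 * qed_iov_hv_ops table at the bottom of this file and ultimately backs the
 * standard PCI sysfs knob. Assuming a PF at the hypothetical address
 * 0000:03:00.0:
 *
 *	# echo 4 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs
 *	# echo 0 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs
 *
 * A non-zero count routes to qed_sriov_enable(); zero routes to
 * qed_sriov_disable() with pci_enabled set.
 */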
4597*4882a593Smuzhiyun
4598*4882a593Smuzhiyun static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
4599*4882a593Smuzhiyun {
4600*4882a593Smuzhiyun int i;
4601*4882a593Smuzhiyun
4602*4882a593Smuzhiyun if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
4603*4882a593Smuzhiyun DP_VERBOSE(cdev, QED_MSG_IOV,
4604*4882a593Smuzhiyun "Cannot set a VF MAC; SR-IOV is not enabled\n");
4605*4882a593Smuzhiyun return -EINVAL;
4606*4882a593Smuzhiyun }
4607*4882a593Smuzhiyun
4608*4882a593Smuzhiyun if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
4609*4882a593Smuzhiyun DP_VERBOSE(cdev, QED_MSG_IOV,
4610*4882a593Smuzhiyun "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
4611*4882a593Smuzhiyun return -EINVAL;
4612*4882a593Smuzhiyun }
4613*4882a593Smuzhiyun
4614*4882a593Smuzhiyun for_each_hwfn(cdev, i) {
4615*4882a593Smuzhiyun struct qed_hwfn *hwfn = &cdev->hwfns[i];
4616*4882a593Smuzhiyun struct qed_public_vf_info *vf_info;
4617*4882a593Smuzhiyun
4618*4882a593Smuzhiyun vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
4619*4882a593Smuzhiyun if (!vf_info)
4620*4882a593Smuzhiyun continue;
4621*4882a593Smuzhiyun
4622*4882a593Smuzhiyun /* Set the MAC, and schedule the IOV task */
4623*4882a593Smuzhiyun if (vf_info->is_trusted_configured)
4624*4882a593Smuzhiyun ether_addr_copy(vf_info->mac, mac);
4625*4882a593Smuzhiyun else
4626*4882a593Smuzhiyun ether_addr_copy(vf_info->forced_mac, mac);
4627*4882a593Smuzhiyun
4628*4882a593Smuzhiyun qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
4629*4882a593Smuzhiyun }
4630*4882a593Smuzhiyun
4631*4882a593Smuzhiyun return 0;
4632*4882a593Smuzhiyun }
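
/* Usage note: this is the PF-side handler behind "ip link set ... vf N mac"
 * (ndo_set_vf_mac in qede). Assuming a hypothetical PF netdev eth0:
 *
 *	# ip link set dev eth0 vf 0 mac 52:54:00:12:34:56
 *
 * For an untrusted VF the address is stored as forced_mac and enforced; for
 * a trusted VF it is stored as a plain suggested MAC the VF may replace.
 */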
4633*4882a593Smuzhiyun
4634*4882a593Smuzhiyun static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
4635*4882a593Smuzhiyun {
4636*4882a593Smuzhiyun int i;
4637*4882a593Smuzhiyun
4638*4882a593Smuzhiyun if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
4639*4882a593Smuzhiyun DP_VERBOSE(cdev, QED_MSG_IOV,
4640*4882a593Smuzhiyun "Cannot set a VF VLAN; SR-IOV is not enabled\n");
4641*4882a593Smuzhiyun return -EINVAL;
4642*4882a593Smuzhiyun }
4643*4882a593Smuzhiyun
4644*4882a593Smuzhiyun if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
4645*4882a593Smuzhiyun DP_VERBOSE(cdev, QED_MSG_IOV,
4646*4882a593Smuzhiyun "Cannot set VF[%d] VLAN (VF is not active)\n", vfid);
4647*4882a593Smuzhiyun return -EINVAL;
4648*4882a593Smuzhiyun }
4649*4882a593Smuzhiyun
4650*4882a593Smuzhiyun for_each_hwfn(cdev, i) {
4651*4882a593Smuzhiyun struct qed_hwfn *hwfn = &cdev->hwfns[i];
4652*4882a593Smuzhiyun struct qed_public_vf_info *vf_info;
4653*4882a593Smuzhiyun
4654*4882a593Smuzhiyun vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
4655*4882a593Smuzhiyun if (!vf_info)
4656*4882a593Smuzhiyun continue;
4657*4882a593Smuzhiyun
4658*4882a593Smuzhiyun /* Set the forced vlan, and schedule the IOV task */
4659*4882a593Smuzhiyun vf_info->forced_vlan = vid;
4660*4882a593Smuzhiyun qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
4661*4882a593Smuzhiyun }
4662*4882a593Smuzhiyun
4663*4882a593Smuzhiyun return 0;
4664*4882a593Smuzhiyun }
4665*4882a593Smuzhiyun
4666*4882a593Smuzhiyun static int qed_get_vf_config(struct qed_dev *cdev,
4667*4882a593Smuzhiyun int vf_id, struct ifla_vf_info *ivi)
4668*4882a593Smuzhiyun {
4669*4882a593Smuzhiyun struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
4670*4882a593Smuzhiyun struct qed_public_vf_info *vf_info;
4671*4882a593Smuzhiyun struct qed_mcp_link_state link;
4672*4882a593Smuzhiyun u32 tx_rate;
4673*4882a593Smuzhiyun int ret;
4674*4882a593Smuzhiyun
4675*4882a593Smuzhiyun /* Sanitize request */
4676*4882a593Smuzhiyun if (IS_VF(cdev))
4677*4882a593Smuzhiyun return -EINVAL;
4678*4882a593Smuzhiyun
4679*4882a593Smuzhiyun if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) {
4680*4882a593Smuzhiyun DP_VERBOSE(cdev, QED_MSG_IOV,
4681*4882a593Smuzhiyun "VF index [%d] isn't active\n", vf_id);
4682*4882a593Smuzhiyun return -EINVAL;
4683*4882a593Smuzhiyun }
4684*4882a593Smuzhiyun
4685*4882a593Smuzhiyun vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
4686*4882a593Smuzhiyun
4687*4882a593Smuzhiyun ret = qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
4688*4882a593Smuzhiyun if (ret)
4689*4882a593Smuzhiyun return ret;
4690*4882a593Smuzhiyun
4691*4882a593Smuzhiyun /* Fill information about VF */
4692*4882a593Smuzhiyun ivi->vf = vf_id;
4693*4882a593Smuzhiyun
4694*4882a593Smuzhiyun if (is_valid_ether_addr(vf_info->forced_mac))
4695*4882a593Smuzhiyun ether_addr_copy(ivi->mac, vf_info->forced_mac);
4696*4882a593Smuzhiyun else
4697*4882a593Smuzhiyun ether_addr_copy(ivi->mac, vf_info->mac);
4698*4882a593Smuzhiyun
4699*4882a593Smuzhiyun ivi->vlan = vf_info->forced_vlan;
4700*4882a593Smuzhiyun ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
4701*4882a593Smuzhiyun ivi->linkstate = vf_info->link_state;
4702*4882a593Smuzhiyun tx_rate = vf_info->tx_rate;
4703*4882a593Smuzhiyun ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
4704*4882a593Smuzhiyun ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
4705*4882a593Smuzhiyun ivi->trusted = vf_info->is_trusted_request;
4706*4882a593Smuzhiyun
4707*4882a593Smuzhiyun return 0;
4708*4882a593Smuzhiyun }
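
/* Usage note: the ifla_vf_info filled here is what "ip link show" prints
 * per VF on the PF netdev (MAC, VLAN, min/max rate, spoofchk, link state
 * and trust), reached through qede's ndo_get_vf_config hook.
 */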
4709*4882a593Smuzhiyun
4710*4882a593Smuzhiyun void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
4711*4882a593Smuzhiyun {
4712*4882a593Smuzhiyun struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev);
4713*4882a593Smuzhiyun struct qed_mcp_link_capabilities caps;
4714*4882a593Smuzhiyun struct qed_mcp_link_params params;
4715*4882a593Smuzhiyun struct qed_mcp_link_state link;
4716*4882a593Smuzhiyun int i;
4717*4882a593Smuzhiyun
4718*4882a593Smuzhiyun if (!hwfn->pf_iov_info)
4719*4882a593Smuzhiyun return;
4720*4882a593Smuzhiyun
4721*4882a593Smuzhiyun /* Update bulletin of all future possible VFs with link configuration */
4722*4882a593Smuzhiyun for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
4723*4882a593Smuzhiyun struct qed_public_vf_info *vf_info;
4724*4882a593Smuzhiyun
4725*4882a593Smuzhiyun vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
4726*4882a593Smuzhiyun if (!vf_info)
4727*4882a593Smuzhiyun continue;
4728*4882a593Smuzhiyun
4729*4882a593Smuzhiyun /* Only hwfn0 is actually interested in the link speed.
4730*4882a593Smuzhiyun * But since only it would receive an MFW indication of link,
4731*4882a593Smuzhiyun * need to take configuration from it - otherwise things like
4732*4882a593Smuzhiyun * rate limiting for hwfn1 VF would not work.
4733*4882a593Smuzhiyun */
4734*4882a593Smuzhiyun memcpy(&params, qed_mcp_get_link_params(lead_hwfn),
4735*4882a593Smuzhiyun sizeof(params));
4736*4882a593Smuzhiyun memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link));
4737*4882a593Smuzhiyun memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn),
4738*4882a593Smuzhiyun sizeof(caps));
4739*4882a593Smuzhiyun
4740*4882a593Smuzhiyun /* Modify link according to the VF's configured link state */
4741*4882a593Smuzhiyun switch (vf_info->link_state) {
4742*4882a593Smuzhiyun case IFLA_VF_LINK_STATE_DISABLE:
4743*4882a593Smuzhiyun link.link_up = false;
4744*4882a593Smuzhiyun break;
4745*4882a593Smuzhiyun case IFLA_VF_LINK_STATE_ENABLE:
4746*4882a593Smuzhiyun link.link_up = true;
4747*4882a593Smuzhiyun /* Set speed according to the maximum supported by HW,
4748*4882a593Smuzhiyun * that is, 40G for regular devices and 100G for CMT
4749*4882a593Smuzhiyun * mode devices.
4750*4882a593Smuzhiyun */
4751*4882a593Smuzhiyun link.speed = (hwfn->cdev->num_hwfns > 1) ?
4752*4882a593Smuzhiyun 100000 : 40000;
break;
4753*4882a593Smuzhiyun default:
4754*4882a593Smuzhiyun /* In auto mode pass PF link image to VF */
4755*4882a593Smuzhiyun break;
4756*4882a593Smuzhiyun }
4757*4882a593Smuzhiyun
4758*4882a593Smuzhiyun if (link.link_up && vf_info->tx_rate) {
4759*4882a593Smuzhiyun struct qed_ptt *ptt;
4760*4882a593Smuzhiyun int rate;
4761*4882a593Smuzhiyun
4762*4882a593Smuzhiyun rate = min_t(int, vf_info->tx_rate, link.speed);
4763*4882a593Smuzhiyun
4764*4882a593Smuzhiyun ptt = qed_ptt_acquire(hwfn);
4765*4882a593Smuzhiyun if (!ptt) {
4766*4882a593Smuzhiyun DP_NOTICE(hwfn, "Failed to acquire PTT\n");
4767*4882a593Smuzhiyun return;
4768*4882a593Smuzhiyun }
4769*4882a593Smuzhiyun
4770*4882a593Smuzhiyun if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
4771*4882a593Smuzhiyun vf_info->tx_rate = rate;
4772*4882a593Smuzhiyun link.speed = rate;
4773*4882a593Smuzhiyun }
4774*4882a593Smuzhiyun
4775*4882a593Smuzhiyun qed_ptt_release(hwfn, ptt);
4776*4882a593Smuzhiyun }
4777*4882a593Smuzhiyun
4778*4882a593Smuzhiyun qed_iov_set_link(hwfn, i, &params, &link, &caps);
4779*4882a593Smuzhiyun }
4780*4882a593Smuzhiyun
4781*4882a593Smuzhiyun qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
4782*4882a593Smuzhiyun }
4783*4882a593Smuzhiyun
4784*4882a593Smuzhiyun static int qed_set_vf_link_state(struct qed_dev *cdev,
4785*4882a593Smuzhiyun int vf_id, int link_state)
4786*4882a593Smuzhiyun {
4787*4882a593Smuzhiyun int i;
4788*4882a593Smuzhiyun
4789*4882a593Smuzhiyun /* Sanitize request */
4790*4882a593Smuzhiyun if (IS_VF(cdev))
4791*4882a593Smuzhiyun return -EINVAL;
4792*4882a593Smuzhiyun
4793*4882a593Smuzhiyun if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
4794*4882a593Smuzhiyun DP_VERBOSE(cdev, QED_MSG_IOV,
4795*4882a593Smuzhiyun "VF index [%d] isn't active\n", vf_id);
4796*4882a593Smuzhiyun return -EINVAL;
4797*4882a593Smuzhiyun }
4798*4882a593Smuzhiyun
4799*4882a593Smuzhiyun /* Handle configuration of link state */
4800*4882a593Smuzhiyun for_each_hwfn(cdev, i) {
4801*4882a593Smuzhiyun struct qed_hwfn *hwfn = &cdev->hwfns[i];
4802*4882a593Smuzhiyun struct qed_public_vf_info *vf;
4803*4882a593Smuzhiyun
4804*4882a593Smuzhiyun vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
4805*4882a593Smuzhiyun if (!vf)
4806*4882a593Smuzhiyun continue;
4807*4882a593Smuzhiyun
4808*4882a593Smuzhiyun if (vf->link_state == link_state)
4809*4882a593Smuzhiyun continue;
4810*4882a593Smuzhiyun
4811*4882a593Smuzhiyun vf->link_state = link_state;
4812*4882a593Smuzhiyun qed_inform_vf_link_state(&cdev->hwfns[i]);
4813*4882a593Smuzhiyun }
4814*4882a593Smuzhiyun
4815*4882a593Smuzhiyun return 0;
4816*4882a593Smuzhiyun }
4817*4882a593Smuzhiyun
4818*4882a593Smuzhiyun static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
4819*4882a593Smuzhiyun {
4820*4882a593Smuzhiyun int i, rc = -EINVAL;
4821*4882a593Smuzhiyun
4822*4882a593Smuzhiyun for_each_hwfn(cdev, i) {
4823*4882a593Smuzhiyun struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4824*4882a593Smuzhiyun
4825*4882a593Smuzhiyun rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
4826*4882a593Smuzhiyun if (rc)
4827*4882a593Smuzhiyun break;
4828*4882a593Smuzhiyun }
4829*4882a593Smuzhiyun
4830*4882a593Smuzhiyun return rc;
4831*4882a593Smuzhiyun }
4832*4882a593Smuzhiyun
4833*4882a593Smuzhiyun static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
4834*4882a593Smuzhiyun {
4835*4882a593Smuzhiyun int i;
4836*4882a593Smuzhiyun
4837*4882a593Smuzhiyun for_each_hwfn(cdev, i) {
4838*4882a593Smuzhiyun struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4839*4882a593Smuzhiyun struct qed_public_vf_info *vf;
4840*4882a593Smuzhiyun
4841*4882a593Smuzhiyun if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
4842*4882a593Smuzhiyun DP_NOTICE(p_hwfn,
4843*4882a593Smuzhiyun "SR-IOV sanity check failed, can't set tx rate\n");
4844*4882a593Smuzhiyun return -EINVAL;
4845*4882a593Smuzhiyun }
4846*4882a593Smuzhiyun
4847*4882a593Smuzhiyun vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);
4848*4882a593Smuzhiyun
4849*4882a593Smuzhiyun vf->tx_rate = rate;
4850*4882a593Smuzhiyun
4851*4882a593Smuzhiyun qed_inform_vf_link_state(p_hwfn);
4852*4882a593Smuzhiyun }
4853*4882a593Smuzhiyun
4854*4882a593Smuzhiyun return 0;
4855*4882a593Smuzhiyun }
4856*4882a593Smuzhiyun
4857*4882a593Smuzhiyun static int qed_set_vf_rate(struct qed_dev *cdev,
4858*4882a593Smuzhiyun int vfid, u32 min_rate, u32 max_rate)
4859*4882a593Smuzhiyun {
4860*4882a593Smuzhiyun int rc_min = 0, rc_max = 0;
4861*4882a593Smuzhiyun
4862*4882a593Smuzhiyun if (max_rate)
4863*4882a593Smuzhiyun rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);
4864*4882a593Smuzhiyun
4865*4882a593Smuzhiyun if (min_rate)
4866*4882a593Smuzhiyun rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);
4867*4882a593Smuzhiyun
4868*4882a593Smuzhiyun if (rc_max | rc_min)
4869*4882a593Smuzhiyun return -EINVAL;
4870*4882a593Smuzhiyun
4871*4882a593Smuzhiyun return 0;
4872*4882a593Smuzhiyun }
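
/* Note: min and max rates are configured independently above, and the
 * bitwise OR of the two return codes collapses any failure into -EINVAL.
 * Both paths can be exercised in one call, e.g. (hypothetical PF netdev):
 *
 *	# ip link set dev eth0 vf 0 min_tx_rate 1000 max_tx_rate 5000
 */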
4873*4882a593Smuzhiyun
4874*4882a593Smuzhiyun static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust)
4875*4882a593Smuzhiyun {
4876*4882a593Smuzhiyun int i;
4877*4882a593Smuzhiyun
4878*4882a593Smuzhiyun for_each_hwfn(cdev, i) {
4879*4882a593Smuzhiyun struct qed_hwfn *hwfn = &cdev->hwfns[i];
4880*4882a593Smuzhiyun struct qed_public_vf_info *vf;
4881*4882a593Smuzhiyun
4882*4882a593Smuzhiyun if (!qed_iov_pf_sanity_check(hwfn, vfid)) {
4883*4882a593Smuzhiyun DP_NOTICE(hwfn,
4884*4882a593Smuzhiyun "SR-IOV sanity check failed, can't set trust\n");
4885*4882a593Smuzhiyun return -EINVAL;
4886*4882a593Smuzhiyun }
4887*4882a593Smuzhiyun
4888*4882a593Smuzhiyun vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
4889*4882a593Smuzhiyun
4890*4882a593Smuzhiyun if (vf->is_trusted_request == trust)
4891*4882a593Smuzhiyun return 0;
4892*4882a593Smuzhiyun vf->is_trusted_request = trust;
4893*4882a593Smuzhiyun
4894*4882a593Smuzhiyun qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG);
4895*4882a593Smuzhiyun }
4896*4882a593Smuzhiyun
4897*4882a593Smuzhiyun return 0;
4898*4882a593Smuzhiyun }
4899*4882a593Smuzhiyun
4900*4882a593Smuzhiyun static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
4901*4882a593Smuzhiyun {
4902*4882a593Smuzhiyun u64 events[QED_VF_ARRAY_LENGTH];
4903*4882a593Smuzhiyun struct qed_ptt *ptt;
4904*4882a593Smuzhiyun int i;
4905*4882a593Smuzhiyun
4906*4882a593Smuzhiyun ptt = qed_ptt_acquire(hwfn);
4907*4882a593Smuzhiyun if (!ptt) {
4908*4882a593Smuzhiyun DP_VERBOSE(hwfn, QED_MSG_IOV,
4909*4882a593Smuzhiyun "Can't acquire PTT; re-scheduling\n");
4910*4882a593Smuzhiyun qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
4911*4882a593Smuzhiyun return;
4912*4882a593Smuzhiyun }
4913*4882a593Smuzhiyun
4914*4882a593Smuzhiyun qed_iov_pf_get_pending_events(hwfn, events);
4915*4882a593Smuzhiyun
4916*4882a593Smuzhiyun DP_VERBOSE(hwfn, QED_MSG_IOV,
4917*4882a593Smuzhiyun "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
4918*4882a593Smuzhiyun events[0], events[1], events[2]);
4919*4882a593Smuzhiyun
4920*4882a593Smuzhiyun qed_for_each_vf(hwfn, i) {
4921*4882a593Smuzhiyun /* Skip VFs with no pending messages */
4922*4882a593Smuzhiyun if (!(events[i / 64] & (1ULL << (i % 64))))
4923*4882a593Smuzhiyun continue;
4924*4882a593Smuzhiyun
4925*4882a593Smuzhiyun DP_VERBOSE(hwfn, QED_MSG_IOV,
4926*4882a593Smuzhiyun "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
4927*4882a593Smuzhiyun i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);
4928*4882a593Smuzhiyun
4929*4882a593Smuzhiyun /* Copy VF's message to PF's request buffer for that VF */
4930*4882a593Smuzhiyun if (qed_iov_copy_vf_msg(hwfn, ptt, i))
4931*4882a593Smuzhiyun continue;
4932*4882a593Smuzhiyun
4933*4882a593Smuzhiyun qed_iov_process_mbx_req(hwfn, ptt, i);
4934*4882a593Smuzhiyun }
4935*4882a593Smuzhiyun
4936*4882a593Smuzhiyun qed_ptt_release(hwfn, ptt);
4937*4882a593Smuzhiyun }
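
/* Worked example: the pending-events bitmap packs one bit per relative VF
 * id into u64 words, so VF 70 maps to events[70 / 64] == events[1], bit
 * 70 % 64 == 6, and the test above evaluates
 *
 *	events[1] & (1ULL << 6)
 *
 * QED_VF_ARRAY_LENGTH words therefore cover 64 * QED_VF_ARRAY_LENGTH VFs.
 */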
4938*4882a593Smuzhiyun
4939*4882a593Smuzhiyun static bool qed_pf_validate_req_vf_mac(struct qed_hwfn *hwfn,
4940*4882a593Smuzhiyun u8 *mac,
4941*4882a593Smuzhiyun struct qed_public_vf_info *info)
4942*4882a593Smuzhiyun {
4943*4882a593Smuzhiyun if (info->is_trusted_configured) {
4944*4882a593Smuzhiyun if (is_valid_ether_addr(info->mac) &&
4945*4882a593Smuzhiyun (!mac || !ether_addr_equal(mac, info->mac)))
4946*4882a593Smuzhiyun return true;
4947*4882a593Smuzhiyun } else {
4948*4882a593Smuzhiyun if (is_valid_ether_addr(info->forced_mac) &&
4949*4882a593Smuzhiyun (!mac || !ether_addr_equal(mac, info->forced_mac)))
4950*4882a593Smuzhiyun return true;
4951*4882a593Smuzhiyun }
4952*4882a593Smuzhiyun
4953*4882a593Smuzhiyun return false;
4954*4882a593Smuzhiyun }
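
/* In short: this predicate returns true when the bulletin needs a MAC
 * update -- the relevant PF-side copy (mac for a trusted VF, forced_mac
 * otherwise) is a valid address, and the currently published one (the mac
 * argument, possibly NULL) differs from it.
 */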
4955*4882a593Smuzhiyun
4956*4882a593Smuzhiyun static void qed_set_bulletin_mac(struct qed_hwfn *hwfn,
4957*4882a593Smuzhiyun struct qed_public_vf_info *info,
4958*4882a593Smuzhiyun int vfid)
4959*4882a593Smuzhiyun {
4960*4882a593Smuzhiyun if (info->is_trusted_configured)
4961*4882a593Smuzhiyun qed_iov_bulletin_set_mac(hwfn, info->mac, vfid);
4962*4882a593Smuzhiyun else
4963*4882a593Smuzhiyun qed_iov_bulletin_set_forced_mac(hwfn, info->forced_mac, vfid);
4964*4882a593Smuzhiyun }
4965*4882a593Smuzhiyun
4966*4882a593Smuzhiyun static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
4967*4882a593Smuzhiyun {
4968*4882a593Smuzhiyun int i;
4969*4882a593Smuzhiyun
4970*4882a593Smuzhiyun qed_for_each_vf(hwfn, i) {
4971*4882a593Smuzhiyun struct qed_public_vf_info *info;
4972*4882a593Smuzhiyun bool update = false;
4973*4882a593Smuzhiyun u8 *mac;
4974*4882a593Smuzhiyun
4975*4882a593Smuzhiyun info = qed_iov_get_public_vf_info(hwfn, i, true);
4976*4882a593Smuzhiyun if (!info)
4977*4882a593Smuzhiyun continue;
4978*4882a593Smuzhiyun
4979*4882a593Smuzhiyun /* Update data on bulletin board */
4980*4882a593Smuzhiyun if (info->is_trusted_configured)
4981*4882a593Smuzhiyun mac = qed_iov_bulletin_get_mac(hwfn, i);
4982*4882a593Smuzhiyun else
4983*4882a593Smuzhiyun mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
4984*4882a593Smuzhiyun
4985*4882a593Smuzhiyun if (qed_pf_validate_req_vf_mac(hwfn, mac, info)) {
4986*4882a593Smuzhiyun DP_VERBOSE(hwfn,
4987*4882a593Smuzhiyun QED_MSG_IOV,
4988*4882a593Smuzhiyun "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
4989*4882a593Smuzhiyun i,
4990*4882a593Smuzhiyun hwfn->cdev->p_iov_info->first_vf_in_pf + i);
4991*4882a593Smuzhiyun
4992*4882a593Smuzhiyun /* Update bulletin board with MAC */
4993*4882a593Smuzhiyun qed_set_bulletin_mac(hwfn, info, i);
4994*4882a593Smuzhiyun update = true;
4995*4882a593Smuzhiyun }
4996*4882a593Smuzhiyun
4997*4882a593Smuzhiyun if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
4998*4882a593Smuzhiyun info->forced_vlan) {
4999*4882a593Smuzhiyun DP_VERBOSE(hwfn,
5000*4882a593Smuzhiyun QED_MSG_IOV,
5001*4882a593Smuzhiyun "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
5002*4882a593Smuzhiyun info->forced_vlan,
5003*4882a593Smuzhiyun i,
5004*4882a593Smuzhiyun hwfn->cdev->p_iov_info->first_vf_in_pf + i);
5005*4882a593Smuzhiyun qed_iov_bulletin_set_forced_vlan(hwfn,
5006*4882a593Smuzhiyun info->forced_vlan, i);
5007*4882a593Smuzhiyun update = true;
5008*4882a593Smuzhiyun }
5009*4882a593Smuzhiyun
5010*4882a593Smuzhiyun if (update)
5011*4882a593Smuzhiyun qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
5012*4882a593Smuzhiyun }
5013*4882a593Smuzhiyun }
5014*4882a593Smuzhiyun
5015*4882a593Smuzhiyun static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
5016*4882a593Smuzhiyun {
5017*4882a593Smuzhiyun struct qed_ptt *ptt;
5018*4882a593Smuzhiyun int i;
5019*4882a593Smuzhiyun
5020*4882a593Smuzhiyun ptt = qed_ptt_acquire(hwfn);
5021*4882a593Smuzhiyun if (!ptt) {
5022*4882a593Smuzhiyun DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
5023*4882a593Smuzhiyun qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
5024*4882a593Smuzhiyun return;
5025*4882a593Smuzhiyun }
5026*4882a593Smuzhiyun
5027*4882a593Smuzhiyun qed_for_each_vf(hwfn, i)
5028*4882a593Smuzhiyun qed_iov_post_vf_bulletin(hwfn, i, ptt);
5029*4882a593Smuzhiyun
5030*4882a593Smuzhiyun qed_ptt_release(hwfn, ptt);
5031*4882a593Smuzhiyun }
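
/* Design note: the bulletin board is the one-way PF-to-VF channel; each VF
 * polls its own bulletin copy rather than being interrupted, so every
 * PF-side change in this file must end by scheduling
 * QED_IOV_WQ_BULLETIN_UPDATE_FLAG for the update to actually be published.
 */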
5032*4882a593Smuzhiyun
5033*4882a593Smuzhiyun static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id)
5034*4882a593Smuzhiyun {
5035*4882a593Smuzhiyun struct qed_public_vf_info *vf_info;
5036*4882a593Smuzhiyun struct qed_vf_info *vf;
5037*4882a593Smuzhiyun u8 *force_mac;
5038*4882a593Smuzhiyun int i;
5039*4882a593Smuzhiyun
5040*4882a593Smuzhiyun vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
5041*4882a593Smuzhiyun vf = qed_iov_get_vf_info(hwfn, vf_id, true);
5042*4882a593Smuzhiyun
5043*4882a593Smuzhiyun if (!vf_info || !vf)
5044*4882a593Smuzhiyun return;
5045*4882a593Smuzhiyun
5046*4882a593Smuzhiyun /* A forced MAC is converted to a regular (bulletin) MAC when VF trust is on */
5047*4882a593Smuzhiyun if (vf_info->is_trusted_configured &&
5048*4882a593Smuzhiyun (vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) {
5049*4882a593Smuzhiyun force_mac = qed_iov_bulletin_get_forced_mac(hwfn, vf_id);
5050*4882a593Smuzhiyun
5051*4882a593Smuzhiyun if (force_mac) {
5052*4882a593Smuzhiyun /* Clear existing shadow copy of MAC to have a clean
5053*4882a593Smuzhiyun * slate.
5054*4882a593Smuzhiyun */
5055*4882a593Smuzhiyun for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
5056*4882a593Smuzhiyun if (ether_addr_equal(vf->shadow_config.macs[i],
5057*4882a593Smuzhiyun vf_info->mac)) {
5058*4882a593Smuzhiyun eth_zero_addr(vf->shadow_config.macs[i]);
5059*4882a593Smuzhiyun DP_VERBOSE(hwfn, QED_MSG_IOV,
5060*4882a593Smuzhiyun "Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n",
5061*4882a593Smuzhiyun vf_info->mac, vf_id);
5062*4882a593Smuzhiyun break;
5063*4882a593Smuzhiyun }
5064*4882a593Smuzhiyun }
5065*4882a593Smuzhiyun
5066*4882a593Smuzhiyun ether_addr_copy(vf_info->mac, force_mac);
5067*4882a593Smuzhiyun eth_zero_addr(vf_info->forced_mac);
5068*4882a593Smuzhiyun vf->bulletin.p_virt->valid_bitmap &=
5069*4882a593Smuzhiyun ~BIT(MAC_ADDR_FORCED);
5070*4882a593Smuzhiyun qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
5071*4882a593Smuzhiyun }
5072*4882a593Smuzhiyun }
5073*4882a593Smuzhiyun
5074*4882a593Smuzhiyun /* Update shadow copy with VF MAC when trust mode is turned off */
5075*4882a593Smuzhiyun if (!vf_info->is_trusted_configured) {
5076*4882a593Smuzhiyun u8 empty_mac[ETH_ALEN];
5077*4882a593Smuzhiyun
5078*4882a593Smuzhiyun eth_zero_addr(empty_mac);
5079*4882a593Smuzhiyun for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
5080*4882a593Smuzhiyun if (ether_addr_equal(vf->shadow_config.macs[i],
5081*4882a593Smuzhiyun empty_mac)) {
5082*4882a593Smuzhiyun ether_addr_copy(vf->shadow_config.macs[i],
5083*4882a593Smuzhiyun vf_info->mac);
5084*4882a593Smuzhiyun DP_VERBOSE(hwfn, QED_MSG_IOV,
5085*4882a593Smuzhiyun "Shadow is updated with %pM for VF 0x%02x, VF trust mode is OFF\n",
5086*4882a593Smuzhiyun vf_info->mac, vf_id);
5087*4882a593Smuzhiyun break;
5088*4882a593Smuzhiyun }
5089*4882a593Smuzhiyun }
5090*4882a593Smuzhiyun /* Clear bulletin when trust mode is turned off,
5091*4882a593Smuzhiyun * to have a clean slate for next (normal) operations.
5092*4882a593Smuzhiyun */
5093*4882a593Smuzhiyun qed_iov_bulletin_set_mac(hwfn, empty_mac, vf_id);
5094*4882a593Smuzhiyun qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
5095*4882a593Smuzhiyun }
5096*4882a593Smuzhiyun }
5097*4882a593Smuzhiyun
5098*4882a593Smuzhiyun static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
5099*4882a593Smuzhiyun {
5100*4882a593Smuzhiyun struct qed_sp_vport_update_params params;
5101*4882a593Smuzhiyun struct qed_filter_accept_flags *flags;
5102*4882a593Smuzhiyun struct qed_public_vf_info *vf_info;
5103*4882a593Smuzhiyun struct qed_vf_info *vf;
5104*4882a593Smuzhiyun u8 mask;
5105*4882a593Smuzhiyun int i;
5106*4882a593Smuzhiyun
5107*4882a593Smuzhiyun mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
5108*4882a593Smuzhiyun flags = &params.accept_flags;
5109*4882a593Smuzhiyun
5110*4882a593Smuzhiyun qed_for_each_vf(hwfn, i) {
5111*4882a593Smuzhiyun /* Make sure the currently requested configuration didn't
5112*4882a593Smuzhiyun * flip meanwhile, so that we don't end up configuring
5113*4882a593Smuzhiyun * something that is no longer needed.
5114*4882a593Smuzhiyun */
5115*4882a593Smuzhiyun vf_info = qed_iov_get_public_vf_info(hwfn, i, true);
5116*4882a593Smuzhiyun if (vf_info->is_trusted_configured ==
5117*4882a593Smuzhiyun vf_info->is_trusted_request)
5118*4882a593Smuzhiyun continue;
5119*4882a593Smuzhiyun vf_info->is_trusted_configured = vf_info->is_trusted_request;
5120*4882a593Smuzhiyun
5121*4882a593Smuzhiyun /* Handle forced MAC mode */
5122*4882a593Smuzhiyun qed_update_mac_for_vf_trust_change(hwfn, i);
5123*4882a593Smuzhiyun
5124*4882a593Smuzhiyun /* Validate that the VF has a configured vport */
5125*4882a593Smuzhiyun vf = qed_iov_get_vf_info(hwfn, i, true);
5126*4882a593Smuzhiyun if (!vf->vport_instance)
5127*4882a593Smuzhiyun continue;
5128*4882a593Smuzhiyun
5129*4882a593Smuzhiyun memset(&params, 0, sizeof(params));
5130*4882a593Smuzhiyun params.opaque_fid = vf->opaque_fid;
5131*4882a593Smuzhiyun params.vport_id = vf->vport_id;
5132*4882a593Smuzhiyun
5133*4882a593Smuzhiyun params.update_ctl_frame_check = 1;
5134*4882a593Smuzhiyun params.mac_chk_en = !vf_info->is_trusted_configured;
5135*4882a593Smuzhiyun params.update_accept_any_vlan_flg = 0;
5136*4882a593Smuzhiyun
5137*4882a593Smuzhiyun if (vf_info->accept_any_vlan && vf_info->forced_vlan) {
5138*4882a593Smuzhiyun params.update_accept_any_vlan_flg = 1;
5139*4882a593Smuzhiyun params.accept_any_vlan = vf_info->accept_any_vlan;
5140*4882a593Smuzhiyun }
5141*4882a593Smuzhiyun
5142*4882a593Smuzhiyun if (vf_info->rx_accept_mode & mask) {
5143*4882a593Smuzhiyun flags->update_rx_mode_config = 1;
5144*4882a593Smuzhiyun flags->rx_accept_filter = vf_info->rx_accept_mode;
5145*4882a593Smuzhiyun }
5146*4882a593Smuzhiyun
5147*4882a593Smuzhiyun if (vf_info->tx_accept_mode & mask) {
5148*4882a593Smuzhiyun flags->update_tx_mode_config = 1;
5149*4882a593Smuzhiyun flags->tx_accept_filter = vf_info->tx_accept_mode;
5150*4882a593Smuzhiyun }
5151*4882a593Smuzhiyun
5152*4882a593Smuzhiyun /* Remove if needed; otherwise this would set the mask */
5153*4882a593Smuzhiyun if (!vf_info->is_trusted_configured) {
5154*4882a593Smuzhiyun flags->rx_accept_filter &= ~mask;
5155*4882a593Smuzhiyun flags->tx_accept_filter &= ~mask;
5156*4882a593Smuzhiyun params.accept_any_vlan = false;
5157*4882a593Smuzhiyun }
5158*4882a593Smuzhiyun
5159*4882a593Smuzhiyun if (flags->update_rx_mode_config ||
5160*4882a593Smuzhiyun flags->update_tx_mode_config ||
5161*4882a593Smuzhiyun params.update_ctl_frame_check ||
5162*4882a593Smuzhiyun params.update_accept_any_vlan_flg) {
5163*4882a593Smuzhiyun DP_VERBOSE(hwfn, QED_MSG_IOV,
5164*4882a593Smuzhiyun "vport update config for %s VF[abs 0x%x rel 0x%x]\n",
5165*4882a593Smuzhiyun vf_info->is_trusted_configured ? "trusted" : "untrusted",
5166*4882a593Smuzhiyun vf->abs_vf_id, vf->relative_vf_id);
5167*4882a593Smuzhiyun qed_sp_vport_update(hwfn, &params,
5168*4882a593Smuzhiyun QED_SPQ_MODE_EBLOCK, NULL);
5169*4882a593Smuzhiyun }
5170*4882a593Smuzhiyun }
5171*4882a593Smuzhiyun }
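
/* Design note: trusting a VF lifts the unmatched unicast/multicast accept
 * mask (promiscuous-like rx/tx) and disables the control-frame MAC check
 * (mac_chk_en); revoking trust clears those same bits again, so the vport
 * update above always converges on the currently requested trust state.
 */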
5172*4882a593Smuzhiyun
5173*4882a593Smuzhiyun static void qed_iov_pf_task(struct work_struct *work)
5175*4882a593Smuzhiyun {
5176*4882a593Smuzhiyun struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
5177*4882a593Smuzhiyun iov_task.work);
5178*4882a593Smuzhiyun int rc;
5179*4882a593Smuzhiyun
5180*4882a593Smuzhiyun if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
5181*4882a593Smuzhiyun return;
5182*4882a593Smuzhiyun
5183*4882a593Smuzhiyun if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
5184*4882a593Smuzhiyun struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
5185*4882a593Smuzhiyun
5186*4882a593Smuzhiyun if (!ptt) {
5187*4882a593Smuzhiyun qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
5188*4882a593Smuzhiyun return;
5189*4882a593Smuzhiyun }
5190*4882a593Smuzhiyun
5191*4882a593Smuzhiyun rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
5192*4882a593Smuzhiyun if (rc)
5193*4882a593Smuzhiyun qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
5194*4882a593Smuzhiyun
5195*4882a593Smuzhiyun qed_ptt_release(hwfn, ptt);
5196*4882a593Smuzhiyun }
5197*4882a593Smuzhiyun
5198*4882a593Smuzhiyun if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
5199*4882a593Smuzhiyun qed_handle_vf_msg(hwfn);
5200*4882a593Smuzhiyun
5201*4882a593Smuzhiyun if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
5202*4882a593Smuzhiyun &hwfn->iov_task_flags))
5203*4882a593Smuzhiyun qed_handle_pf_set_vf_unicast(hwfn);
5204*4882a593Smuzhiyun
5205*4882a593Smuzhiyun if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
5206*4882a593Smuzhiyun &hwfn->iov_task_flags))
5207*4882a593Smuzhiyun qed_handle_bulletin_post(hwfn);
5208*4882a593Smuzhiyun
5209*4882a593Smuzhiyun if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags))
5210*4882a593Smuzhiyun qed_iov_handle_trust_change(hwfn);
5211*4882a593Smuzhiyun }
5212*4882a593Smuzhiyun
5213*4882a593Smuzhiyun void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
5214*4882a593Smuzhiyun {
5215*4882a593Smuzhiyun int i;
5216*4882a593Smuzhiyun
5217*4882a593Smuzhiyun for_each_hwfn(cdev, i) {
5218*4882a593Smuzhiyun if (!cdev->hwfns[i].iov_wq)
5219*4882a593Smuzhiyun continue;
5220*4882a593Smuzhiyun
5221*4882a593Smuzhiyun if (schedule_first) {
5222*4882a593Smuzhiyun qed_schedule_iov(&cdev->hwfns[i],
5223*4882a593Smuzhiyun QED_IOV_WQ_STOP_WQ_FLAG);
5224*4882a593Smuzhiyun cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
5225*4882a593Smuzhiyun }
5226*4882a593Smuzhiyun
5227*4882a593Smuzhiyun flush_workqueue(cdev->hwfns[i].iov_wq);
5228*4882a593Smuzhiyun destroy_workqueue(cdev->hwfns[i].iov_wq);
5229*4882a593Smuzhiyun }
5230*4882a593Smuzhiyun }
5231*4882a593Smuzhiyun
5232*4882a593Smuzhiyun int qed_iov_wq_start(struct qed_dev *cdev)
5233*4882a593Smuzhiyun {
5234*4882a593Smuzhiyun char name[NAME_SIZE];
5235*4882a593Smuzhiyun int i;
5236*4882a593Smuzhiyun
5237*4882a593Smuzhiyun for_each_hwfn(cdev, i) {
5238*4882a593Smuzhiyun struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
5239*4882a593Smuzhiyun
5240*4882a593Smuzhiyun /* PFs need a dedicated workqueue only if they support IOV.
5241*4882a593Smuzhiyun * VFs always require one.
5242*4882a593Smuzhiyun */
5243*4882a593Smuzhiyun if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
5244*4882a593Smuzhiyun continue;
5245*4882a593Smuzhiyun
5246*4882a593Smuzhiyun snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
5247*4882a593Smuzhiyun cdev->pdev->bus->number,
5248*4882a593Smuzhiyun PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);
5249*4882a593Smuzhiyun
5250*4882a593Smuzhiyun p_hwfn->iov_wq = create_singlethread_workqueue(name);
5251*4882a593Smuzhiyun if (!p_hwfn->iov_wq) {
5252*4882a593Smuzhiyun DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
5253*4882a593Smuzhiyun return -ENOMEM;
5254*4882a593Smuzhiyun }
5255*4882a593Smuzhiyun
5256*4882a593Smuzhiyun if (IS_PF(cdev))
5257*4882a593Smuzhiyun INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
5258*4882a593Smuzhiyun else
5259*4882a593Smuzhiyun INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
5260*4882a593Smuzhiyun }
5261*4882a593Smuzhiyun
5262*4882a593Smuzhiyun return 0;
5263*4882a593Smuzhiyun }
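
/* Illustrative note: the workqueue name encodes bus:slot.pf, so a PF at
 * the hypothetical address 0000:03:00.0 with abs_pf_id 0 gets a
 * single-threaded workqueue named "iov-03:00.00".
 */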
5264*4882a593Smuzhiyun
5265*4882a593Smuzhiyun const struct qed_iov_hv_ops qed_iov_ops_pass = {
5266*4882a593Smuzhiyun .configure = &qed_sriov_configure,
5267*4882a593Smuzhiyun .set_mac = &qed_sriov_pf_set_mac,
5268*4882a593Smuzhiyun .set_vlan = &qed_sriov_pf_set_vlan,
5269*4882a593Smuzhiyun .get_config = &qed_get_vf_config,
5270*4882a593Smuzhiyun .set_link_state = &qed_set_vf_link_state,
5271*4882a593Smuzhiyun .set_spoof = &qed_spoof_configure,
5272*4882a593Smuzhiyun .set_rate = &qed_set_vf_rate,
5273*4882a593Smuzhiyun .set_trust = &qed_set_vf_trust,
5274*4882a593Smuzhiyun };