/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_vfr.h"
#include "bnxt_ethtool.h"

#ifdef CONFIG_BNXT_SRIOV
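/* Forward an async event completion to one VF (or broadcast it to all
 * VFs when @vf is NULL) over the HWRM_FWD_ASYNC_EVENT_CMPL channel.
 */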
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
                                          struct bnxt_vf_info *vf, u16 event_id)
{
        struct hwrm_fwd_async_event_cmpl_input req = {0};
        struct hwrm_async_event_cmpl *async_cmpl;
        int rc = 0;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
        if (vf)
                req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
        else
                /* broadcast this async event to all VFs */
                req.encap_async_event_target_id = cpu_to_le16(0xffff);
        async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
        async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
        async_cmpl->event_id = cpu_to_le16(event_id);

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
                           rc);
        return rc;
}

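/* Validate a VF ndo call: the PF must be open, SR-IOV must be enabled
 * and @vf_id must be within the range of active VFs.
 */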
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
        if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
                netdev_err(bp->dev, "vf ndo called even though PF is down\n");
                return -EINVAL;
        }
        if (!bp->pf.active_vfs) {
                netdev_err(bp->dev, "vf ndo called even though sriov is disabled\n");
                return -EINVAL;
        }
        if (vf_id >= bp->pf.active_vfs) {
                netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
                return -EINVAL;
        }
        return 0;
}

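/* .ndo_set_vf_spoofchk handler: enable or disable firmware source MAC
 * address checking for @vf_id and cache the result in vf->flags.
 */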
int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        bool old_setting = false;
        u32 func_flags;
        int rc;

        if (bp->hwrm_spec_code < 0x10701)
                return -ENOTSUPP;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        vf = &bp->pf.vf[vf_id];
        if (vf->flags & BNXT_VF_SPOOFCHK)
                old_setting = true;
        if (old_setting == setting)
                return 0;

        if (setting)
                func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
        else
                func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
        /* TODO: if the driver supports VLAN filter on guest VLAN,
         * the spoof check should also include vlan anti-spoofing
         */
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.flags = cpu_to_le32(func_flags);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                if (setting)
                        vf->flags |= BNXT_VF_SPOOFCHK;
                else
                        vf->flags &= ~BNXT_VF_SPOOFCHK;
        }
        return rc;
}

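/* Read the function configuration flags for @vf from firmware and
 * cache them in vf->func_qcfg_flags.
 */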
static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_qcfg_input req = {0};
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc) {
                mutex_unlock(&bp->hwrm_cmd_lock);
                return rc;
        }
        vf->func_qcfg_flags = le16_to_cpu(resp->flags);
        mutex_unlock(&bp->hwrm_cmd_lock);
        return 0;
}

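/* Return the trust status of @vf.  When firmware supports trusted VFs,
 * query the current flag from firmware; otherwise fall back to the
 * driver-maintained BNXT_VF_TRUST flag.
 */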
static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
                return !!(vf->flags & BNXT_VF_TRUST);

        bnxt_hwrm_func_qcfg_flags(bp, vf);
        return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF);
}

static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        struct hwrm_func_cfg_input req = {0};

        if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
                return 0;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        if (vf->flags & BNXT_VF_TRUST)
                req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
        else
                req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;

        if (bnxt_vf_ndo_prep(bp, vf_id))
                return -EINVAL;

        vf = &bp->pf.vf[vf_id];
        if (trusted)
                vf->flags |= BNXT_VF_TRUST;
        else
                vf->flags &= ~BNXT_VF_TRUST;

        bnxt_hwrm_set_trusted_vf(bp, vf);
        return 0;
}

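/* .ndo_get_vf_config handler: report the MAC address, tx rate limits,
 * VLAN, spoof-check, trust and link state settings of @vf_id.
 */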
int bnxt_get_vf_config(struct net_device *dev, int vf_id,
                       struct ifla_vf_info *ivi)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        ivi->vf = vf_id;
        vf = &bp->pf.vf[vf_id];

        if (is_valid_ether_addr(vf->mac_addr))
                memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
        else
                memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
        ivi->max_tx_rate = vf->max_tx_rate;
        ivi->min_tx_rate = vf->min_tx_rate;
        ivi->vlan = vf->vlan;
        if (vf->flags & BNXT_VF_QOS)
                ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
        else
                ivi->qos = 0;
        ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
        ivi->trusted = bnxt_is_trusted_vf(bp, vf);
        if (!(vf->flags & BNXT_VF_LINK_FORCED))
                ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
        else if (vf->flags & BNXT_VF_LINK_UP)
                ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
        else
                ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

        return 0;
}

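/* .ndo_set_vf_mac handler: record an administratively assigned MAC
 * address for @vf_id and program it as the VF's default MAC.
 */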
int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;
        /* reject bc or mc mac addr, zero mac addr means allow
         * VF to use its own mac addr
         */
        if (is_multicast_ether_addr(mac)) {
                netdev_err(dev, "Invalid VF ethernet address\n");
                return -EINVAL;
        }
        vf = &bp->pf.vf[vf_id];

        memcpy(vf->mac_addr, mac, ETH_ALEN);
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

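/* .ndo_set_vf_vlan handler: program a default VLAN for @vf_id.  Only
 * 802.1Q tags without a user priority are supported.
 */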
int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
                     __be16 vlan_proto)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        u16 vlan_tag;
        int rc;

        if (bp->hwrm_spec_code < 0x10201)
                return -ENOTSUPP;

        if (vlan_proto != htons(ETH_P_8021Q))
                return -EPROTONOSUPPORT;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        /* TODO: implement proper handling of user priority; for now,
         * fail the command if a priority is specified.
         */
        if (vlan_id > 4095 || qos)
                return -EINVAL;

        vf = &bp->pf.vf[vf_id];
        vlan_tag = vlan_id;
        if (vlan_tag == vf->vlan)
                return 0;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.dflt_vlan = cpu_to_le16(vlan_tag);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc)
                vf->vlan = vlan_tag;
        return rc;
}

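/* .ndo_set_vf_rate handler: validate the requested min/max tx rates
 * against the PF link speed and program them into firmware.
 */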
int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
                   int max_tx_rate)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        u32 pf_link_speed;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        vf = &bp->pf.vf[vf_id];
        pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
        if (max_tx_rate > pf_link_speed) {
                netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
                            max_tx_rate, vf_id);
                return -EINVAL;
        }

        if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
                netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
                            min_tx_rate, vf_id);
                return -EINVAL;
        }
        if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
                return 0;
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
        req.max_bw = cpu_to_le32(max_tx_rate);
        req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
        req.min_bw = cpu_to_le32(min_tx_rate);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                vf->min_tx_rate = min_tx_rate;
                vf->max_tx_rate = max_tx_rate;
        }
        return rc;
}

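/* .ndo_set_vf_link_state handler: record the requested link state in
 * vf->flags and notify the VF with a forwarded link status change
 * async event.
 */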
int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vf_info *vf;
        int rc;

        rc = bnxt_vf_ndo_prep(bp, vf_id);
        if (rc)
                return rc;

        vf = &bp->pf.vf[vf_id];

        vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
        switch (link) {
        case IFLA_VF_LINK_STATE_AUTO:
                vf->flags |= BNXT_VF_LINK_UP;
                break;
        case IFLA_VF_LINK_STATE_DISABLE:
                vf->flags |= BNXT_VF_LINK_FORCED;
                break;
        case IFLA_VF_LINK_STATE_ENABLE:
                vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
                break;
        default:
                netdev_err(bp->dev, "Invalid link option\n");
                rc = -EINVAL;
                break;
        }
        if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
                rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
                        ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
        return rc;
}

static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
        int i;
        struct bnxt_vf_info *vf;

        for (i = 0; i < num_vfs; i++) {
                vf = &bp->pf.vf[i];
                memset(vf, 0, sizeof(*vf));
        }
        return 0;
}

static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
        int i, rc = 0;
        struct bnxt_pf_info *pf = &bp->pf;
        struct hwrm_func_vf_resc_free_input req = {0};

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);

        mutex_lock(&bp->hwrm_cmd_lock);
        for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
                req.vf_id = cpu_to_le16(i);
                rc = _hwrm_send_message(bp, &req, sizeof(req),
                                        HWRM_CMD_TIMEOUT);
                if (rc)
                        break;
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
}

static void bnxt_free_vf_resources(struct bnxt *bp)
{
        struct pci_dev *pdev = bp->pdev;
        int i;

        kfree(bp->pf.vf_event_bmap);
        bp->pf.vf_event_bmap = NULL;

        for (i = 0; i < 4; i++) {
                if (bp->pf.hwrm_cmd_req_addr[i]) {
                        dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
                                          bp->pf.hwrm_cmd_req_addr[i],
                                          bp->pf.hwrm_cmd_req_dma_addr[i]);
                        bp->pf.hwrm_cmd_req_addr[i] = NULL;
                }
        }

        bp->pf.active_vfs = 0;
        kfree(bp->pf.vf);
        bp->pf.vf = NULL;
}

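/* Allocate the per-VF state array and the DMA-coherent pages used to
 * receive forwarded HWRM commands from the VFs, then carve a request
 * slot out of those pages for each VF.
 */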
static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
        struct pci_dev *pdev = bp->pdev;
        u32 nr_pages, size, i, j, k = 0;

        bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
        if (!bp->pf.vf)
                return -ENOMEM;

        bnxt_set_vf_attr(bp, num_vfs);

        size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
        nr_pages = size / BNXT_PAGE_SIZE;
        if (size & (BNXT_PAGE_SIZE - 1))
                nr_pages++;

        for (i = 0; i < nr_pages; i++) {
                bp->pf.hwrm_cmd_req_addr[i] =
                        dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
                                           &bp->pf.hwrm_cmd_req_dma_addr[i],
                                           GFP_KERNEL);

                if (!bp->pf.hwrm_cmd_req_addr[i])
                        return -ENOMEM;

                for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
                        struct bnxt_vf_info *vf = &bp->pf.vf[k];

                        vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
                                                j * BNXT_HWRM_REQ_MAX_SIZE;
                        vf->hwrm_cmd_req_dma_addr =
                                bp->pf.hwrm_cmd_req_dma_addr[i] + j *
                                BNXT_HWRM_REQ_MAX_SIZE;
                        k++;
                }
        }

        /* Max 128 VFs */
        bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
        if (!bp->pf.vf_event_bmap)
                return -ENOMEM;

        bp->pf.hwrm_cmd_req_pages = nr_pages;
        return 0;
}

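/* Register the VF command request buffer pages with firmware so that
 * HWRM commands issued by VFs can be forwarded to the PF driver.
 */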
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
        struct hwrm_func_buf_rgtr_input req = {0};

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);

        req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
        req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
        req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
        req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
        req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
        req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
        req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

/* Caller holds bp->hwrm_cmd_lock mutex lock */
static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
{
        struct hwrm_func_cfg_input req = {0};
        struct bnxt_vf_info *vf;

        vf = &bp->pf.vf[vf_id];
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);

        if (is_valid_ether_addr(vf->mac_addr)) {
                req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
                memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN);
        }
        if (vf->vlan) {
                req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
                req.dflt_vlan = cpu_to_le16(vf->vlan);
        }
        if (vf->max_tx_rate) {
                req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
                req.max_bw = cpu_to_le32(vf->max_tx_rate);
#ifdef HAVE_IFLA_TX_RATE
                req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
                req.min_bw = cpu_to_le32(vf->min_tx_rate);
#endif
        }
        if (vf->flags & BNXT_VF_TRUST)
                req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);

        _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
        struct hwrm_func_vf_resource_cfg_input req = {0};
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
        u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
        u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
        struct bnxt_pf_info *pf = &bp->pf;
        int i, rc = 0, min = 1;
        u16 vf_msix = 0;
        u16 vf_rss;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);

        if (bp->flags & BNXT_FLAG_CHIP_P5) {
                vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
                vf_ring_grps = 0;
        } else {
                vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
        }
        vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp);
        vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp);
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
        else
                vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
        vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
        vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
        vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
        vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;

        req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
        if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                min = 0;
                req.min_rsscos_ctx = cpu_to_le16(min);
        }
        if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
            pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
                req.min_cmpl_rings = cpu_to_le16(min);
                req.min_tx_rings = cpu_to_le16(min);
                req.min_rx_rings = cpu_to_le16(min);
                req.min_l2_ctxs = cpu_to_le16(min);
                req.min_vnics = cpu_to_le16(min);
                req.min_stat_ctx = cpu_to_le16(min);
                if (!(bp->flags & BNXT_FLAG_CHIP_P5))
                        req.min_hw_ring_grps = cpu_to_le16(min);
        } else {
                vf_cp_rings /= num_vfs;
                vf_tx_rings /= num_vfs;
                vf_rx_rings /= num_vfs;
                vf_vnics /= num_vfs;
                vf_stat_ctx /= num_vfs;
                vf_ring_grps /= num_vfs;
                vf_rss /= num_vfs;

                req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
                req.min_tx_rings = cpu_to_le16(vf_tx_rings);
                req.min_rx_rings = cpu_to_le16(vf_rx_rings);
                req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
                req.min_vnics = cpu_to_le16(vf_vnics);
                req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
                req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
                req.min_rsscos_ctx = cpu_to_le16(vf_rss);
        }
        req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
        req.max_tx_rings = cpu_to_le16(vf_tx_rings);
        req.max_rx_rings = cpu_to_le16(vf_rx_rings);
        req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
        req.max_vnics = cpu_to_le16(vf_vnics);
        req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
        req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
        req.max_rsscos_ctx = cpu_to_le16(vf_rss);
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                req.max_msix = cpu_to_le16(vf_msix / num_vfs);

        mutex_lock(&bp->hwrm_cmd_lock);
        for (i = 0; i < num_vfs; i++) {
                if (reset)
                        __bnxt_set_vf_params(bp, i);

                req.vf_id = cpu_to_le16(pf->first_vf_id + i);
                rc = _hwrm_send_message(bp, &req, sizeof(req),
                                        HWRM_CMD_TIMEOUT);
                if (rc)
                        break;
                pf->active_vfs = i + 1;
                pf->vf[i].fw_fid = pf->first_vf_id + i;
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        if (pf->active_vfs) {
                u16 n = pf->active_vfs;

                hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
                hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
                hw_resc->max_hw_ring_grps -=
                        le16_to_cpu(req.min_hw_ring_grps) * n;
                hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
                hw_resc->max_rsscos_ctxs -= le16_to_cpu(req.min_rsscos_ctx) * n;
                hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
                hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
                if (bp->flags & BNXT_FLAG_CHIP_P5)
                        hw_resc->max_nqs -= vf_msix;

                rc = pf->active_vfs;
        }
        return rc;
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
        u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
        struct hwrm_func_cfg_input req = {0};
        struct bnxt_pf_info *pf = &bp->pf;
        int total_vf_tx_rings = 0;
        u16 vf_ring_grps;
        u32 mtu, i;
        int rc = 0;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);

        /* Remaining rings are distributed equally among VFs for now */
        vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
        vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs;
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
                              num_vfs;
        else
                vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
                              num_vfs;
        vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
        vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
        vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
        vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
                                  FUNC_CFG_REQ_ENABLES_MRU |
                                  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
                                  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
                                  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
                                  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
                                  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
                                  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
                                  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
                                  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

        mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
        req.mru = cpu_to_le16(mtu);
        req.mtu = cpu_to_le16(mtu);

        req.num_rsscos_ctxs = cpu_to_le16(1);
        req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
        req.num_tx_rings = cpu_to_le16(vf_tx_rings);
        req.num_rx_rings = cpu_to_le16(vf_rx_rings);
        req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
        req.num_l2_ctxs = cpu_to_le16(4);

        req.num_vnics = cpu_to_le16(vf_vnics);
        /* FIXME spec currently uses 1 bit for stats ctx */
        req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

        mutex_lock(&bp->hwrm_cmd_lock);
        for (i = 0; i < num_vfs; i++) {
                int vf_tx_rsvd = vf_tx_rings;

                req.fid = cpu_to_le16(pf->first_vf_id + i);
                rc = _hwrm_send_message(bp, &req, sizeof(req),
                                        HWRM_CMD_TIMEOUT);
                if (rc)
                        break;
                pf->active_vfs = i + 1;
                pf->vf[i].fw_fid = le16_to_cpu(req.fid);
                rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
                                              &vf_tx_rsvd);
                if (rc)
                        break;
                total_vf_tx_rings += vf_tx_rsvd;
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        if (pf->active_vfs) {
                hw_resc->max_tx_rings -= total_vf_tx_rings;
                hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
                hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
                hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
                hw_resc->max_rsscos_ctxs -= num_vfs;
                hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
                hw_resc->max_vnics -= vf_vnics * num_vfs;
                rc = pf->active_vfs;
        }
        return rc;
}

static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
        if (BNXT_NEW_RM(bp))
                return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset);
        else
                return bnxt_hwrm_func_cfg(bp, num_vfs);
}

int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
        int rc;

        /* Register buffers for VFs */
        rc = bnxt_hwrm_func_buf_rgtr(bp);
        if (rc)
                return rc;

        /* Reserve resources for VFs */
        rc = bnxt_func_cfg(bp, *num_vfs, reset);
        if (rc != *num_vfs) {
                if (rc <= 0) {
                        netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
                        *num_vfs = 0;
                        return rc;
                }
                netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
                            rc);
                *num_vfs = rc;
        }

        bnxt_ulp_sriov_cfg(bp, *num_vfs);
        return 0;
}

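/* Enable SR-IOV: shrink the requested VF count until the PF has enough
 * spare rings/VNICs/RSS contexts for it, allocate VF state, reserve
 * firmware resources and finally enable SR-IOV on the PCI device.
 */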
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
        int rc = 0, vfs_supported;
        int min_rx_rings, min_tx_rings, min_rss_ctxs;
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
        int tx_ok = 0, rx_ok = 0, rss_ok = 0;
        int avail_cp, avail_stat;

        /* Check if we can enable the requested number of VFs. At a minimum
         * we require 1 RX and 1 TX ring for each VF. In this minimum conf
         * features like TPA will not be available.
         */
        vfs_supported = *num_vfs;

        avail_cp = bnxt_get_avail_cp_rings_for_en(bp);
        avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp);
        avail_cp = min_t(int, avail_cp, avail_stat);

        while (vfs_supported) {
                min_rx_rings = vfs_supported;
                min_tx_rings = vfs_supported;
                min_rss_ctxs = vfs_supported;

                if (bp->flags & BNXT_FLAG_AGG_RINGS) {
                        if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
                            min_rx_rings)
                                rx_ok = 1;
                } else {
                        if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
                            min_rx_rings)
                                rx_ok = 1;
                }
                if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
                    avail_cp < min_rx_rings)
                        rx_ok = 0;

                if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
                    avail_cp >= min_tx_rings)
                        tx_ok = 1;

                if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
                    min_rss_ctxs)
                        rss_ok = 1;

                if (tx_ok && rx_ok && rss_ok)
                        break;

                vfs_supported--;
        }

        if (!vfs_supported) {
                netdev_err(bp->dev, "Cannot enable VFs as all resources are used by PF\n");
                return -EINVAL;
        }

        if (vfs_supported != *num_vfs) {
                netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
                            *num_vfs, vfs_supported);
                *num_vfs = vfs_supported;
        }

        rc = bnxt_alloc_vf_resources(bp, *num_vfs);
        if (rc)
                goto err_out1;

        rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
        if (rc)
                goto err_out2;

        rc = pci_enable_sriov(bp->pdev, *num_vfs);
        if (rc)
                goto err_out2;

        return 0;

err_out2:
        /* Free the resources reserved for various VFs */
        bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
        bnxt_free_vf_resources(bp);

        return rc;
}

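/* Disable SR-IOV: destroy VF representors, release the PCI VFs (or
 * warn if they are still assigned to VMs), free VF resources and
 * reclaim the reserved resources for the PF.
 */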
void bnxt_sriov_disable(struct bnxt *bp)
{
        u16 num_vfs = pci_num_vf(bp->pdev);

        if (!num_vfs)
                return;

        /* synchronize VF and VF-rep create and destroy */
        mutex_lock(&bp->sriov_lock);
        bnxt_vf_reps_destroy(bp);

        if (pci_vfs_assigned(bp->pdev)) {
                bnxt_hwrm_fwd_async_event_cmpl(
                        bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
                netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
                            num_vfs);
        } else {
                pci_disable_sriov(bp->pdev);
                /* Free the HW resources reserved for various VFs */
                bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
        }
        mutex_unlock(&bp->sriov_lock);

        bnxt_free_vf_resources(bp);

        /* Reclaim all resources for the PF. */
        rtnl_lock();
        bnxt_restore_pf_fw_resources(bp);
        rtnl_unlock();

        bnxt_ulp_sriov_cfg(bp, 0);
}

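/* .sriov_configure PCI driver callback: validate the request, tear
 * down any existing VFs and enable the newly requested number.
 */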
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnxt *bp = netdev_priv(dev);

        if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
                netdev_warn(dev, "SRIOV is not allowed if the irq mode is not MSIX\n");
                return 0;
        }

        rtnl_lock();
        if (!netif_running(dev)) {
                netdev_warn(dev, "Reject SRIOV config request since interface is down!\n");
                rtnl_unlock();
                return 0;
        }
        if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
                netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
                rtnl_unlock();
                return 0;
        }
        bp->sriov_cfg = true;
        rtnl_unlock();

        if (pci_vfs_assigned(bp->pdev)) {
                netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
                num_vfs = 0;
                goto sriov_cfg_exit;
        }

        /* Check if the number of enabled VFs is the same as requested */
        if (num_vfs && num_vfs == bp->pf.active_vfs)
                goto sriov_cfg_exit;

        /* if there are previously existing VFs, clean them up */
        bnxt_sriov_disable(bp);
        if (!num_vfs)
                goto sriov_cfg_exit;

        bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
        bp->sriov_cfg = false;
        wake_up(&bp->sriov_cfg_wait);

        return num_vfs;
}

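/* Forward a locally built HWRM response back to @vf through the
 * HWRM_FWD_RESP channel, targeting the VF's response address and
 * completion ring.
 */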
static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
                              void *encap_resp, __le64 encap_resp_addr,
                              __le16 encap_resp_cpr, u32 msg_size)
{
        int rc = 0;
        struct hwrm_fwd_resp_input req = {0};

        if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
                return -EINVAL;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);

        /* Set the new target id */
        req.target_id = cpu_to_le16(vf->fw_fid);
        req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
        req.encap_resp_len = cpu_to_le16(msg_size);
        req.encap_resp_addr = encap_resp_addr;
        req.encap_resp_cmpl_ring = encap_resp_cpr;
        memcpy(req.encap_resp, encap_resp, msg_size);

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
        return rc;
}

static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
                                  u32 msg_size)
{
        int rc = 0;
        struct hwrm_reject_fwd_resp_input req = {0};

        if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
                return -EINVAL;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
        /* Set the new target id */
        req.target_id = cpu_to_le16(vf->fw_fid);
        req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
        memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
        return rc;
}

static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
                                   u32 msg_size)
{
        int rc = 0;
        struct hwrm_exec_fwd_resp_input req = {0};

        if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
                return -EINVAL;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
        /* Set the new target id */
        req.target_id = cpu_to_le16(vf->fw_fid);
        req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
        memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_err(bp->dev, "hwrm_exec_fwd_resp failed. rc:%d\n", rc);
        return rc;
}

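/* Handle a forwarded HWRM_FUNC_VF_CFG command: allow the VF to set a
 * MAC address only if it is trusted, no MAC was administratively
 * assigned, or the requested MAC matches the assigned one; otherwise
 * reject the command.
 */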
static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
        struct hwrm_func_vf_cfg_input *req =
                (struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;

        /* Allow VF to set a valid MAC address, if trust is set to on or
         * if the PF-assigned MAC address is zero
         */
        if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
                bool trust = bnxt_is_trusted_vf(bp, vf);

                if (is_valid_ether_addr(req->dflt_mac_addr) &&
                    (trust || !is_valid_ether_addr(vf->mac_addr) ||
                     ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
                        ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
                        return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
                }
                return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
        }
        return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
}

static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
        struct hwrm_cfa_l2_filter_alloc_input *req =
                (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
        bool mac_ok = false;

        if (!is_valid_ether_addr((const u8 *)req->l2_addr))
                return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);

        /* Allow VF to set a valid MAC address, if trust is set to on.
         * Otherwise, the VF MAC address must first match the MAC address
         * in the PF's context, or it must match the VF MAC address if
         * firmware spec >= 1.2.2
         */
        if (bnxt_is_trusted_vf(bp, vf)) {
                mac_ok = true;
        } else if (is_valid_ether_addr(vf->mac_addr)) {
                if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
                        mac_ok = true;
        } else if (is_valid_ether_addr(vf->vf_mac_addr)) {
                if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
                        mac_ok = true;
        } else {
                /* There are two cases:
                 * 1. If firmware spec < 0x10202, VF MAC address is not
                 *    forwarded to the PF and so it doesn't have to match
                 * 2. Allow VF to modify its own MAC when PF has not assigned
                 *    a valid MAC address and firmware spec >= 0x10202
                 */
                mac_ok = true;
        }
        if (mac_ok)
                return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
        return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}

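/* Handle a forwarded HWRM_PORT_PHY_QCFG command: report the real link
 * state unless the PF has forced the VF link, in which case a
 * synthetic response reflecting the forced state is returned.
 */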
static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        int rc = 0;

        if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
                /* real link */
                rc = bnxt_hwrm_exec_fwd_resp(
                        bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
        } else {
                struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0};
                struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

                phy_qcfg_req =
                        (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
                mutex_lock(&bp->hwrm_cmd_lock);
                memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
                       sizeof(phy_qcfg_resp));
                mutex_unlock(&bp->hwrm_cmd_lock);
                phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
                phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
                phy_qcfg_resp.valid = 1;

                if (vf->flags & BNXT_VF_LINK_UP) {
                        /* if physical link is down, force link up on VF */
                        if (phy_qcfg_resp.link !=
                            PORT_PHY_QCFG_RESP_LINK_LINK) {
                                phy_qcfg_resp.link =
                                        PORT_PHY_QCFG_RESP_LINK_LINK;
                                phy_qcfg_resp.link_speed = cpu_to_le16(
                                        PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
                                phy_qcfg_resp.duplex_cfg =
                                        PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
                                phy_qcfg_resp.duplex_state =
                                        PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
                                phy_qcfg_resp.pause =
                                        (PORT_PHY_QCFG_RESP_PAUSE_TX |
                                         PORT_PHY_QCFG_RESP_PAUSE_RX);
                        }
                } else {
                        /* force link down */
                        phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
                        phy_qcfg_resp.link_speed = 0;
                        phy_qcfg_resp.duplex_state =
                                PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
                        phy_qcfg_resp.pause = 0;
                }
                rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
                                        phy_qcfg_req->resp_addr,
                                        phy_qcfg_req->cmpl_ring,
                                        sizeof(phy_qcfg_resp));
        }
        return rc;
}

static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
        int rc = 0;
        struct input *encap_req = vf->hwrm_cmd_req_addr;
        u32 req_type = le16_to_cpu(encap_req->req_type);

        switch (req_type) {
        case HWRM_FUNC_VF_CFG:
                rc = bnxt_vf_configure_mac(bp, vf);
                break;
        case HWRM_CFA_L2_FILTER_ALLOC:
                rc = bnxt_vf_validate_set_mac(bp, vf);
                break;
        case HWRM_FUNC_CFG:
                /* TODO Validate if VF is allowed to change mac address,
                 * mtu, num of rings etc
                 */
                rc = bnxt_hwrm_exec_fwd_resp(
                        bp, vf, sizeof(struct hwrm_func_cfg_input));
                break;
        case HWRM_PORT_PHY_QCFG:
                rc = bnxt_vf_set_link(bp, vf);
                break;
        default:
                break;
        }
        return rc;
}

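/* Walk the VF event bitmap and validate/forward each pending VF
 * command to firmware.
 */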
void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
        u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

        /* Scan through VFs and process commands */
        while (1) {
                vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
                if (vf_id >= active_vfs)
                        break;

                clear_bit(vf_id, bp->pf.vf_event_bmap);
                bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
                i = vf_id + 1;
        }
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
        struct hwrm_func_qcaps_input req = {0};
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
        req.fid = cpu_to_le16(0xffff);

        mutex_lock(&bp->hwrm_cmd_lock);
        if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
                goto update_vf_mac_exit;

        /* Store MAC address from the firmware. There are 2 cases:
         * 1. MAC address is valid. It is assigned from the PF and we
         *    need to override the current VF MAC address with it.
         * 2. MAC address is zero. The VF will use a random MAC address by
         *    default but the stored zero MAC will allow the VF user to
         *    change the random MAC address using ndo_set_mac_address()
         *    if desired.
         */
        if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
                memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);

        /* overwrite netdev dev_addr with admin VF MAC */
        if (is_valid_ether_addr(bp->vf.mac_addr))
                memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
}

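/* VF only: ask the PF to approve a MAC address change.  With @strict
 * set, a firmware rejection is returned as -EADDRNOTAVAIL; otherwise
 * it is ignored.
 */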
int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
        struct hwrm_func_vf_cfg_input req = {0};
        int rc = 0;

        if (!BNXT_VF(bp))
                return 0;

        if (bp->hwrm_spec_code < 0x10202) {
                if (is_valid_ether_addr(bp->vf.mac_addr))
                        rc = -EADDRNOTAVAIL;
                goto mac_done;
        }
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
        req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
mac_done:
        if (rc && strict) {
                rc = -EADDRNOTAVAIL;
                netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
                            mac);
                return rc;
        }
        return 0;
}
#else
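
/* Stubs used when CONFIG_BNXT_SRIOV is not set. */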
int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
        if (*num_vfs)
                return -EOPNOTSUPP;
        return 0;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
        netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}

int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
        return 0;
}
#endif