1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /* Copyright(c) 1999 - 2018 Intel Corporation. */
3*4882a593Smuzhiyun
4*4882a593Smuzhiyun #include "vf.h"
5*4882a593Smuzhiyun #include "ixgbevf.h"
6*4882a593Smuzhiyun
7*4882a593Smuzhiyun /* On Hyper-V, to reset, we need to read from this offset
8*4882a593Smuzhiyun * from the PCI config space. This is the mechanism used on
9*4882a593Smuzhiyun * Hyper-V to support PF/VF communication.
10*4882a593Smuzhiyun */
11*4882a593Smuzhiyun #define IXGBE_HV_RESET_OFFSET 0x201
12*4882a593Smuzhiyun
ixgbevf_write_msg_read_ack(struct ixgbe_hw * hw,u32 * msg,u32 * retmsg,u16 size)13*4882a593Smuzhiyun static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
14*4882a593Smuzhiyun u32 *retmsg, u16 size)
15*4882a593Smuzhiyun {
16*4882a593Smuzhiyun struct ixgbe_mbx_info *mbx = &hw->mbx;
17*4882a593Smuzhiyun s32 retval = mbx->ops.write_posted(hw, msg, size);
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun if (retval)
20*4882a593Smuzhiyun return retval;
21*4882a593Smuzhiyun
22*4882a593Smuzhiyun return mbx->ops.read_posted(hw, retmsg, size);
23*4882a593Smuzhiyun }
24*4882a593Smuzhiyun
25*4882a593Smuzhiyun /**
26*4882a593Smuzhiyun * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
27*4882a593Smuzhiyun * @hw: pointer to hardware structure
28*4882a593Smuzhiyun *
29*4882a593Smuzhiyun * Starts the hardware by filling the bus info structure and media type, clears
30*4882a593Smuzhiyun * all on chip counters, initializes receive address registers, multicast
31*4882a593Smuzhiyun * table, VLAN filter table, calls routine to set up link and flow control
32*4882a593Smuzhiyun * settings, and leaves transmit and receive units disabled and uninitialized
33*4882a593Smuzhiyun **/
ixgbevf_start_hw_vf(struct ixgbe_hw * hw)34*4882a593Smuzhiyun static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
35*4882a593Smuzhiyun {
36*4882a593Smuzhiyun /* Clear adapter stopped flag */
37*4882a593Smuzhiyun hw->adapter_stopped = false;
38*4882a593Smuzhiyun
39*4882a593Smuzhiyun return 0;
40*4882a593Smuzhiyun }
41*4882a593Smuzhiyun
42*4882a593Smuzhiyun /**
43*4882a593Smuzhiyun * ixgbevf_init_hw_vf - virtual function hardware initialization
44*4882a593Smuzhiyun * @hw: pointer to hardware structure
45*4882a593Smuzhiyun *
46*4882a593Smuzhiyun * Initialize the hardware by resetting the hardware and then starting
47*4882a593Smuzhiyun * the hardware
48*4882a593Smuzhiyun **/
ixgbevf_init_hw_vf(struct ixgbe_hw * hw)49*4882a593Smuzhiyun static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
50*4882a593Smuzhiyun {
51*4882a593Smuzhiyun s32 status = hw->mac.ops.start_hw(hw);
52*4882a593Smuzhiyun
53*4882a593Smuzhiyun hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun return status;
56*4882a593Smuzhiyun }
57*4882a593Smuzhiyun
/**
 * ixgbevf_reset_hw_vf - Performs hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks and
 * clears all interrupts. On success the permanent MAC address and the
 * multicast filter type advertised by the PF are cached in @hw.
 **/
static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
	s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* Call adapter stop to disable tx/rx and clear interrupts */
	hw->mac.ops.stop_adapter(hw);

	/* reset the api version */
	hw->api_version = ixgbe_mbox_api_10;

	/* trigger the VF reset and flush so the write reaches the device
	 * before we start polling for completion
	 */
	IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
	IXGBE_WRITE_FLUSH(hw);

	/* we cannot reset while the RSTI / RSTD bits are asserted */
	while (!mbx->ops.check_for_rst(hw) && timeout) {
		timeout--;
		udelay(5);
	}

	if (!timeout)
		return IXGBE_ERR_RESET_FAILED;

	/* mailbox timeout can now become active */
	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;

	/* announce the reset to the PF; give it time to act before the
	 * posted read below
	 */
	msgbuf[0] = IXGBE_VF_RESET;
	mbx->ops.write_posted(hw, msgbuf, 1);

	mdelay(10);

	/* set our "perm_addr" based on info provided by PF
	 * also set up the mc_filter_type which is piggy backed
	 * on the mac address in word 3
	 */
	ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
	if (ret_val)
		return ret_val;

	/* New versions of the PF may NACK the reset return message
	 * to indicate that no MAC address has yet been assigned for
	 * the VF.
	 */
	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
		return IXGBE_ERR_INVALID_MAC_ADDR;

	/* NACK means "no MAC assigned yet": only copy the address on ACK */
	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
		ether_addr_copy(hw->mac.perm_addr, addr);

	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];

	return 0;
}
122*4882a593Smuzhiyun
/**
 * ixgbevf_hv_reset_hw_vf - reset via Hyper-V
 * @hw: pointer to private hardware struct
 *
 * Hyper-V variant; the VF/PF communication is through the PCI
 * config space.
 */
ixgbevf_hv_reset_hw_vf(struct ixgbe_hw * hw)128*4882a593Smuzhiyun static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
129*4882a593Smuzhiyun {
130*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_PCI_MMCONFIG)
131*4882a593Smuzhiyun struct ixgbevf_adapter *adapter = hw->back;
132*4882a593Smuzhiyun int i;
133*4882a593Smuzhiyun
134*4882a593Smuzhiyun for (i = 0; i < 6; i++)
135*4882a593Smuzhiyun pci_read_config_byte(adapter->pdev,
136*4882a593Smuzhiyun (i + IXGBE_HV_RESET_OFFSET),
137*4882a593Smuzhiyun &hw->mac.perm_addr[i]);
138*4882a593Smuzhiyun return 0;
139*4882a593Smuzhiyun #else
140*4882a593Smuzhiyun pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
141*4882a593Smuzhiyun return -EOPNOTSUPP;
142*4882a593Smuzhiyun #endif
143*4882a593Smuzhiyun }
144*4882a593Smuzhiyun
/**
 * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
 * @hw: pointer to hardware structure
 *
 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 * disables transmit and receive units. The adapter_stopped flag is used by
 * the shared code and drivers to determine if the adapter is in a stopped
 * state and should not touch the hardware.
 **/
static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
{
	u32 number_of_queues;
	u32 reg_val;
	u16 i;

	/* Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit by stopping each queue */
	number_of_queues = hw->mac.max_rx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		if (reg_val & IXGBE_RXDCTL_ENABLE) {
			reg_val &= ~IXGBE_RXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
		}
	}

	/* flush the RXDCTL writes before masking interrupts */
	IXGBE_WRITE_FLUSH(hw);

	/* Clear interrupt mask to stop from interrupts being generated */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts (read of EICR clears the bits) */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	/* Disable the transmit unit. Each queue must be disabled. */
	number_of_queues = hw->mac.max_tx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		if (reg_val & IXGBE_TXDCTL_ENABLE) {
			reg_val &= ~IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
		}
	}

	return 0;
}
195*4882a593Smuzhiyun
196*4882a593Smuzhiyun /**
197*4882a593Smuzhiyun * ixgbevf_mta_vector - Determines bit-vector in multicast table to set
198*4882a593Smuzhiyun * @hw: pointer to hardware structure
199*4882a593Smuzhiyun * @mc_addr: the multicast address
200*4882a593Smuzhiyun *
201*4882a593Smuzhiyun * Extracts the 12 bits, from a multicast address, to determine which
202*4882a593Smuzhiyun * bit-vector to set in the multicast table. The hardware uses 12 bits, from
203*4882a593Smuzhiyun * incoming Rx multicast addresses, to determine the bit-vector to check in
204*4882a593Smuzhiyun * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
205*4882a593Smuzhiyun * by the MO field of the MCSTCTRL. The MO field is set during initialization
206*4882a593Smuzhiyun * to mc_filter_type.
207*4882a593Smuzhiyun **/
ixgbevf_mta_vector(struct ixgbe_hw * hw,u8 * mc_addr)208*4882a593Smuzhiyun static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
209*4882a593Smuzhiyun {
210*4882a593Smuzhiyun u32 vector = 0;
211*4882a593Smuzhiyun
212*4882a593Smuzhiyun switch (hw->mac.mc_filter_type) {
213*4882a593Smuzhiyun case 0: /* use bits [47:36] of the address */
214*4882a593Smuzhiyun vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
215*4882a593Smuzhiyun break;
216*4882a593Smuzhiyun case 1: /* use bits [46:35] of the address */
217*4882a593Smuzhiyun vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
218*4882a593Smuzhiyun break;
219*4882a593Smuzhiyun case 2: /* use bits [45:34] of the address */
220*4882a593Smuzhiyun vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
221*4882a593Smuzhiyun break;
222*4882a593Smuzhiyun case 3: /* use bits [43:32] of the address */
223*4882a593Smuzhiyun vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
224*4882a593Smuzhiyun break;
225*4882a593Smuzhiyun default: /* Invalid mc_filter_type */
226*4882a593Smuzhiyun break;
227*4882a593Smuzhiyun }
228*4882a593Smuzhiyun
229*4882a593Smuzhiyun /* vector can only be 12-bits or boundary will be exceeded */
230*4882a593Smuzhiyun vector &= 0xFFF;
231*4882a593Smuzhiyun return vector;
232*4882a593Smuzhiyun }
233*4882a593Smuzhiyun
234*4882a593Smuzhiyun /**
235*4882a593Smuzhiyun * ixgbevf_get_mac_addr_vf - Read device MAC address
236*4882a593Smuzhiyun * @hw: pointer to the HW structure
237*4882a593Smuzhiyun * @mac_addr: pointer to storage for retrieved MAC address
238*4882a593Smuzhiyun **/
ixgbevf_get_mac_addr_vf(struct ixgbe_hw * hw,u8 * mac_addr)239*4882a593Smuzhiyun static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
240*4882a593Smuzhiyun {
241*4882a593Smuzhiyun ether_addr_copy(mac_addr, hw->mac.perm_addr);
242*4882a593Smuzhiyun
243*4882a593Smuzhiyun return 0;
244*4882a593Smuzhiyun }
245*4882a593Smuzhiyun
ixgbevf_set_uc_addr_vf(struct ixgbe_hw * hw,u32 index,u8 * addr)246*4882a593Smuzhiyun static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
247*4882a593Smuzhiyun {
248*4882a593Smuzhiyun u32 msgbuf[3], msgbuf_chk;
249*4882a593Smuzhiyun u8 *msg_addr = (u8 *)(&msgbuf[1]);
250*4882a593Smuzhiyun s32 ret_val;
251*4882a593Smuzhiyun
252*4882a593Smuzhiyun memset(msgbuf, 0, sizeof(msgbuf));
253*4882a593Smuzhiyun /* If index is one then this is the start of a new list and needs
254*4882a593Smuzhiyun * indication to the PF so it can do it's own list management.
255*4882a593Smuzhiyun * If it is zero then that tells the PF to just clear all of
256*4882a593Smuzhiyun * this VF's macvlans and there is no new list.
257*4882a593Smuzhiyun */
258*4882a593Smuzhiyun msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
259*4882a593Smuzhiyun msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
260*4882a593Smuzhiyun msgbuf_chk = msgbuf[0];
261*4882a593Smuzhiyun
262*4882a593Smuzhiyun if (addr)
263*4882a593Smuzhiyun ether_addr_copy(msg_addr, addr);
264*4882a593Smuzhiyun
265*4882a593Smuzhiyun ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
266*4882a593Smuzhiyun ARRAY_SIZE(msgbuf));
267*4882a593Smuzhiyun if (!ret_val) {
268*4882a593Smuzhiyun msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
269*4882a593Smuzhiyun
270*4882a593Smuzhiyun if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK))
271*4882a593Smuzhiyun return -ENOMEM;
272*4882a593Smuzhiyun }
273*4882a593Smuzhiyun
274*4882a593Smuzhiyun return ret_val;
275*4882a593Smuzhiyun }
276*4882a593Smuzhiyun
/**
 * ixgbevf_hv_set_uc_addr_vf - Hyper-V variant - just a stub
 * @hw: unused
 * @index: unused
 * @addr: unused
 *
 * Unicast filter programming is not available over the Hyper-V
 * PF/VF channel, so this always fails.
 */
static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
	return -EOPNOTSUPP;
}
281*4882a593Smuzhiyun
/**
 * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
 * @hw: pointer to hardware structure
 * @reta: buffer to fill with RETA contents.
 * @num_rx_queues: Number of Rx queues configured for this port
 *
 * The "reta" buffer should be big enough to contain 32 registers.
 *
 * Returns: 0 on success.
 *          if API doesn't support this operation - (-EOPNOTSUPP).
 */
int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
{
	int err, i, j;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u32 *hw_reta = &msgbuf[1];
	u32 mask = 0;

	/* We have to use a mailbox for 82599 and x540 devices only.
	 * For these devices RETA has 128 entries.
	 * Also these VFs support up to 4 RSS queues. Therefore PF will compress
	 * 16 RETA entries in each DWORD giving 2 bits to each entry.
	 */
	int dwords = IXGBEVF_82599_RETA_SIZE / 16;

	/* We support the RSS querying for 82599 and x540 devices only.
	 * Thus return an error if API doesn't support RETA querying or querying
	 * is not supported for this device type.
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		if (hw->mac.type < ixgbe_mac_X550_vf)
			break;
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_RETA;

	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);

	if (err)
		return err;

	/* reply is the request word followed by the packed RETA dwords */
	err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);

	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;

	/* ixgbevf doesn't support more than 2 queues at the moment */
	if (num_rx_queues > 1)
		mask = 0x1;

	/* unpack 16 two-bit entries per dword into one entry per u32 */
	for (i = 0; i < dwords; i++)
		for (j = 0; j < 16; j++)
			reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;

	return 0;
}
357*4882a593Smuzhiyun
/**
 * ixgbevf_get_rss_key_locked - get the RSS Random Key
 * @hw: pointer to the HW structure
 * @rss_key: buffer to fill with RSS Hash Key contents.
 *
 * The "rss_key" buffer should be big enough to contain 10 registers.
 *
 * Returns: 0 on success.
 *          if API doesn't support this operation - (-EOPNOTSUPP).
 */
int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
{
	int err;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];

	/* We currently support the RSS Random Key retrieval for 82599 and x540
	 * devices only.
	 *
	 * Thus return an error if API doesn't support RSS Random Key retrieval
	 * or if the operation is not supported for this device type.
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		if (hw->mac.type < ixgbe_mac_X550_vf)
			break;
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
	err = hw->mbx.ops.write_posted(hw, msgbuf, 1);

	if (err)
		return err;

	/* 11 dwords: the request word plus 10 dwords of key material */
	err = hw->mbx.ops.read_posted(hw, msgbuf, 11);

	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
		return IXGBE_ERR_MBX;

	memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);

	return 0;
}
418*4882a593Smuzhiyun
419*4882a593Smuzhiyun /**
420*4882a593Smuzhiyun * ixgbevf_set_rar_vf - set device MAC address
421*4882a593Smuzhiyun * @hw: pointer to hardware structure
422*4882a593Smuzhiyun * @index: Receive address register to write
423*4882a593Smuzhiyun * @addr: Address to put into receive address register
424*4882a593Smuzhiyun * @vmdq: Unused in this implementation
425*4882a593Smuzhiyun **/
ixgbevf_set_rar_vf(struct ixgbe_hw * hw,u32 index,u8 * addr,u32 vmdq)426*4882a593Smuzhiyun static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
427*4882a593Smuzhiyun u32 vmdq)
428*4882a593Smuzhiyun {
429*4882a593Smuzhiyun u32 msgbuf[3];
430*4882a593Smuzhiyun u8 *msg_addr = (u8 *)(&msgbuf[1]);
431*4882a593Smuzhiyun s32 ret_val;
432*4882a593Smuzhiyun
433*4882a593Smuzhiyun memset(msgbuf, 0, sizeof(msgbuf));
434*4882a593Smuzhiyun msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
435*4882a593Smuzhiyun ether_addr_copy(msg_addr, addr);
436*4882a593Smuzhiyun
437*4882a593Smuzhiyun ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
438*4882a593Smuzhiyun ARRAY_SIZE(msgbuf));
439*4882a593Smuzhiyun msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
440*4882a593Smuzhiyun
441*4882a593Smuzhiyun /* if nacked the address was rejected, use "perm_addr" */
442*4882a593Smuzhiyun if (!ret_val &&
443*4882a593Smuzhiyun (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
444*4882a593Smuzhiyun ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
445*4882a593Smuzhiyun return IXGBE_ERR_MBX;
446*4882a593Smuzhiyun }
447*4882a593Smuzhiyun
448*4882a593Smuzhiyun return ret_val;
449*4882a593Smuzhiyun }
450*4882a593Smuzhiyun
451*4882a593Smuzhiyun /**
452*4882a593Smuzhiyun * ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
453*4882a593Smuzhiyun * @hw: pointer to hardware structure
454*4882a593Smuzhiyun * @index: Receive address register to write
455*4882a593Smuzhiyun * @addr: Address to put into receive address register
456*4882a593Smuzhiyun * @vmdq: Unused in this implementation
457*4882a593Smuzhiyun *
458*4882a593Smuzhiyun * We don't really allow setting the device MAC address. However,
459*4882a593Smuzhiyun * if the address being set is the permanent MAC address we will
460*4882a593Smuzhiyun * permit that.
461*4882a593Smuzhiyun **/
ixgbevf_hv_set_rar_vf(struct ixgbe_hw * hw,u32 index,u8 * addr,u32 vmdq)462*4882a593Smuzhiyun static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
463*4882a593Smuzhiyun u32 vmdq)
464*4882a593Smuzhiyun {
465*4882a593Smuzhiyun if (ether_addr_equal(addr, hw->mac.perm_addr))
466*4882a593Smuzhiyun return 0;
467*4882a593Smuzhiyun
468*4882a593Smuzhiyun return -EOPNOTSUPP;
469*4882a593Smuzhiyun }
470*4882a593Smuzhiyun
/**
 * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
 * @hw: pointer to the HW structure
 * @netdev: pointer to net device structure
 *
 * Updates the Multicast Table Array by sending the hash vectors of the
 * netdev's multicast addresses to the PF over the mailbox.
 **/
static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					  struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u16 *vector_list = (u16 *)&msgbuf[1];
	u32 cnt, i;

	/* Each entry in the list uses 1 16 bit word.  We have 30
	 * 16 bit words available in our HW msg buffer (minus 1 for the
	 * msg type).  That's 30 hash values if we pack 'em right.  If
	 * there are more than 30 MC addresses to add then punt the
	 * extras for now and then add code to handle more than 30 later.
	 * It would be unusual for a server to request that many multi-cast
	 * addresses except for in large enterprise network environments.
	 */

	cnt = netdev_mc_count(netdev);
	if (cnt > 30)
		cnt = 30;
	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;

	/* NOTE(review): link-local addresses are skipped below without
	 * decrementing cnt, so the count advertised in msgbuf[0] can
	 * exceed the number of vector_list entries actually written —
	 * confirm the PF tolerates the trailing unused slots.
	 */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == cnt)
			break;
		if (is_link_local_ether_addr(ha->addr))
			continue;

		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
	}

	return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					  IXGBE_VFMAILBOX_SIZE);
}
514*4882a593Smuzhiyun
/**
 * ixgbevf_hv_update_mc_addr_list_vf - Hyper-V variant - just a stub.
 * @hw: unused
 * @netdev: unused
 */
static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					     struct net_device *netdev)
{
	/* multicast list updates are not supported over the Hyper-V channel */
	return -EOPNOTSUPP;
}
525*4882a593Smuzhiyun
/**
 * ixgbevf_update_xcast_mode - Update Multicast mode
 * @hw: pointer to the HW structure
 * @xcast_mode: new multicast mode
 *
 * Updates the Multicast Mode of VF. Returns 0 on success, -EOPNOTSUPP
 * if the negotiated mailbox API doesn't support the requested mode, or
 * -EPERM if the PF refused the change.
 **/
static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
	u32 msgbuf[2];
	s32 err;

	switch (hw->api_version) {
	case ixgbe_mbox_api_12:
		/* promisc introduced in 1.3 version */
		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
			return -EOPNOTSUPP;
		fallthrough;
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_13:
		break;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
	msgbuf[1] = xcast_mode;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					 ARRAY_SIZE(msgbuf));
	if (err)
		return err;

	/* strip CTS before checking whether the PF refused the request */
	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
		return -EPERM;

	return 0;
}
565*4882a593Smuzhiyun
/**
 * ixgbevf_hv_update_xcast_mode - Hyper-V variant - just a stub.
 * @hw: unused
 * @xcast_mode: unused
 */
static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
	/* multicast mode changes are not supported over the Hyper-V channel */
	return -EOPNOTSUPP;
}
575*4882a593Smuzhiyun
576*4882a593Smuzhiyun /**
577*4882a593Smuzhiyun * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
578*4882a593Smuzhiyun * @hw: pointer to the HW structure
579*4882a593Smuzhiyun * @vlan: 12 bit VLAN ID
580*4882a593Smuzhiyun * @vind: unused by VF drivers
581*4882a593Smuzhiyun * @vlan_on: if true then set bit, else clear bit
582*4882a593Smuzhiyun **/
ixgbevf_set_vfta_vf(struct ixgbe_hw * hw,u32 vlan,u32 vind,bool vlan_on)583*4882a593Smuzhiyun static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
584*4882a593Smuzhiyun bool vlan_on)
585*4882a593Smuzhiyun {
586*4882a593Smuzhiyun u32 msgbuf[2];
587*4882a593Smuzhiyun s32 err;
588*4882a593Smuzhiyun
589*4882a593Smuzhiyun msgbuf[0] = IXGBE_VF_SET_VLAN;
590*4882a593Smuzhiyun msgbuf[1] = vlan;
591*4882a593Smuzhiyun /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
592*4882a593Smuzhiyun msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
593*4882a593Smuzhiyun
594*4882a593Smuzhiyun err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
595*4882a593Smuzhiyun ARRAY_SIZE(msgbuf));
596*4882a593Smuzhiyun if (err)
597*4882a593Smuzhiyun goto mbx_err;
598*4882a593Smuzhiyun
599*4882a593Smuzhiyun /* remove extra bits from the message */
600*4882a593Smuzhiyun msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
601*4882a593Smuzhiyun msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
602*4882a593Smuzhiyun
603*4882a593Smuzhiyun if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
604*4882a593Smuzhiyun err = IXGBE_ERR_INVALID_ARGUMENT;
605*4882a593Smuzhiyun
606*4882a593Smuzhiyun mbx_err:
607*4882a593Smuzhiyun return err;
608*4882a593Smuzhiyun }
609*4882a593Smuzhiyun
/**
 * ixgbevf_hv_set_vfta_vf - Hyper-V variant - just a stub.
 * @hw: unused
 * @vlan: unused
 * @vind: unused
 * @vlan_on: unused
 */
static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
				  bool vlan_on)
{
	/* VLAN filter programming is not supported over the Hyper-V channel */
	return -EOPNOTSUPP;
}
622*4882a593Smuzhiyun
/**
 * ixgbevf_setup_mac_link_vf - Setup MAC link settings
 * @hw: pointer to hardware structure
 * @speed: Unused in this implementation
 * @autoneg: Unused in this implementation
 * @autoneg_wait_to_complete: Unused in this implementation
 *
 * Do nothing and return success.  VF drivers are not allowed to change
 * global settings.  Maintained for driver compatibility.
 **/
static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed speed, bool autoneg,
				     bool autoneg_wait_to_complete)
{
	/* intentionally a no-op: link settings are owned by the PF */
	return 0;
}
639*4882a593Smuzhiyun
/**
 * ixgbevf_check_mac_link_vf - Get link/speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: true if link is up, false otherwise
 * @autoneg_wait_to_complete: unused
 *
 * Reads the links register to determine if link is up and the current speed.
 * Link is only reported up once both the VFLINKS register indicates link
 * and the PF responds over the mailbox, so a dead PF is not reported as a
 * working link.
 **/
static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed *speed,
				     bool *link_up,
				     bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 links_reg;
	u32 in_msg = 0;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	/* nothing to do if a previous pass already confirmed link */
	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			/* link must stay up across all re-reads */
			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	/* translate the VFLINKS speed field into the generic speed value */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if the read failed it could just be a mailbox collision, best wait
	 * until we are called again and don't report an error
	 */
	if (mbx->ops.read(hw, &in_msg, 1))
		goto out;

	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
		/* msg is not CTS and is NACK we must have lost CTS status */
		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
			ret_val = -1;
		goto out;
	}

	/* the pf is talking, if we timed out in the past we reinit */
	if (!mbx->timeout) {
		ret_val = -1;
		goto out;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	/* report link as up only once every check above has passed */
	*link_up = !mac->get_link_status;
	return ret_val;
}
727*4882a593Smuzhiyun
/**
 * ixgbevf_hv_check_mac_link_vf - check link on Hyper-V
 * Hyper-V variant; there is no mailbox communication.
 * @hw: pointer to private hardware struct
 * @speed: pointer to link speed
 * @link_up: true if link is up, false otherwise
 * @autoneg_wait_to_complete: unused
 *
 * Same as ixgbevf_check_mac_link_vf except the PF-liveness probe over the
 * mailbox is skipped, since Hyper-V has no mailbox channel.
 */
static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
					ixgbe_link_speed *speed,
					bool *link_up,
					bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 links_reg;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	/* nothing to do if a previous pass already confirmed link */
	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			/* link must stay up across all re-reads */
			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	/* translate the VFLINKS speed field into the generic speed value */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return 0;
}
792*4882a593Smuzhiyun
793*4882a593Smuzhiyun /**
794*4882a593Smuzhiyun * ixgbevf_set_rlpml_vf - Set the maximum receive packet length
795*4882a593Smuzhiyun * @hw: pointer to the HW structure
796*4882a593Smuzhiyun * @max_size: value to assign to max frame size
797*4882a593Smuzhiyun **/
ixgbevf_set_rlpml_vf(struct ixgbe_hw * hw,u16 max_size)798*4882a593Smuzhiyun static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
799*4882a593Smuzhiyun {
800*4882a593Smuzhiyun u32 msgbuf[2];
801*4882a593Smuzhiyun s32 ret_val;
802*4882a593Smuzhiyun
803*4882a593Smuzhiyun msgbuf[0] = IXGBE_VF_SET_LPE;
804*4882a593Smuzhiyun msgbuf[1] = max_size;
805*4882a593Smuzhiyun
806*4882a593Smuzhiyun ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
807*4882a593Smuzhiyun ARRAY_SIZE(msgbuf));
808*4882a593Smuzhiyun if (ret_val)
809*4882a593Smuzhiyun return ret_val;
810*4882a593Smuzhiyun if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
811*4882a593Smuzhiyun (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
812*4882a593Smuzhiyun return IXGBE_ERR_MBX;
813*4882a593Smuzhiyun
814*4882a593Smuzhiyun return 0;
815*4882a593Smuzhiyun }
816*4882a593Smuzhiyun
817*4882a593Smuzhiyun /**
818*4882a593Smuzhiyun * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
819*4882a593Smuzhiyun * @hw: pointer to the HW structure
820*4882a593Smuzhiyun * @max_size: value to assign to max frame size
821*4882a593Smuzhiyun * Hyper-V variant.
822*4882a593Smuzhiyun **/
ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw * hw,u16 max_size)823*4882a593Smuzhiyun static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
824*4882a593Smuzhiyun {
825*4882a593Smuzhiyun u32 reg;
826*4882a593Smuzhiyun
827*4882a593Smuzhiyun /* If we are on Hyper-V, we implement this functionality
828*4882a593Smuzhiyun * differently.
829*4882a593Smuzhiyun */
830*4882a593Smuzhiyun reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
831*4882a593Smuzhiyun /* CRC == 4 */
832*4882a593Smuzhiyun reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
833*4882a593Smuzhiyun IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);
834*4882a593Smuzhiyun
835*4882a593Smuzhiyun return 0;
836*4882a593Smuzhiyun }
837*4882a593Smuzhiyun
838*4882a593Smuzhiyun /**
839*4882a593Smuzhiyun * ixgbevf_negotiate_api_version_vf - Negotiate supported API version
840*4882a593Smuzhiyun * @hw: pointer to the HW structure
841*4882a593Smuzhiyun * @api: integer containing requested API version
842*4882a593Smuzhiyun **/
ixgbevf_negotiate_api_version_vf(struct ixgbe_hw * hw,int api)843*4882a593Smuzhiyun static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
844*4882a593Smuzhiyun {
845*4882a593Smuzhiyun int err;
846*4882a593Smuzhiyun u32 msg[3];
847*4882a593Smuzhiyun
848*4882a593Smuzhiyun /* Negotiate the mailbox API version */
849*4882a593Smuzhiyun msg[0] = IXGBE_VF_API_NEGOTIATE;
850*4882a593Smuzhiyun msg[1] = api;
851*4882a593Smuzhiyun msg[2] = 0;
852*4882a593Smuzhiyun
853*4882a593Smuzhiyun err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
854*4882a593Smuzhiyun if (!err) {
855*4882a593Smuzhiyun msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
856*4882a593Smuzhiyun
857*4882a593Smuzhiyun /* Store value and return 0 on success */
858*4882a593Smuzhiyun if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
859*4882a593Smuzhiyun hw->api_version = api;
860*4882a593Smuzhiyun return 0;
861*4882a593Smuzhiyun }
862*4882a593Smuzhiyun
863*4882a593Smuzhiyun err = IXGBE_ERR_INVALID_ARGUMENT;
864*4882a593Smuzhiyun }
865*4882a593Smuzhiyun
866*4882a593Smuzhiyun return err;
867*4882a593Smuzhiyun }
868*4882a593Smuzhiyun
869*4882a593Smuzhiyun /**
870*4882a593Smuzhiyun * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
871*4882a593Smuzhiyun * @hw: pointer to the HW structure
872*4882a593Smuzhiyun * @api: integer containing requested API version
873*4882a593Smuzhiyun * Hyper-V version - only ixgbe_mbox_api_10 supported.
874*4882a593Smuzhiyun **/
ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw * hw,int api)875*4882a593Smuzhiyun static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
876*4882a593Smuzhiyun {
877*4882a593Smuzhiyun /* Hyper-V only supports api version ixgbe_mbox_api_10 */
878*4882a593Smuzhiyun if (api != ixgbe_mbox_api_10)
879*4882a593Smuzhiyun return IXGBE_ERR_INVALID_ARGUMENT;
880*4882a593Smuzhiyun
881*4882a593Smuzhiyun return 0;
882*4882a593Smuzhiyun }
883*4882a593Smuzhiyun
ixgbevf_get_queues(struct ixgbe_hw * hw,unsigned int * num_tcs,unsigned int * default_tc)884*4882a593Smuzhiyun int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
885*4882a593Smuzhiyun unsigned int *default_tc)
886*4882a593Smuzhiyun {
887*4882a593Smuzhiyun int err;
888*4882a593Smuzhiyun u32 msg[5];
889*4882a593Smuzhiyun
890*4882a593Smuzhiyun /* do nothing if API doesn't support ixgbevf_get_queues */
891*4882a593Smuzhiyun switch (hw->api_version) {
892*4882a593Smuzhiyun case ixgbe_mbox_api_11:
893*4882a593Smuzhiyun case ixgbe_mbox_api_12:
894*4882a593Smuzhiyun case ixgbe_mbox_api_13:
895*4882a593Smuzhiyun case ixgbe_mbox_api_14:
896*4882a593Smuzhiyun break;
897*4882a593Smuzhiyun default:
898*4882a593Smuzhiyun return 0;
899*4882a593Smuzhiyun }
900*4882a593Smuzhiyun
901*4882a593Smuzhiyun /* Fetch queue configuration from the PF */
902*4882a593Smuzhiyun msg[0] = IXGBE_VF_GET_QUEUE;
903*4882a593Smuzhiyun msg[1] = msg[2] = msg[3] = msg[4] = 0;
904*4882a593Smuzhiyun
905*4882a593Smuzhiyun err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
906*4882a593Smuzhiyun if (!err) {
907*4882a593Smuzhiyun msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
908*4882a593Smuzhiyun
909*4882a593Smuzhiyun /* if we we didn't get an ACK there must have been
910*4882a593Smuzhiyun * some sort of mailbox error so we should treat it
911*4882a593Smuzhiyun * as such
912*4882a593Smuzhiyun */
913*4882a593Smuzhiyun if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
914*4882a593Smuzhiyun return IXGBE_ERR_MBX;
915*4882a593Smuzhiyun
916*4882a593Smuzhiyun /* record and validate values from message */
917*4882a593Smuzhiyun hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
918*4882a593Smuzhiyun if (hw->mac.max_tx_queues == 0 ||
919*4882a593Smuzhiyun hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
920*4882a593Smuzhiyun hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
921*4882a593Smuzhiyun
922*4882a593Smuzhiyun hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
923*4882a593Smuzhiyun if (hw->mac.max_rx_queues == 0 ||
924*4882a593Smuzhiyun hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
925*4882a593Smuzhiyun hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
926*4882a593Smuzhiyun
927*4882a593Smuzhiyun *num_tcs = msg[IXGBE_VF_TRANS_VLAN];
928*4882a593Smuzhiyun /* in case of unknown state assume we cannot tag frames */
929*4882a593Smuzhiyun if (*num_tcs > hw->mac.max_rx_queues)
930*4882a593Smuzhiyun *num_tcs = 1;
931*4882a593Smuzhiyun
932*4882a593Smuzhiyun *default_tc = msg[IXGBE_VF_DEF_QUEUE];
933*4882a593Smuzhiyun /* default to queue 0 on out-of-bounds queue number */
934*4882a593Smuzhiyun if (*default_tc >= hw->mac.max_tx_queues)
935*4882a593Smuzhiyun *default_tc = 0;
936*4882a593Smuzhiyun }
937*4882a593Smuzhiyun
938*4882a593Smuzhiyun return err;
939*4882a593Smuzhiyun }
940*4882a593Smuzhiyun
/* MAC operations for regular SR-IOV VFs: control operations that need PF
 * cooperation go through the PF mailbox.
 */
static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
	.init_hw = ixgbevf_init_hw_vf,
	.reset_hw = ixgbevf_reset_hw_vf,
	.start_hw = ixgbevf_start_hw_vf,
	.get_mac_addr = ixgbevf_get_mac_addr_vf,
	.stop_adapter = ixgbevf_stop_hw_vf,
	.setup_link = ixgbevf_setup_mac_link_vf,
	.check_link = ixgbevf_check_mac_link_vf,
	.negotiate_api_version = ixgbevf_negotiate_api_version_vf,
	.set_rar = ixgbevf_set_rar_vf,
	.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
	.update_xcast_mode = ixgbevf_update_xcast_mode,
	.set_uc_addr = ixgbevf_set_uc_addr_vf,
	.set_vfta = ixgbevf_set_vfta_vf,
	.set_rlpml = ixgbevf_set_rlpml_vf,
};
957*4882a593Smuzhiyun
/* MAC operations for Hyper-V hosted VFs: there is no PF mailbox, so
 * mailbox-dependent operations are replaced with the ixgbevf_hv_* variants
 * (direct register access or stubs).
 */
static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
	.init_hw = ixgbevf_init_hw_vf,
	.reset_hw = ixgbevf_hv_reset_hw_vf,
	.start_hw = ixgbevf_start_hw_vf,
	.get_mac_addr = ixgbevf_get_mac_addr_vf,
	.stop_adapter = ixgbevf_stop_hw_vf,
	.setup_link = ixgbevf_setup_mac_link_vf,
	.check_link = ixgbevf_hv_check_mac_link_vf,
	.negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf,
	.set_rar = ixgbevf_hv_set_rar_vf,
	.update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf,
	.update_xcast_mode = ixgbevf_hv_update_xcast_mode,
	.set_uc_addr = ixgbevf_hv_set_uc_addr_vf,
	.set_vfta = ixgbevf_hv_set_vfta_vf,
	.set_rlpml = ixgbevf_hv_set_rlpml_vf,
};
974*4882a593Smuzhiyun
/* Per-device info tables: each supported VF MAC type gets a native
 * (mailbox-based) entry and, where applicable, a Hyper-V entry.
 */
const struct ixgbevf_info ixgbevf_82599_vf_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
	.mac = ixgbe_mac_82599_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
	.mac = ixgbe_mac_X540_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
	.mac = ixgbe_mac_X550_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_mac_ops,
};

const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
	.mac = ixgbe_mac_X550EM_x_vf,
	.mac_ops = &ixgbevf_hv_mac_ops,
};

const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
	.mac = ixgbe_mac_x550em_a_vf,
	.mac_ops = &ixgbevf_mac_ops,
};
1019