1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
3*4882a593Smuzhiyun * driver for Linux.
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * This software is available to you under a choice of one of two
8*4882a593Smuzhiyun * licenses. You may choose to be licensed under the terms of the GNU
9*4882a593Smuzhiyun * General Public License (GPL) Version 2, available from the file
10*4882a593Smuzhiyun * COPYING in the main directory of this source tree, or the
11*4882a593Smuzhiyun * OpenIB.org BSD license below:
12*4882a593Smuzhiyun *
13*4882a593Smuzhiyun * Redistribution and use in source and binary forms, with or
14*4882a593Smuzhiyun * without modification, are permitted provided that the following
15*4882a593Smuzhiyun * conditions are met:
16*4882a593Smuzhiyun *
17*4882a593Smuzhiyun * - Redistributions of source code must retain the above
18*4882a593Smuzhiyun * copyright notice, this list of conditions and the following
19*4882a593Smuzhiyun * disclaimer.
20*4882a593Smuzhiyun *
21*4882a593Smuzhiyun * - Redistributions in binary form must reproduce the above
22*4882a593Smuzhiyun * copyright notice, this list of conditions and the following
23*4882a593Smuzhiyun * disclaimer in the documentation and/or other materials
24*4882a593Smuzhiyun * provided with the distribution.
25*4882a593Smuzhiyun *
26*4882a593Smuzhiyun * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27*4882a593Smuzhiyun * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28*4882a593Smuzhiyun * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29*4882a593Smuzhiyun * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30*4882a593Smuzhiyun * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31*4882a593Smuzhiyun * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32*4882a593Smuzhiyun * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33*4882a593Smuzhiyun * SOFTWARE.
34*4882a593Smuzhiyun */
35*4882a593Smuzhiyun
36*4882a593Smuzhiyun #include <linux/pci.h>
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun #include "t4vf_common.h"
39*4882a593Smuzhiyun #include "t4vf_defs.h"
40*4882a593Smuzhiyun
41*4882a593Smuzhiyun #include "../cxgb4/t4_regs.h"
42*4882a593Smuzhiyun #include "../cxgb4/t4_values.h"
43*4882a593Smuzhiyun #include "../cxgb4/t4fw_api.h"
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun /*
46*4882a593Smuzhiyun * Wait for the device to become ready (signified by our "who am I" register
47*4882a593Smuzhiyun * returning a value other than all 1's). Return an error if it doesn't
48*4882a593Smuzhiyun * become ready ...
49*4882a593Smuzhiyun */
t4vf_wait_dev_ready(struct adapter * adapter)50*4882a593Smuzhiyun int t4vf_wait_dev_ready(struct adapter *adapter)
51*4882a593Smuzhiyun {
52*4882a593Smuzhiyun const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI;
53*4882a593Smuzhiyun const u32 notready1 = 0xffffffff;
54*4882a593Smuzhiyun const u32 notready2 = 0xeeeeeeee;
55*4882a593Smuzhiyun u32 val;
56*4882a593Smuzhiyun
57*4882a593Smuzhiyun val = t4_read_reg(adapter, whoami);
58*4882a593Smuzhiyun if (val != notready1 && val != notready2)
59*4882a593Smuzhiyun return 0;
60*4882a593Smuzhiyun msleep(500);
61*4882a593Smuzhiyun val = t4_read_reg(adapter, whoami);
62*4882a593Smuzhiyun if (val != notready1 && val != notready2)
63*4882a593Smuzhiyun return 0;
64*4882a593Smuzhiyun else
65*4882a593Smuzhiyun return -EIO;
66*4882a593Smuzhiyun }
67*4882a593Smuzhiyun
68*4882a593Smuzhiyun /*
69*4882a593Smuzhiyun * Get the reply to a mailbox command and store it in @rpl in big-endian order
70*4882a593Smuzhiyun * (since the firmware data structures are specified in a big-endian layout).
71*4882a593Smuzhiyun */
get_mbox_rpl(struct adapter * adapter,__be64 * rpl,int size,u32 mbox_data)72*4882a593Smuzhiyun static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size,
73*4882a593Smuzhiyun u32 mbox_data)
74*4882a593Smuzhiyun {
75*4882a593Smuzhiyun for ( ; size; size -= 8, mbox_data += 8)
76*4882a593Smuzhiyun *rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data));
77*4882a593Smuzhiyun }
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun /**
80*4882a593Smuzhiyun * t4vf_record_mbox - record a Firmware Mailbox Command/Reply in the log
81*4882a593Smuzhiyun * @adapter: the adapter
82*4882a593Smuzhiyun * @cmd: the Firmware Mailbox Command or Reply
83*4882a593Smuzhiyun * @size: command length in bytes
84*4882a593Smuzhiyun * @access: the time (ms) needed to access the Firmware Mailbox
85*4882a593Smuzhiyun * @execute: the time (ms) the command spent being executed
86*4882a593Smuzhiyun */
t4vf_record_mbox(struct adapter * adapter,const __be64 * cmd,int size,int access,int execute)87*4882a593Smuzhiyun static void t4vf_record_mbox(struct adapter *adapter, const __be64 *cmd,
88*4882a593Smuzhiyun int size, int access, int execute)
89*4882a593Smuzhiyun {
90*4882a593Smuzhiyun struct mbox_cmd_log *log = adapter->mbox_log;
91*4882a593Smuzhiyun struct mbox_cmd *entry;
92*4882a593Smuzhiyun int i;
93*4882a593Smuzhiyun
94*4882a593Smuzhiyun entry = mbox_cmd_log_entry(log, log->cursor++);
95*4882a593Smuzhiyun if (log->cursor == log->size)
96*4882a593Smuzhiyun log->cursor = 0;
97*4882a593Smuzhiyun
98*4882a593Smuzhiyun for (i = 0; i < size / 8; i++)
99*4882a593Smuzhiyun entry->cmd[i] = be64_to_cpu(cmd[i]);
100*4882a593Smuzhiyun while (i < MBOX_LEN / 8)
101*4882a593Smuzhiyun entry->cmd[i++] = 0;
102*4882a593Smuzhiyun entry->timestamp = jiffies;
103*4882a593Smuzhiyun entry->seqno = log->seqno++;
104*4882a593Smuzhiyun entry->access = access;
105*4882a593Smuzhiyun entry->execute = execute;
106*4882a593Smuzhiyun }
107*4882a593Smuzhiyun
/**
 *	t4vf_wr_mbox_core - send a command to FW through the mailbox
 *	@adapter: the adapter
 *	@cmd: the command to write
 *	@size: command length in bytes (must be a multiple of 16)
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sends the given command to FW through the mailbox and waits for the
 *	FW to execute the command.  If @rpl is not %NULL it is used to store
 *	the FW's reply to the command.  The command and its optional reply
 *	are of the same length.  FW can take up to 500 ms to respond.
 *	@sleep_ok determines whether we may sleep while awaiting the response.
 *	If sleeping is allowed we use progressive backoff otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
		      void *rpl, bool sleep_ok)
{
	/* Progressive backoff schedule (ms); the last entry repeats. */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u16 access = 0, execute = 0;
	u32 v, mbox_data;
	int i, ms, delay_idx, ret;
	const __be64 *p;
	u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
	u32 cmd_op = FW_CMD_OP_G(be32_to_cpu(((struct fw_cmd_hdr *)cmd)->hi));
	__be64 cmd_rpl[MBOX_LEN / 8];
	struct mbox_list entry;

	/* In T6, mailbox size is changed to 128 bytes to avoid
	 * invalidating the entire prefetch buffer.
	 */
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		mbox_data = T4VF_MBDATA_BASE_ADDR;
	else
		mbox_data = T6VF_MBDATA_BASE_ADDR;

	/*
	 * Commands must be multiples of 16 bytes in length and may not be
	 * larger than the size of the Mailbox Data register array.
	 */
	if ((size % 16) != 0 ||
	    size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
		return -EINVAL;

	/* Queue ourselves onto the mailbox access list.  When our entry is at
	 * the front of the list, we have rights to access the mailbox.  So we
	 * wait [for a while] till we're at the front [or bail out with an
	 * EBUSY] ...
	 */
	spin_lock(&adapter->mbox_lock);
	list_add_tail(&entry.list, &adapter->mlist.list);
	spin_unlock(&adapter->mbox_lock);

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; ; i += ms) {
		/* If we've waited too long, return a busy indication.  This
		 * really ought to be based on our initial position in the
		 * mailbox access list but this is a start.  We very rearely
		 * contend on access to the mailbox ...
		 */
		if (i > FW_CMD_MAX_TIMEOUT) {
			spin_lock(&adapter->mbox_lock);
			list_del(&entry.list);
			spin_unlock(&adapter->mbox_lock);
			ret = -EBUSY;
			t4vf_record_mbox(adapter, cmd, size, access, ret);
			return ret;
		}

		/* If we're at the head, break out and start the mailbox
		 * protocol.
		 */
		if (list_first_entry(&adapter->mlist.list, struct mbox_list,
				     list) == &entry)
			break;

		/* Delay for a bit before checking again ... */
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			mdelay(ms);
		}
	}

	/*
	 * Loop trying to get ownership of the mailbox.  Return an error
	 * if we can't gain ownership.
	 */
	v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
	if (v != MBOX_OWNER_DRV) {
		spin_lock(&adapter->mbox_lock);
		list_del(&entry.list);
		spin_unlock(&adapter->mbox_lock);
		ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
		t4vf_record_mbox(adapter, cmd, size, access, ret);
		return ret;
	}

	/*
	 * Write the command array into the Mailbox Data register array and
	 * transfer ownership of the mailbox to the firmware.
	 *
	 * For the VFs, the Mailbox Data "registers" are actually backed by
	 * T4's "MA" interface rather than PL Registers (as is the case for
	 * the PFs).  Because these are in different coherency domains, the
	 * write to the VF's PL-register-backed Mailbox Control can race in
	 * front of the writes to the MA-backed VF Mailbox Data "registers".
	 * So we need to do a read-back on at least one byte of the VF Mailbox
	 * Data registers before doing the write to the VF Mailbox Control
	 * register.
	 */
	/* FW_VI_STATS_CMD is issued frequently enough that logging it would
	 * flood the mailbox command log, so it's excluded here and below.
	 */
	if (cmd_op != FW_VI_STATS_CMD)
		t4vf_record_mbox(adapter, cmd, size, access, 0);
	for (i = 0, p = cmd; i < size; i += 8)
		t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
	t4_read_reg(adapter, mbox_data);        /* flush write */

	t4_write_reg(adapter, mbox_ctl,
		     MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
	t4_read_reg(adapter, mbox_ctl);          /* flush write */

	/*
	 * Spin waiting for firmware to acknowledge processing our command.
	 */
	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		/*
		 * If we're the owner, see if this is the reply we wanted.
		 */
		v = t4_read_reg(adapter, mbox_ctl);
		if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
			/*
			 * If the Message Valid bit isn't on, revoke ownership
			 * of the mailbox and continue waiting for our reply.
			 */
			if ((v & MBMSGVALID_F) == 0) {
				t4_write_reg(adapter, mbox_ctl,
					     MBOWNER_V(MBOX_OWNER_NONE));
				continue;
			}

			/*
			 * We now have our reply.  Extract the command return
			 * value, copy the reply back to our caller's buffer
			 * (if specified) and revoke ownership of the mailbox.
			 * We return the (negated) firmware command return
			 * code (this depends on FW_SUCCESS == 0).
			 */
			get_mbox_rpl(adapter, cmd_rpl, size, mbox_data);

			/* return value in low-order little-endian word */
			v = be64_to_cpu(cmd_rpl[0]);

			if (rpl) {
				/* request bit in high-order BE word */
				WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
					 & FW_CMD_REQUEST_F) == 0);
				memcpy(rpl, cmd_rpl, size);
				WARN_ON((be32_to_cpu(*(__be32 *)rpl)
					 & FW_CMD_REQUEST_F) != 0);
			}
			t4_write_reg(adapter, mbox_ctl,
				     MBOWNER_V(MBOX_OWNER_NONE));
			execute = i + ms;
			if (cmd_op != FW_VI_STATS_CMD)
				t4vf_record_mbox(adapter, cmd_rpl, size, access,
						 execute);
			spin_lock(&adapter->mbox_lock);
			list_del(&entry.list);
			spin_unlock(&adapter->mbox_lock);
			return -FW_CMD_RETVAL_G(v);
		}
	}

	/* We timed out.  Return the error ... */
	ret = -ETIMEDOUT;
	t4vf_record_mbox(adapter, cmd, size, access, ret);
	spin_lock(&adapter->mbox_lock);
	list_del(&entry.list);
	spin_unlock(&adapter->mbox_lock);
	return ret;
}
315*4882a593Smuzhiyun
/* In the Physical Function Driver Common Code, the ADVERT_MASK is used to
 * mask out bits in the Advertised Port Capabilities which are managed via
 * separate controls, like Pause Frames and Forward Error Correction.  In the
 * Virtual Function Common Code, since we never perform L1 Configuration on
 * the Link, the only things we really need to filter out are things which
 * we decode and report separately like Speed.  The mask therefore keeps the
 * Speed, Pause, FEC and Auto-Negotiation bits of a 32-bit Port Capabilities
 * value and discards everything else.
 */
#define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
		     FW_PORT_CAP32_802_3_PAUSE | \
		     FW_PORT_CAP32_802_3_ASM_DIR | \
		     FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M) | \
		     FW_PORT_CAP32_ANEG)
328*4882a593Smuzhiyun
329*4882a593Smuzhiyun /**
330*4882a593Smuzhiyun * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
331*4882a593Smuzhiyun * @caps16: a 16-bit Port Capabilities value
332*4882a593Smuzhiyun *
333*4882a593Smuzhiyun * Returns the equivalent 32-bit Port Capabilities value.
334*4882a593Smuzhiyun */
fwcaps16_to_caps32(fw_port_cap16_t caps16)335*4882a593Smuzhiyun static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
336*4882a593Smuzhiyun {
337*4882a593Smuzhiyun fw_port_cap32_t caps32 = 0;
338*4882a593Smuzhiyun
339*4882a593Smuzhiyun #define CAP16_TO_CAP32(__cap) \
340*4882a593Smuzhiyun do { \
341*4882a593Smuzhiyun if (caps16 & FW_PORT_CAP_##__cap) \
342*4882a593Smuzhiyun caps32 |= FW_PORT_CAP32_##__cap; \
343*4882a593Smuzhiyun } while (0)
344*4882a593Smuzhiyun
345*4882a593Smuzhiyun CAP16_TO_CAP32(SPEED_100M);
346*4882a593Smuzhiyun CAP16_TO_CAP32(SPEED_1G);
347*4882a593Smuzhiyun CAP16_TO_CAP32(SPEED_25G);
348*4882a593Smuzhiyun CAP16_TO_CAP32(SPEED_10G);
349*4882a593Smuzhiyun CAP16_TO_CAP32(SPEED_40G);
350*4882a593Smuzhiyun CAP16_TO_CAP32(SPEED_100G);
351*4882a593Smuzhiyun CAP16_TO_CAP32(FC_RX);
352*4882a593Smuzhiyun CAP16_TO_CAP32(FC_TX);
353*4882a593Smuzhiyun CAP16_TO_CAP32(ANEG);
354*4882a593Smuzhiyun CAP16_TO_CAP32(MDIAUTO);
355*4882a593Smuzhiyun CAP16_TO_CAP32(MDISTRAIGHT);
356*4882a593Smuzhiyun CAP16_TO_CAP32(FEC_RS);
357*4882a593Smuzhiyun CAP16_TO_CAP32(FEC_BASER_RS);
358*4882a593Smuzhiyun CAP16_TO_CAP32(802_3_PAUSE);
359*4882a593Smuzhiyun CAP16_TO_CAP32(802_3_ASM_DIR);
360*4882a593Smuzhiyun
361*4882a593Smuzhiyun #undef CAP16_TO_CAP32
362*4882a593Smuzhiyun
363*4882a593Smuzhiyun return caps32;
364*4882a593Smuzhiyun }
365*4882a593Smuzhiyun
366*4882a593Smuzhiyun /* Translate Firmware Pause specification to Common Code */
fwcap_to_cc_pause(fw_port_cap32_t fw_pause)367*4882a593Smuzhiyun static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
368*4882a593Smuzhiyun {
369*4882a593Smuzhiyun enum cc_pause cc_pause = 0;
370*4882a593Smuzhiyun
371*4882a593Smuzhiyun if (fw_pause & FW_PORT_CAP32_FC_RX)
372*4882a593Smuzhiyun cc_pause |= PAUSE_RX;
373*4882a593Smuzhiyun if (fw_pause & FW_PORT_CAP32_FC_TX)
374*4882a593Smuzhiyun cc_pause |= PAUSE_TX;
375*4882a593Smuzhiyun
376*4882a593Smuzhiyun return cc_pause;
377*4882a593Smuzhiyun }
378*4882a593Smuzhiyun
379*4882a593Smuzhiyun /* Translate Firmware Forward Error Correction specification to Common Code */
fwcap_to_cc_fec(fw_port_cap32_t fw_fec)380*4882a593Smuzhiyun static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
381*4882a593Smuzhiyun {
382*4882a593Smuzhiyun enum cc_fec cc_fec = 0;
383*4882a593Smuzhiyun
384*4882a593Smuzhiyun if (fw_fec & FW_PORT_CAP32_FEC_RS)
385*4882a593Smuzhiyun cc_fec |= FEC_RS;
386*4882a593Smuzhiyun if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
387*4882a593Smuzhiyun cc_fec |= FEC_BASER_RS;
388*4882a593Smuzhiyun
389*4882a593Smuzhiyun return cc_fec;
390*4882a593Smuzhiyun }
391*4882a593Smuzhiyun
392*4882a593Smuzhiyun /* Return the highest speed set in the port capabilities, in Mb/s. */
fwcap_to_speed(fw_port_cap32_t caps)393*4882a593Smuzhiyun static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
394*4882a593Smuzhiyun {
395*4882a593Smuzhiyun #define TEST_SPEED_RETURN(__caps_speed, __speed) \
396*4882a593Smuzhiyun do { \
397*4882a593Smuzhiyun if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
398*4882a593Smuzhiyun return __speed; \
399*4882a593Smuzhiyun } while (0)
400*4882a593Smuzhiyun
401*4882a593Smuzhiyun TEST_SPEED_RETURN(400G, 400000);
402*4882a593Smuzhiyun TEST_SPEED_RETURN(200G, 200000);
403*4882a593Smuzhiyun TEST_SPEED_RETURN(100G, 100000);
404*4882a593Smuzhiyun TEST_SPEED_RETURN(50G, 50000);
405*4882a593Smuzhiyun TEST_SPEED_RETURN(40G, 40000);
406*4882a593Smuzhiyun TEST_SPEED_RETURN(25G, 25000);
407*4882a593Smuzhiyun TEST_SPEED_RETURN(10G, 10000);
408*4882a593Smuzhiyun TEST_SPEED_RETURN(1G, 1000);
409*4882a593Smuzhiyun TEST_SPEED_RETURN(100M, 100);
410*4882a593Smuzhiyun
411*4882a593Smuzhiyun #undef TEST_SPEED_RETURN
412*4882a593Smuzhiyun
413*4882a593Smuzhiyun return 0;
414*4882a593Smuzhiyun }
415*4882a593Smuzhiyun
416*4882a593Smuzhiyun /**
417*4882a593Smuzhiyun * fwcap_to_fwspeed - return highest speed in Port Capabilities
418*4882a593Smuzhiyun * @acaps: advertised Port Capabilities
419*4882a593Smuzhiyun *
420*4882a593Smuzhiyun * Get the highest speed for the port from the advertised Port
421*4882a593Smuzhiyun * Capabilities. It will be either the highest speed from the list of
422*4882a593Smuzhiyun * speeds or whatever user has set using ethtool.
423*4882a593Smuzhiyun */
fwcap_to_fwspeed(fw_port_cap32_t acaps)424*4882a593Smuzhiyun static fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps)
425*4882a593Smuzhiyun {
426*4882a593Smuzhiyun #define TEST_SPEED_RETURN(__caps_speed) \
427*4882a593Smuzhiyun do { \
428*4882a593Smuzhiyun if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \
429*4882a593Smuzhiyun return FW_PORT_CAP32_SPEED_##__caps_speed; \
430*4882a593Smuzhiyun } while (0)
431*4882a593Smuzhiyun
432*4882a593Smuzhiyun TEST_SPEED_RETURN(400G);
433*4882a593Smuzhiyun TEST_SPEED_RETURN(200G);
434*4882a593Smuzhiyun TEST_SPEED_RETURN(100G);
435*4882a593Smuzhiyun TEST_SPEED_RETURN(50G);
436*4882a593Smuzhiyun TEST_SPEED_RETURN(40G);
437*4882a593Smuzhiyun TEST_SPEED_RETURN(25G);
438*4882a593Smuzhiyun TEST_SPEED_RETURN(10G);
439*4882a593Smuzhiyun TEST_SPEED_RETURN(1G);
440*4882a593Smuzhiyun TEST_SPEED_RETURN(100M);
441*4882a593Smuzhiyun
442*4882a593Smuzhiyun #undef TEST_SPEED_RETURN
443*4882a593Smuzhiyun return 0;
444*4882a593Smuzhiyun }
445*4882a593Smuzhiyun
446*4882a593Smuzhiyun /*
447*4882a593Smuzhiyun * init_link_config - initialize a link's SW state
448*4882a593Smuzhiyun * @lc: structure holding the link state
449*4882a593Smuzhiyun * @pcaps: link Port Capabilities
450*4882a593Smuzhiyun * @acaps: link current Advertised Port Capabilities
451*4882a593Smuzhiyun *
452*4882a593Smuzhiyun * Initializes the SW state maintained for each link, including the link's
453*4882a593Smuzhiyun * capabilities and default speed/flow-control/autonegotiation settings.
454*4882a593Smuzhiyun */
init_link_config(struct link_config * lc,fw_port_cap32_t pcaps,fw_port_cap32_t acaps)455*4882a593Smuzhiyun static void init_link_config(struct link_config *lc,
456*4882a593Smuzhiyun fw_port_cap32_t pcaps,
457*4882a593Smuzhiyun fw_port_cap32_t acaps)
458*4882a593Smuzhiyun {
459*4882a593Smuzhiyun lc->pcaps = pcaps;
460*4882a593Smuzhiyun lc->lpacaps = 0;
461*4882a593Smuzhiyun lc->speed_caps = 0;
462*4882a593Smuzhiyun lc->speed = 0;
463*4882a593Smuzhiyun lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
464*4882a593Smuzhiyun
465*4882a593Smuzhiyun /* For Forward Error Control, we default to whatever the Firmware
466*4882a593Smuzhiyun * tells us the Link is currently advertising.
467*4882a593Smuzhiyun */
468*4882a593Smuzhiyun lc->auto_fec = fwcap_to_cc_fec(acaps);
469*4882a593Smuzhiyun lc->requested_fec = FEC_AUTO;
470*4882a593Smuzhiyun lc->fec = lc->auto_fec;
471*4882a593Smuzhiyun
472*4882a593Smuzhiyun /* If the Port is capable of Auto-Negtotiation, initialize it as
473*4882a593Smuzhiyun * "enabled" and copy over all of the Physical Port Capabilities
474*4882a593Smuzhiyun * to the Advertised Port Capabilities. Otherwise mark it as
475*4882a593Smuzhiyun * Auto-Negotiate disabled and select the highest supported speed
476*4882a593Smuzhiyun * for the link. Note parallel structure in t4_link_l1cfg_core()
477*4882a593Smuzhiyun * and t4_handle_get_port_info().
478*4882a593Smuzhiyun */
479*4882a593Smuzhiyun if (lc->pcaps & FW_PORT_CAP32_ANEG) {
480*4882a593Smuzhiyun lc->acaps = acaps & ADVERT_MASK;
481*4882a593Smuzhiyun lc->autoneg = AUTONEG_ENABLE;
482*4882a593Smuzhiyun lc->requested_fc |= PAUSE_AUTONEG;
483*4882a593Smuzhiyun } else {
484*4882a593Smuzhiyun lc->acaps = 0;
485*4882a593Smuzhiyun lc->autoneg = AUTONEG_DISABLE;
486*4882a593Smuzhiyun lc->speed_caps = fwcap_to_fwspeed(acaps);
487*4882a593Smuzhiyun }
488*4882a593Smuzhiyun }
489*4882a593Smuzhiyun
/**
 *	t4vf_port_init - initialize port hardware/software state
 *	@adapter: the adapter
 *	@pidx: the adapter port index
 *
 *	Queries the firmware for the Virtual Interface's MAC address and RSS
 *	size and, if the VF has PORT read access, for the physical port's
 *	type and capabilities.  Returns 0 on success or a negative errno /
 *	negated firmware error code on failure.
 */
int t4vf_port_init(struct adapter *adapter, int pidx)
{
	struct port_info *pi = adap2pinfo(adapter, pidx);
	unsigned int fw_caps = adapter->params.fw_caps_support;
	struct fw_vi_cmd vi_cmd, vi_rpl;
	struct fw_port_cmd port_cmd, port_rpl;
	enum fw_port_type port_type;
	int mdio_addr;
	fw_port_cap32_t pcaps, acaps;
	int ret;

	/* If we haven't yet determined whether we're talking to Firmware
	 * which knows the new 32-bit Port Capabilities, it's time to find
	 * out now.  This will also tell new Firmware to send us Port Status
	 * Updates using the new 32-bit Port Capabilities version of the
	 * Port Information message.
	 */
	if (fw_caps == FW_CAPS_UNKNOWN) {
		u32 param, val;

		param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
		val = 1;
		/* If the Set Params fails, the firmware is old and only
		 * speaks the 16-bit Port Capabilities format.
		 */
		ret = t4vf_set_params(adapter, 1, &param, &val);
		fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
		adapter->params.fw_caps_support = fw_caps;
	}

	/*
	 * Execute a VI Read command to get our Virtual Interface information
	 * like MAC address, etc.
	 */
	memset(&vi_cmd, 0, sizeof(vi_cmd));
	vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
				       FW_CMD_REQUEST_F |
				       FW_CMD_READ_F);
	vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
	vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(pi->viid));
	ret = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
	if (ret != FW_SUCCESS)
		return ret;

	/* Sanity check: the firmware's notion of which physical port this
	 * Virtual Interface is attached to must match our own.
	 */
	BUG_ON(pi->port_id != FW_VI_CMD_PORTID_G(vi_rpl.portid_pkd));
	pi->rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(vi_rpl.rsssize_pkd));
	t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac);

	/*
	 * If we don't have read access to our port information, we're done
	 * now.  Otherwise, execute a PORT Read command to get it ...
	 */
	if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
		return 0;

	memset(&port_cmd, 0, sizeof(port_cmd));
	port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
					    FW_CMD_REQUEST_F |
					    FW_CMD_READ_F |
					    FW_PORT_CMD_PORTID_V(pi->port_id));
	/* Ask for the 16- or 32-bit Port Information format depending on
	 * what the firmware supports (determined above).
	 */
	port_cmd.action_to_len16 = cpu_to_be32(
		FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
				     ? FW_PORT_ACTION_GET_PORT_INFO
				     : FW_PORT_ACTION_GET_PORT_INFO32) |
		FW_LEN16(port_cmd));
	ret = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl);
	if (ret != FW_SUCCESS)
		return ret;

	/* Extract the various fields from the Port Information message.
	 * The 16-bit capability values are converted to the 32-bit format
	 * so the rest of the driver only deals with one representation.
	 */
	if (fw_caps == FW_CAPS16) {
		u32 lstatus = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);

		port_type = FW_PORT_CMD_PTYPE_G(lstatus);
		mdio_addr = ((lstatus & FW_PORT_CMD_MDIOCAP_F)
			     ? FW_PORT_CMD_MDIOADDR_G(lstatus)
			     : -1);
		pcaps = fwcaps16_to_caps32(be16_to_cpu(port_rpl.u.info.pcap));
		acaps = fwcaps16_to_caps32(be16_to_cpu(port_rpl.u.info.acap));
	} else {
		u32 lstatus32 =
			be32_to_cpu(port_rpl.u.info32.lstatus32_to_cbllen32);

		port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
		mdio_addr = ((lstatus32 & FW_PORT_CMD_MDIOCAP32_F)
			     ? FW_PORT_CMD_MDIOADDR32_G(lstatus32)
			     : -1);
		pcaps = be32_to_cpu(port_rpl.u.info32.pcaps32);
		acaps = be32_to_cpu(port_rpl.u.info32.acaps32);
	}

	pi->port_type = port_type;
	pi->mdio_addr = mdio_addr;
	pi->mod_type = FW_PORT_MOD_TYPE_NA;

	init_link_config(&pi->link_cfg, pcaps, acaps);
	return 0;
}
591*4882a593Smuzhiyun
592*4882a593Smuzhiyun /**
593*4882a593Smuzhiyun * t4vf_fw_reset - issue a reset to FW
594*4882a593Smuzhiyun * @adapter: the adapter
595*4882a593Smuzhiyun *
596*4882a593Smuzhiyun * Issues a reset command to FW. For a Physical Function this would
597*4882a593Smuzhiyun * result in the Firmware resetting all of its state. For a Virtual
598*4882a593Smuzhiyun * Function this just resets the state associated with the VF.
599*4882a593Smuzhiyun */
t4vf_fw_reset(struct adapter * adapter)600*4882a593Smuzhiyun int t4vf_fw_reset(struct adapter *adapter)
601*4882a593Smuzhiyun {
602*4882a593Smuzhiyun struct fw_reset_cmd cmd;
603*4882a593Smuzhiyun
604*4882a593Smuzhiyun memset(&cmd, 0, sizeof(cmd));
605*4882a593Smuzhiyun cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) |
606*4882a593Smuzhiyun FW_CMD_WRITE_F);
607*4882a593Smuzhiyun cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
608*4882a593Smuzhiyun return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
609*4882a593Smuzhiyun }
610*4882a593Smuzhiyun
611*4882a593Smuzhiyun /**
612*4882a593Smuzhiyun * t4vf_query_params - query FW or device parameters
613*4882a593Smuzhiyun * @adapter: the adapter
614*4882a593Smuzhiyun * @nparams: the number of parameters
615*4882a593Smuzhiyun * @params: the parameter names
616*4882a593Smuzhiyun * @vals: the parameter values
617*4882a593Smuzhiyun *
618*4882a593Smuzhiyun * Reads the values of firmware or device parameters. Up to 7 parameters
619*4882a593Smuzhiyun * can be queried at once.
620*4882a593Smuzhiyun */
t4vf_query_params(struct adapter * adapter,unsigned int nparams,const u32 * params,u32 * vals)621*4882a593Smuzhiyun static int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
622*4882a593Smuzhiyun const u32 *params, u32 *vals)
623*4882a593Smuzhiyun {
624*4882a593Smuzhiyun int i, ret;
625*4882a593Smuzhiyun struct fw_params_cmd cmd, rpl;
626*4882a593Smuzhiyun struct fw_params_param *p;
627*4882a593Smuzhiyun size_t len16;
628*4882a593Smuzhiyun
629*4882a593Smuzhiyun if (nparams > 7)
630*4882a593Smuzhiyun return -EINVAL;
631*4882a593Smuzhiyun
632*4882a593Smuzhiyun memset(&cmd, 0, sizeof(cmd));
633*4882a593Smuzhiyun cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
634*4882a593Smuzhiyun FW_CMD_REQUEST_F |
635*4882a593Smuzhiyun FW_CMD_READ_F);
636*4882a593Smuzhiyun len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
637*4882a593Smuzhiyun param[nparams].mnem), 16);
638*4882a593Smuzhiyun cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
639*4882a593Smuzhiyun for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
640*4882a593Smuzhiyun p->mnem = htonl(*params++);
641*4882a593Smuzhiyun
642*4882a593Smuzhiyun ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
643*4882a593Smuzhiyun if (ret == 0)
644*4882a593Smuzhiyun for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
645*4882a593Smuzhiyun *vals++ = be32_to_cpu(p->val);
646*4882a593Smuzhiyun return ret;
647*4882a593Smuzhiyun }
648*4882a593Smuzhiyun
649*4882a593Smuzhiyun /**
650*4882a593Smuzhiyun * t4vf_set_params - sets FW or device parameters
651*4882a593Smuzhiyun * @adapter: the adapter
652*4882a593Smuzhiyun * @nparams: the number of parameters
653*4882a593Smuzhiyun * @params: the parameter names
654*4882a593Smuzhiyun * @vals: the parameter values
655*4882a593Smuzhiyun *
656*4882a593Smuzhiyun * Sets the values of firmware or device parameters. Up to 7 parameters
657*4882a593Smuzhiyun * can be specified at once.
658*4882a593Smuzhiyun */
t4vf_set_params(struct adapter * adapter,unsigned int nparams,const u32 * params,const u32 * vals)659*4882a593Smuzhiyun int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
660*4882a593Smuzhiyun const u32 *params, const u32 *vals)
661*4882a593Smuzhiyun {
662*4882a593Smuzhiyun int i;
663*4882a593Smuzhiyun struct fw_params_cmd cmd;
664*4882a593Smuzhiyun struct fw_params_param *p;
665*4882a593Smuzhiyun size_t len16;
666*4882a593Smuzhiyun
667*4882a593Smuzhiyun if (nparams > 7)
668*4882a593Smuzhiyun return -EINVAL;
669*4882a593Smuzhiyun
670*4882a593Smuzhiyun memset(&cmd, 0, sizeof(cmd));
671*4882a593Smuzhiyun cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
672*4882a593Smuzhiyun FW_CMD_REQUEST_F |
673*4882a593Smuzhiyun FW_CMD_WRITE_F);
674*4882a593Smuzhiyun len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
675*4882a593Smuzhiyun param[nparams]), 16);
676*4882a593Smuzhiyun cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
677*4882a593Smuzhiyun for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
678*4882a593Smuzhiyun p->mnem = cpu_to_be32(*params++);
679*4882a593Smuzhiyun p->val = cpu_to_be32(*vals++);
680*4882a593Smuzhiyun }
681*4882a593Smuzhiyun
682*4882a593Smuzhiyun return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
683*4882a593Smuzhiyun }
684*4882a593Smuzhiyun
685*4882a593Smuzhiyun /**
686*4882a593Smuzhiyun * t4vf_fl_pkt_align - return the fl packet alignment
687*4882a593Smuzhiyun * @adapter: the adapter
688*4882a593Smuzhiyun *
689*4882a593Smuzhiyun * T4 has a single field to specify the packing and padding boundary.
690*4882a593Smuzhiyun * T5 onwards has separate fields for this and hence the alignment for
691*4882a593Smuzhiyun * next packet offset is maximum of these two. And T6 changes the
692*4882a593Smuzhiyun * Ingress Padding Boundary Shift, so it's all a mess and it's best
693*4882a593Smuzhiyun * if we put this in low-level Common Code ...
694*4882a593Smuzhiyun *
695*4882a593Smuzhiyun */
t4vf_fl_pkt_align(struct adapter * adapter)696*4882a593Smuzhiyun int t4vf_fl_pkt_align(struct adapter *adapter)
697*4882a593Smuzhiyun {
698*4882a593Smuzhiyun u32 sge_control, sge_control2;
699*4882a593Smuzhiyun unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
700*4882a593Smuzhiyun
701*4882a593Smuzhiyun sge_control = adapter->params.sge.sge_control;
702*4882a593Smuzhiyun
703*4882a593Smuzhiyun /* T4 uses a single control field to specify both the PCIe Padding and
704*4882a593Smuzhiyun * Packing Boundary. T5 introduced the ability to specify these
705*4882a593Smuzhiyun * separately. The actual Ingress Packet Data alignment boundary
706*4882a593Smuzhiyun * within Packed Buffer Mode is the maximum of these two
707*4882a593Smuzhiyun * specifications. (Note that it makes no real practical sense to
708*4882a593Smuzhiyun * have the Pading Boudary be larger than the Packing Boundary but you
709*4882a593Smuzhiyun * could set the chip up that way and, in fact, legacy T4 code would
710*4882a593Smuzhiyun * end doing this because it would initialize the Padding Boundary and
711*4882a593Smuzhiyun * leave the Packing Boundary initialized to 0 (16 bytes).)
712*4882a593Smuzhiyun * Padding Boundary values in T6 starts from 8B,
713*4882a593Smuzhiyun * where as it is 32B for T4 and T5.
714*4882a593Smuzhiyun */
715*4882a593Smuzhiyun if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
716*4882a593Smuzhiyun ingpad_shift = INGPADBOUNDARY_SHIFT_X;
717*4882a593Smuzhiyun else
718*4882a593Smuzhiyun ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
719*4882a593Smuzhiyun
720*4882a593Smuzhiyun ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);
721*4882a593Smuzhiyun
722*4882a593Smuzhiyun fl_align = ingpadboundary;
723*4882a593Smuzhiyun if (!is_t4(adapter->params.chip)) {
724*4882a593Smuzhiyun /* T5 has a different interpretation of one of the PCIe Packing
725*4882a593Smuzhiyun * Boundary values.
726*4882a593Smuzhiyun */
727*4882a593Smuzhiyun sge_control2 = adapter->params.sge.sge_control2;
728*4882a593Smuzhiyun ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
729*4882a593Smuzhiyun if (ingpackboundary == INGPACKBOUNDARY_16B_X)
730*4882a593Smuzhiyun ingpackboundary = 16;
731*4882a593Smuzhiyun else
732*4882a593Smuzhiyun ingpackboundary = 1 << (ingpackboundary +
733*4882a593Smuzhiyun INGPACKBOUNDARY_SHIFT_X);
734*4882a593Smuzhiyun
735*4882a593Smuzhiyun fl_align = max(ingpadboundary, ingpackboundary);
736*4882a593Smuzhiyun }
737*4882a593Smuzhiyun return fl_align;
738*4882a593Smuzhiyun }
739*4882a593Smuzhiyun
740*4882a593Smuzhiyun /**
741*4882a593Smuzhiyun * t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information
742*4882a593Smuzhiyun * @adapter: the adapter
743*4882a593Smuzhiyun * @qid: the Queue ID
744*4882a593Smuzhiyun * @qtype: the Ingress or Egress type for @qid
745*4882a593Smuzhiyun * @pbar2_qoffset: BAR2 Queue Offset
746*4882a593Smuzhiyun * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
747*4882a593Smuzhiyun *
748*4882a593Smuzhiyun * Returns the BAR2 SGE Queue Registers information associated with the
749*4882a593Smuzhiyun * indicated Absolute Queue ID. These are passed back in return value
750*4882a593Smuzhiyun * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
751*4882a593Smuzhiyun * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
752*4882a593Smuzhiyun *
753*4882a593Smuzhiyun * This may return an error which indicates that BAR2 SGE Queue
754*4882a593Smuzhiyun * registers aren't available. If an error is not returned, then the
755*4882a593Smuzhiyun * following values are returned:
756*4882a593Smuzhiyun *
757*4882a593Smuzhiyun * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
758*4882a593Smuzhiyun * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
759*4882a593Smuzhiyun *
760*4882a593Smuzhiyun * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
761*4882a593Smuzhiyun * require the "Inferred Queue ID" ability may be used. E.g. the
762*4882a593Smuzhiyun * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
763*4882a593Smuzhiyun * then these "Inferred Queue ID" register may not be used.
764*4882a593Smuzhiyun */
t4vf_bar2_sge_qregs(struct adapter * adapter,unsigned int qid,enum t4_bar2_qtype qtype,u64 * pbar2_qoffset,unsigned int * pbar2_qid)765*4882a593Smuzhiyun int t4vf_bar2_sge_qregs(struct adapter *adapter,
766*4882a593Smuzhiyun unsigned int qid,
767*4882a593Smuzhiyun enum t4_bar2_qtype qtype,
768*4882a593Smuzhiyun u64 *pbar2_qoffset,
769*4882a593Smuzhiyun unsigned int *pbar2_qid)
770*4882a593Smuzhiyun {
771*4882a593Smuzhiyun unsigned int page_shift, page_size, qpp_shift, qpp_mask;
772*4882a593Smuzhiyun u64 bar2_page_offset, bar2_qoffset;
773*4882a593Smuzhiyun unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
774*4882a593Smuzhiyun
775*4882a593Smuzhiyun /* T4 doesn't support BAR2 SGE Queue registers.
776*4882a593Smuzhiyun */
777*4882a593Smuzhiyun if (is_t4(adapter->params.chip))
778*4882a593Smuzhiyun return -EINVAL;
779*4882a593Smuzhiyun
780*4882a593Smuzhiyun /* Get our SGE Page Size parameters.
781*4882a593Smuzhiyun */
782*4882a593Smuzhiyun page_shift = adapter->params.sge.sge_vf_hps + 10;
783*4882a593Smuzhiyun page_size = 1 << page_shift;
784*4882a593Smuzhiyun
785*4882a593Smuzhiyun /* Get the right Queues per Page parameters for our Queue.
786*4882a593Smuzhiyun */
787*4882a593Smuzhiyun qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
788*4882a593Smuzhiyun ? adapter->params.sge.sge_vf_eq_qpp
789*4882a593Smuzhiyun : adapter->params.sge.sge_vf_iq_qpp);
790*4882a593Smuzhiyun qpp_mask = (1 << qpp_shift) - 1;
791*4882a593Smuzhiyun
792*4882a593Smuzhiyun /* Calculate the basics of the BAR2 SGE Queue register area:
793*4882a593Smuzhiyun * o The BAR2 page the Queue registers will be in.
794*4882a593Smuzhiyun * o The BAR2 Queue ID.
795*4882a593Smuzhiyun * o The BAR2 Queue ID Offset into the BAR2 page.
796*4882a593Smuzhiyun */
797*4882a593Smuzhiyun bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
798*4882a593Smuzhiyun bar2_qid = qid & qpp_mask;
799*4882a593Smuzhiyun bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
800*4882a593Smuzhiyun
801*4882a593Smuzhiyun /* If the BAR2 Queue ID Offset is less than the Page Size, then the
802*4882a593Smuzhiyun * hardware will infer the Absolute Queue ID simply from the writes to
803*4882a593Smuzhiyun * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
804*4882a593Smuzhiyun * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
805*4882a593Smuzhiyun * write to the first BAR2 SGE Queue Area within the BAR2 Page with
806*4882a593Smuzhiyun * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
807*4882a593Smuzhiyun * from the BAR2 Page and BAR2 Queue ID.
808*4882a593Smuzhiyun *
809*4882a593Smuzhiyun * One important censequence of this is that some BAR2 SGE registers
810*4882a593Smuzhiyun * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
811*4882a593Smuzhiyun * there. But other registers synthesize the SGE Queue ID purely
812*4882a593Smuzhiyun * from the writes to the registers -- the Write Combined Doorbell
813*4882a593Smuzhiyun * Buffer is a good example. These BAR2 SGE Registers are only
814*4882a593Smuzhiyun * available for those BAR2 SGE Register areas where the SGE Absolute
815*4882a593Smuzhiyun * Queue ID can be inferred from simple writes.
816*4882a593Smuzhiyun */
817*4882a593Smuzhiyun bar2_qoffset = bar2_page_offset;
818*4882a593Smuzhiyun bar2_qinferred = (bar2_qid_offset < page_size);
819*4882a593Smuzhiyun if (bar2_qinferred) {
820*4882a593Smuzhiyun bar2_qoffset += bar2_qid_offset;
821*4882a593Smuzhiyun bar2_qid = 0;
822*4882a593Smuzhiyun }
823*4882a593Smuzhiyun
824*4882a593Smuzhiyun *pbar2_qoffset = bar2_qoffset;
825*4882a593Smuzhiyun *pbar2_qid = bar2_qid;
826*4882a593Smuzhiyun return 0;
827*4882a593Smuzhiyun }
828*4882a593Smuzhiyun
t4vf_get_pf_from_vf(struct adapter * adapter)829*4882a593Smuzhiyun unsigned int t4vf_get_pf_from_vf(struct adapter *adapter)
830*4882a593Smuzhiyun {
831*4882a593Smuzhiyun u32 whoami;
832*4882a593Smuzhiyun
833*4882a593Smuzhiyun whoami = t4_read_reg(adapter, T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
834*4882a593Smuzhiyun return (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
835*4882a593Smuzhiyun SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami));
836*4882a593Smuzhiyun }
837*4882a593Smuzhiyun
/**
 *	t4vf_get_sge_params - retrieve adapter Scatter gather Engine parameters
 *	@adapter: the adapter
 *
 *	Retrieves various core SGE parameters in the form of hardware SGE
 *	register values.  The caller is responsible for decoding these as
 *	needed.  The SGE parameters are stored in @adapter->params.sge.
 *	Returns 0 on success or the (non-zero) result of a failed
 *	t4vf_query_params() firmware exchange.
 */
int t4vf_get_sge_params(struct adapter *adapter)
{
	struct sge_params *sge_params = &adapter->params.sge;
	u32 params[7], vals[7];
	int v;

	/* First batch: read the seven core SGE registers (control, host
	 * page size, first two free-list buffer sizes, and the three
	 * timer-value pairs) in a single firmware query.
	 */
	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL_A));
	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE_A));
	params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0_A));
	params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1_A));
	params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1_A));
	params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3_A));
	params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5_A));
	v = t4vf_query_params(adapter, 7, params, vals);
	if (v)
		return v;
	sge_params->sge_control = vals[0];
	sge_params->sge_host_page_size = vals[1];
	sge_params->sge_fl_buffer_size[0] = vals[2];
	sge_params->sge_fl_buffer_size[1] = vals[3];
	sge_params->sge_timer_value_0_and_1 = vals[4];
	sge_params->sge_timer_value_2_and_3 = vals[5];
	sge_params->sge_timer_value_4_and_5 = vals[6];

	/* T4 uses a single control field to specify both the PCIe Padding and
	 * Packing Boundary.  T5 introduced the ability to specify these
	 * separately with the Padding Boundary in SGE_CONTROL and and Packing
	 * Boundary in SGE_CONTROL2.  So for T5 and later we need to grab
	 * SGE_CONTROL in order to determine how ingress packet data will be
	 * laid out in Packed Buffer Mode.  Unfortunately, older versions of
	 * the firmware won't let us retrieve SGE_CONTROL2 so if we get a
	 * failure grabbing it we throw an error since we can't figure out the
	 * right value.
	 */
	if (!is_t4(adapter->params.chip)) {
		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
			     FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL2_A));
		v = t4vf_query_params(adapter, 1, params, vals);
		if (v != FW_SUCCESS) {
			dev_err(adapter->pdev_dev,
				"Unable to get SGE Control2; "
				"probably old firmware.\n");
			return v;
		}
		sge_params->sge_control2 = vals[0];
	}

	/* Second batch: ingress RX threshold and congestion control. */
	params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD_A));
	params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
		     FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL_A));
	v = t4vf_query_params(adapter, 2, params, vals);
	if (v)
		return v;
	sge_params->sge_ingress_rx_threshold = vals[0];
	sge_params->sge_congestion_control = vals[1];

	/* For T5 and later we want to use the new BAR2 Doorbells.
	 * Unfortunately, older firmware didn't allow the this register to be
	 * read.
	 */
	if (!is_t4(adapter->params.chip)) {
		unsigned int pf, s_hps, s_qpp;

		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
			     FW_PARAMS_PARAM_XYZ_V(
				     SGE_EGRESS_QUEUES_PER_PAGE_VF_A));
		params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
			     FW_PARAMS_PARAM_XYZ_V(
				     SGE_INGRESS_QUEUES_PER_PAGE_VF_A));
		v = t4vf_query_params(adapter, 2, params, vals);
		if (v != FW_SUCCESS) {
			dev_warn(adapter->pdev_dev,
				 "Unable to get VF SGE Queues/Page; "
				 "probably old firmware.\n");
			return v;
		}
		sge_params->sge_egress_queues_per_page = vals[0];
		sge_params->sge_ingress_queues_per_page = vals[1];

		/* We need the Queues/Page for our VF.  This is based on the
		 * PF from which we're instantiated and is indexed in the
		 * register we just read.  Do it once here so other code in
		 * the driver can just use it.
		 */
		pf = t4vf_get_pf_from_vf(adapter);
		s_hps = (HOSTPAGESIZEPF0_S +
			 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
		sge_params->sge_vf_hps =
			((sge_params->sge_host_page_size >> s_hps)
			 & HOSTPAGESIZEPF0_M);

		s_qpp = (QUEUESPERPAGEPF0_S +
			 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf);
		sge_params->sge_vf_eq_qpp =
			((sge_params->sge_egress_queues_per_page >> s_qpp)
			 & QUEUESPERPAGEPF0_M);
		sge_params->sge_vf_iq_qpp =
			((sge_params->sge_ingress_queues_per_page >> s_qpp)
			 & QUEUESPERPAGEPF0_M);
	}

	return 0;
}
957*4882a593Smuzhiyun
958*4882a593Smuzhiyun /**
959*4882a593Smuzhiyun * t4vf_get_vpd_params - retrieve device VPD paremeters
960*4882a593Smuzhiyun * @adapter: the adapter
961*4882a593Smuzhiyun *
962*4882a593Smuzhiyun * Retrives various device Vital Product Data parameters. The parameters
963*4882a593Smuzhiyun * are stored in @adapter->params.vpd.
964*4882a593Smuzhiyun */
t4vf_get_vpd_params(struct adapter * adapter)965*4882a593Smuzhiyun int t4vf_get_vpd_params(struct adapter *adapter)
966*4882a593Smuzhiyun {
967*4882a593Smuzhiyun struct vpd_params *vpd_params = &adapter->params.vpd;
968*4882a593Smuzhiyun u32 params[7], vals[7];
969*4882a593Smuzhiyun int v;
970*4882a593Smuzhiyun
971*4882a593Smuzhiyun params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
972*4882a593Smuzhiyun FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
973*4882a593Smuzhiyun v = t4vf_query_params(adapter, 1, params, vals);
974*4882a593Smuzhiyun if (v)
975*4882a593Smuzhiyun return v;
976*4882a593Smuzhiyun vpd_params->cclk = vals[0];
977*4882a593Smuzhiyun
978*4882a593Smuzhiyun return 0;
979*4882a593Smuzhiyun }
980*4882a593Smuzhiyun
981*4882a593Smuzhiyun /**
982*4882a593Smuzhiyun * t4vf_get_dev_params - retrieve device paremeters
983*4882a593Smuzhiyun * @adapter: the adapter
984*4882a593Smuzhiyun *
985*4882a593Smuzhiyun * Retrives various device parameters. The parameters are stored in
986*4882a593Smuzhiyun * @adapter->params.dev.
987*4882a593Smuzhiyun */
t4vf_get_dev_params(struct adapter * adapter)988*4882a593Smuzhiyun int t4vf_get_dev_params(struct adapter *adapter)
989*4882a593Smuzhiyun {
990*4882a593Smuzhiyun struct dev_params *dev_params = &adapter->params.dev;
991*4882a593Smuzhiyun u32 params[7], vals[7];
992*4882a593Smuzhiyun int v;
993*4882a593Smuzhiyun
994*4882a593Smuzhiyun params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
995*4882a593Smuzhiyun FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWREV));
996*4882a593Smuzhiyun params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
997*4882a593Smuzhiyun FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPREV));
998*4882a593Smuzhiyun v = t4vf_query_params(adapter, 2, params, vals);
999*4882a593Smuzhiyun if (v)
1000*4882a593Smuzhiyun return v;
1001*4882a593Smuzhiyun dev_params->fwrev = vals[0];
1002*4882a593Smuzhiyun dev_params->tprev = vals[1];
1003*4882a593Smuzhiyun
1004*4882a593Smuzhiyun return 0;
1005*4882a593Smuzhiyun }
1006*4882a593Smuzhiyun
1007*4882a593Smuzhiyun /**
1008*4882a593Smuzhiyun * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
1009*4882a593Smuzhiyun * @adapter: the adapter
1010*4882a593Smuzhiyun *
1011*4882a593Smuzhiyun * Retrieves global RSS mode and parameters with which we have to live
1012*4882a593Smuzhiyun * and stores them in the @adapter's RSS parameters.
1013*4882a593Smuzhiyun */
t4vf_get_rss_glb_config(struct adapter * adapter)1014*4882a593Smuzhiyun int t4vf_get_rss_glb_config(struct adapter *adapter)
1015*4882a593Smuzhiyun {
1016*4882a593Smuzhiyun struct rss_params *rss = &adapter->params.rss;
1017*4882a593Smuzhiyun struct fw_rss_glb_config_cmd cmd, rpl;
1018*4882a593Smuzhiyun int v;
1019*4882a593Smuzhiyun
1020*4882a593Smuzhiyun /*
1021*4882a593Smuzhiyun * Execute an RSS Global Configuration read command to retrieve
1022*4882a593Smuzhiyun * our RSS configuration.
1023*4882a593Smuzhiyun */
1024*4882a593Smuzhiyun memset(&cmd, 0, sizeof(cmd));
1025*4882a593Smuzhiyun cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
1026*4882a593Smuzhiyun FW_CMD_REQUEST_F |
1027*4882a593Smuzhiyun FW_CMD_READ_F);
1028*4882a593Smuzhiyun cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
1029*4882a593Smuzhiyun v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
1030*4882a593Smuzhiyun if (v)
1031*4882a593Smuzhiyun return v;
1032*4882a593Smuzhiyun
1033*4882a593Smuzhiyun /*
1034*4882a593Smuzhiyun * Transate the big-endian RSS Global Configuration into our
1035*4882a593Smuzhiyun * cpu-endian format based on the RSS mode. We also do first level
1036*4882a593Smuzhiyun * filtering at this point to weed out modes which don't support
1037*4882a593Smuzhiyun * VF Drivers ...
1038*4882a593Smuzhiyun */
1039*4882a593Smuzhiyun rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_G(
1040*4882a593Smuzhiyun be32_to_cpu(rpl.u.manual.mode_pkd));
1041*4882a593Smuzhiyun switch (rss->mode) {
1042*4882a593Smuzhiyun case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
1043*4882a593Smuzhiyun u32 word = be32_to_cpu(
1044*4882a593Smuzhiyun rpl.u.basicvirtual.synmapen_to_hashtoeplitz);
1045*4882a593Smuzhiyun
1046*4882a593Smuzhiyun rss->u.basicvirtual.synmapen =
1047*4882a593Smuzhiyun ((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F) != 0);
1048*4882a593Smuzhiyun rss->u.basicvirtual.syn4tupenipv6 =
1049*4882a593Smuzhiyun ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F) != 0);
1050*4882a593Smuzhiyun rss->u.basicvirtual.syn2tupenipv6 =
1051*4882a593Smuzhiyun ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F) != 0);
1052*4882a593Smuzhiyun rss->u.basicvirtual.syn4tupenipv4 =
1053*4882a593Smuzhiyun ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F) != 0);
1054*4882a593Smuzhiyun rss->u.basicvirtual.syn2tupenipv4 =
1055*4882a593Smuzhiyun ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F) != 0);
1056*4882a593Smuzhiyun
1057*4882a593Smuzhiyun rss->u.basicvirtual.ofdmapen =
1058*4882a593Smuzhiyun ((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F) != 0);
1059*4882a593Smuzhiyun
1060*4882a593Smuzhiyun rss->u.basicvirtual.tnlmapen =
1061*4882a593Smuzhiyun ((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F) != 0);
1062*4882a593Smuzhiyun rss->u.basicvirtual.tnlalllookup =
1063*4882a593Smuzhiyun ((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F) != 0);
1064*4882a593Smuzhiyun
1065*4882a593Smuzhiyun rss->u.basicvirtual.hashtoeplitz =
1066*4882a593Smuzhiyun ((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F) != 0);
1067*4882a593Smuzhiyun
1068*4882a593Smuzhiyun /* we need at least Tunnel Map Enable to be set */
1069*4882a593Smuzhiyun if (!rss->u.basicvirtual.tnlmapen)
1070*4882a593Smuzhiyun return -EINVAL;
1071*4882a593Smuzhiyun break;
1072*4882a593Smuzhiyun }
1073*4882a593Smuzhiyun
1074*4882a593Smuzhiyun default:
1075*4882a593Smuzhiyun /* all unknown/unsupported RSS modes result in an error */
1076*4882a593Smuzhiyun return -EINVAL;
1077*4882a593Smuzhiyun }
1078*4882a593Smuzhiyun
1079*4882a593Smuzhiyun return 0;
1080*4882a593Smuzhiyun }
1081*4882a593Smuzhiyun
1082*4882a593Smuzhiyun /**
1083*4882a593Smuzhiyun * t4vf_get_vfres - retrieve VF resource limits
1084*4882a593Smuzhiyun * @adapter: the adapter
1085*4882a593Smuzhiyun *
1086*4882a593Smuzhiyun * Retrieves configured resource limits and capabilities for a virtual
1087*4882a593Smuzhiyun * function. The results are stored in @adapter->vfres.
1088*4882a593Smuzhiyun */
t4vf_get_vfres(struct adapter * adapter)1089*4882a593Smuzhiyun int t4vf_get_vfres(struct adapter *adapter)
1090*4882a593Smuzhiyun {
1091*4882a593Smuzhiyun struct vf_resources *vfres = &adapter->params.vfres;
1092*4882a593Smuzhiyun struct fw_pfvf_cmd cmd, rpl;
1093*4882a593Smuzhiyun int v;
1094*4882a593Smuzhiyun u32 word;
1095*4882a593Smuzhiyun
1096*4882a593Smuzhiyun /*
1097*4882a593Smuzhiyun * Execute PFVF Read command to get VF resource limits; bail out early
1098*4882a593Smuzhiyun * with error on command failure.
1099*4882a593Smuzhiyun */
1100*4882a593Smuzhiyun memset(&cmd, 0, sizeof(cmd));
1101*4882a593Smuzhiyun cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
1102*4882a593Smuzhiyun FW_CMD_REQUEST_F |
1103*4882a593Smuzhiyun FW_CMD_READ_F);
1104*4882a593Smuzhiyun cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
1105*4882a593Smuzhiyun v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
1106*4882a593Smuzhiyun if (v)
1107*4882a593Smuzhiyun return v;
1108*4882a593Smuzhiyun
1109*4882a593Smuzhiyun /*
1110*4882a593Smuzhiyun * Extract VF resource limits and return success.
1111*4882a593Smuzhiyun */
1112*4882a593Smuzhiyun word = be32_to_cpu(rpl.niqflint_niq);
1113*4882a593Smuzhiyun vfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
1114*4882a593Smuzhiyun vfres->niq = FW_PFVF_CMD_NIQ_G(word);
1115*4882a593Smuzhiyun
1116*4882a593Smuzhiyun word = be32_to_cpu(rpl.type_to_neq);
1117*4882a593Smuzhiyun vfres->neq = FW_PFVF_CMD_NEQ_G(word);
1118*4882a593Smuzhiyun vfres->pmask = FW_PFVF_CMD_PMASK_G(word);
1119*4882a593Smuzhiyun
1120*4882a593Smuzhiyun word = be32_to_cpu(rpl.tc_to_nexactf);
1121*4882a593Smuzhiyun vfres->tc = FW_PFVF_CMD_TC_G(word);
1122*4882a593Smuzhiyun vfres->nvi = FW_PFVF_CMD_NVI_G(word);
1123*4882a593Smuzhiyun vfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);
1124*4882a593Smuzhiyun
1125*4882a593Smuzhiyun word = be32_to_cpu(rpl.r_caps_to_nethctrl);
1126*4882a593Smuzhiyun vfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
1127*4882a593Smuzhiyun vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
1128*4882a593Smuzhiyun vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);
1129*4882a593Smuzhiyun
1130*4882a593Smuzhiyun return 0;
1131*4882a593Smuzhiyun }
1132*4882a593Smuzhiyun
1133*4882a593Smuzhiyun /**
1134*4882a593Smuzhiyun * t4vf_read_rss_vi_config - read a VI's RSS configuration
1135*4882a593Smuzhiyun * @adapter: the adapter
1136*4882a593Smuzhiyun * @viid: Virtual Interface ID
1137*4882a593Smuzhiyun * @config: pointer to host-native VI RSS Configuration buffer
1138*4882a593Smuzhiyun *
1139*4882a593Smuzhiyun * Reads the Virtual Interface's RSS configuration information and
1140*4882a593Smuzhiyun * translates it into CPU-native format.
1141*4882a593Smuzhiyun */
t4vf_read_rss_vi_config(struct adapter * adapter,unsigned int viid,union rss_vi_config * config)1142*4882a593Smuzhiyun int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid,
1143*4882a593Smuzhiyun union rss_vi_config *config)
1144*4882a593Smuzhiyun {
1145*4882a593Smuzhiyun struct fw_rss_vi_config_cmd cmd, rpl;
1146*4882a593Smuzhiyun int v;
1147*4882a593Smuzhiyun
1148*4882a593Smuzhiyun memset(&cmd, 0, sizeof(cmd));
1149*4882a593Smuzhiyun cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
1150*4882a593Smuzhiyun FW_CMD_REQUEST_F |
1151*4882a593Smuzhiyun FW_CMD_READ_F |
1152*4882a593Smuzhiyun FW_RSS_VI_CONFIG_CMD_VIID(viid));
1153*4882a593Smuzhiyun cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
1154*4882a593Smuzhiyun v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
1155*4882a593Smuzhiyun if (v)
1156*4882a593Smuzhiyun return v;
1157*4882a593Smuzhiyun
1158*4882a593Smuzhiyun switch (adapter->params.rss.mode) {
1159*4882a593Smuzhiyun case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
1160*4882a593Smuzhiyun u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen);
1161*4882a593Smuzhiyun
1162*4882a593Smuzhiyun config->basicvirtual.ip6fourtupen =
1163*4882a593Smuzhiyun ((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) != 0);
1164*4882a593Smuzhiyun config->basicvirtual.ip6twotupen =
1165*4882a593Smuzhiyun ((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) != 0);
1166*4882a593Smuzhiyun config->basicvirtual.ip4fourtupen =
1167*4882a593Smuzhiyun ((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) != 0);
1168*4882a593Smuzhiyun config->basicvirtual.ip4twotupen =
1169*4882a593Smuzhiyun ((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) != 0);
1170*4882a593Smuzhiyun config->basicvirtual.udpen =
1171*4882a593Smuzhiyun ((word & FW_RSS_VI_CONFIG_CMD_UDPEN_F) != 0);
1172*4882a593Smuzhiyun config->basicvirtual.defaultq =
1173*4882a593Smuzhiyun FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(word);
1174*4882a593Smuzhiyun break;
1175*4882a593Smuzhiyun }
1176*4882a593Smuzhiyun
1177*4882a593Smuzhiyun default:
1178*4882a593Smuzhiyun return -EINVAL;
1179*4882a593Smuzhiyun }
1180*4882a593Smuzhiyun
1181*4882a593Smuzhiyun return 0;
1182*4882a593Smuzhiyun }
1183*4882a593Smuzhiyun
1184*4882a593Smuzhiyun /**
1185*4882a593Smuzhiyun * t4vf_write_rss_vi_config - write a VI's RSS configuration
1186*4882a593Smuzhiyun * @adapter: the adapter
1187*4882a593Smuzhiyun * @viid: Virtual Interface ID
1188*4882a593Smuzhiyun * @config: pointer to host-native VI RSS Configuration buffer
1189*4882a593Smuzhiyun *
1190*4882a593Smuzhiyun * Write the Virtual Interface's RSS configuration information
1191*4882a593Smuzhiyun * (translating it into firmware-native format before writing).
1192*4882a593Smuzhiyun */
t4vf_write_rss_vi_config(struct adapter * adapter,unsigned int viid,union rss_vi_config * config)1193*4882a593Smuzhiyun int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid,
1194*4882a593Smuzhiyun union rss_vi_config *config)
1195*4882a593Smuzhiyun {
1196*4882a593Smuzhiyun struct fw_rss_vi_config_cmd cmd, rpl;
1197*4882a593Smuzhiyun
1198*4882a593Smuzhiyun memset(&cmd, 0, sizeof(cmd));
1199*4882a593Smuzhiyun cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
1200*4882a593Smuzhiyun FW_CMD_REQUEST_F |
1201*4882a593Smuzhiyun FW_CMD_WRITE_F |
1202*4882a593Smuzhiyun FW_RSS_VI_CONFIG_CMD_VIID(viid));
1203*4882a593Smuzhiyun cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
1204*4882a593Smuzhiyun switch (adapter->params.rss.mode) {
1205*4882a593Smuzhiyun case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
1206*4882a593Smuzhiyun u32 word = 0;
1207*4882a593Smuzhiyun
1208*4882a593Smuzhiyun if (config->basicvirtual.ip6fourtupen)
1209*4882a593Smuzhiyun word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F;
1210*4882a593Smuzhiyun if (config->basicvirtual.ip6twotupen)
1211*4882a593Smuzhiyun word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F;
1212*4882a593Smuzhiyun if (config->basicvirtual.ip4fourtupen)
1213*4882a593Smuzhiyun word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F;
1214*4882a593Smuzhiyun if (config->basicvirtual.ip4twotupen)
1215*4882a593Smuzhiyun word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F;
1216*4882a593Smuzhiyun if (config->basicvirtual.udpen)
1217*4882a593Smuzhiyun word |= FW_RSS_VI_CONFIG_CMD_UDPEN_F;
1218*4882a593Smuzhiyun word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(
1219*4882a593Smuzhiyun config->basicvirtual.defaultq);
1220*4882a593Smuzhiyun cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word);
1221*4882a593Smuzhiyun break;
1222*4882a593Smuzhiyun }
1223*4882a593Smuzhiyun
1224*4882a593Smuzhiyun default:
1225*4882a593Smuzhiyun return -EINVAL;
1226*4882a593Smuzhiyun }
1227*4882a593Smuzhiyun
1228*4882a593Smuzhiyun return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
1229*4882a593Smuzhiyun }
1230*4882a593Smuzhiyun
/**
 *	t4vf_config_rss_range - configure a portion of the RSS mapping table
 *	@adapter: the adapter
 *	@viid: Virtual Interface of RSS Table Slice
 *	@start: starting entry in the table to write
 *	@n: how many table entries to write
 *	@rspq: values for the "Response Queue" (Ingress Queue) lookup table
 *	@nrspq: number of values in @rspq
 *
 *	Programs the selected part of the VI's RSS mapping table with the
 *	provided values.  If @nrspq < @n the supplied values are used
 *	repeatedly (wrapping around @rspq) until the full table range is
 *	populated.
 *
 *	The caller must ensure the values in @rspq are in the range 0..1023.
 *	Returns 0 on success or a negative error code from the mailbox.
 */
int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid,
			  int start, int n, const u16 *rspq, int nrspq)
{
	/* Cursor into the caller's Ingress Queue ID array; wraps back to
	 * rspq when it reaches rsp_end so the values repeat as documented.
	 */
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq+nrspq;
	struct fw_rss_ind_tbl_cmd cmd;

	/*
	 * Initialize firmware command template to write the RSS table.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_RSS_IND_TBL_CMD_VIID_V(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		__be32 *qp = &cmd.iq0_to_iq2;
		int nq = min(n, 32);
		int ret;

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/*
		 * "nq" more done for the start of the next loop.
		 */
		start += nq;
		n -= nq;

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			/* Zero unused slots so a partial final triple encodes
			 * harmless zeros for the reserved positions.
			 */
			qbuf[0] = qbuf[1] = qbuf[2] = 0;
			while (nqbuf) {
				nqbuf--;
				*qbp++ = *rsp++;
				if (rsp >= rsp_end)
					rsp = rspq;
			}
			*qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0_V(qbuf[0]) |
					    FW_RSS_IND_TBL_CMD_IQ1_V(qbuf[1]) |
					    FW_RSS_IND_TBL_CMD_IQ2_V(qbuf[2]));
		}

		/*
		 * Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}
1326*4882a593Smuzhiyun
1327*4882a593Smuzhiyun /**
1328*4882a593Smuzhiyun * t4vf_alloc_vi - allocate a virtual interface on a port
1329*4882a593Smuzhiyun * @adapter: the adapter
1330*4882a593Smuzhiyun * @port_id: physical port associated with the VI
1331*4882a593Smuzhiyun *
1332*4882a593Smuzhiyun * Allocate a new Virtual Interface and bind it to the indicated
1333*4882a593Smuzhiyun * physical port. Return the new Virtual Interface Identifier on
1334*4882a593Smuzhiyun * success, or a [negative] error number on failure.
1335*4882a593Smuzhiyun */
t4vf_alloc_vi(struct adapter * adapter,int port_id)1336*4882a593Smuzhiyun int t4vf_alloc_vi(struct adapter *adapter, int port_id)
1337*4882a593Smuzhiyun {
1338*4882a593Smuzhiyun struct fw_vi_cmd cmd, rpl;
1339*4882a593Smuzhiyun int v;
1340*4882a593Smuzhiyun
1341*4882a593Smuzhiyun /*
1342*4882a593Smuzhiyun * Execute a VI command to allocate Virtual Interface and return its
1343*4882a593Smuzhiyun * VIID.
1344*4882a593Smuzhiyun */
1345*4882a593Smuzhiyun memset(&cmd, 0, sizeof(cmd));
1346*4882a593Smuzhiyun cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
1347*4882a593Smuzhiyun FW_CMD_REQUEST_F |
1348*4882a593Smuzhiyun FW_CMD_WRITE_F |
1349*4882a593Smuzhiyun FW_CMD_EXEC_F);
1350*4882a593Smuzhiyun cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
1351*4882a593Smuzhiyun FW_VI_CMD_ALLOC_F);
1352*4882a593Smuzhiyun cmd.portid_pkd = FW_VI_CMD_PORTID_V(port_id);
1353*4882a593Smuzhiyun v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
1354*4882a593Smuzhiyun if (v)
1355*4882a593Smuzhiyun return v;
1356*4882a593Smuzhiyun
1357*4882a593Smuzhiyun return FW_VI_CMD_VIID_G(be16_to_cpu(rpl.type_viid));
1358*4882a593Smuzhiyun }
1359*4882a593Smuzhiyun
1360*4882a593Smuzhiyun /**
1361*4882a593Smuzhiyun * t4vf_free_vi -- free a virtual interface
1362*4882a593Smuzhiyun * @adapter: the adapter
1363*4882a593Smuzhiyun * @viid: the virtual interface identifier
1364*4882a593Smuzhiyun *
1365*4882a593Smuzhiyun * Free a previously allocated Virtual Interface. Return an error on
1366*4882a593Smuzhiyun * failure.
1367*4882a593Smuzhiyun */
t4vf_free_vi(struct adapter * adapter,int viid)1368*4882a593Smuzhiyun int t4vf_free_vi(struct adapter *adapter, int viid)
1369*4882a593Smuzhiyun {
1370*4882a593Smuzhiyun struct fw_vi_cmd cmd;
1371*4882a593Smuzhiyun
1372*4882a593Smuzhiyun /*
1373*4882a593Smuzhiyun * Execute a VI command to free the Virtual Interface.
1374*4882a593Smuzhiyun */
1375*4882a593Smuzhiyun memset(&cmd, 0, sizeof(cmd));
1376*4882a593Smuzhiyun cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
1377*4882a593Smuzhiyun FW_CMD_REQUEST_F |
1378*4882a593Smuzhiyun FW_CMD_EXEC_F);
1379*4882a593Smuzhiyun cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
1380*4882a593Smuzhiyun FW_VI_CMD_FREE_F);
1381*4882a593Smuzhiyun cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
1382*4882a593Smuzhiyun return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
1383*4882a593Smuzhiyun }
1384*4882a593Smuzhiyun
1385*4882a593Smuzhiyun /**
1386*4882a593Smuzhiyun * t4vf_enable_vi - enable/disable a virtual interface
1387*4882a593Smuzhiyun * @adapter: the adapter
1388*4882a593Smuzhiyun * @viid: the Virtual Interface ID
1389*4882a593Smuzhiyun * @rx_en: 1=enable Rx, 0=disable Rx
1390*4882a593Smuzhiyun * @tx_en: 1=enable Tx, 0=disable Tx
1391*4882a593Smuzhiyun *
1392*4882a593Smuzhiyun * Enables/disables a virtual interface.
1393*4882a593Smuzhiyun */
t4vf_enable_vi(struct adapter * adapter,unsigned int viid,bool rx_en,bool tx_en)1394*4882a593Smuzhiyun int t4vf_enable_vi(struct adapter *adapter, unsigned int viid,
1395*4882a593Smuzhiyun bool rx_en, bool tx_en)
1396*4882a593Smuzhiyun {
1397*4882a593Smuzhiyun struct fw_vi_enable_cmd cmd;
1398*4882a593Smuzhiyun
1399*4882a593Smuzhiyun memset(&cmd, 0, sizeof(cmd));
1400*4882a593Smuzhiyun cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
1401*4882a593Smuzhiyun FW_CMD_REQUEST_F |
1402*4882a593Smuzhiyun FW_CMD_EXEC_F |
1403*4882a593Smuzhiyun FW_VI_ENABLE_CMD_VIID_V(viid));
1404*4882a593Smuzhiyun cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
1405*4882a593Smuzhiyun FW_VI_ENABLE_CMD_EEN_V(tx_en) |
1406*4882a593Smuzhiyun FW_LEN16(cmd));
1407*4882a593Smuzhiyun return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
1408*4882a593Smuzhiyun }
1409*4882a593Smuzhiyun
1410*4882a593Smuzhiyun /**
1411*4882a593Smuzhiyun * t4vf_enable_pi - enable/disable a Port's virtual interface
1412*4882a593Smuzhiyun * @adapter: the adapter
1413*4882a593Smuzhiyun * @pi: the Port Information structure
1414*4882a593Smuzhiyun * @rx_en: 1=enable Rx, 0=disable Rx
1415*4882a593Smuzhiyun * @tx_en: 1=enable Tx, 0=disable Tx
1416*4882a593Smuzhiyun *
1417*4882a593Smuzhiyun * Enables/disables a Port's virtual interface. If the Virtual
1418*4882a593Smuzhiyun * Interface enable/disable operation is successful, we notify the
1419*4882a593Smuzhiyun * OS-specific code of a potential Link Status change via the OS Contract
1420*4882a593Smuzhiyun * API t4vf_os_link_changed().
1421*4882a593Smuzhiyun */
t4vf_enable_pi(struct adapter * adapter,struct port_info * pi,bool rx_en,bool tx_en)1422*4882a593Smuzhiyun int t4vf_enable_pi(struct adapter *adapter, struct port_info *pi,
1423*4882a593Smuzhiyun bool rx_en, bool tx_en)
1424*4882a593Smuzhiyun {
1425*4882a593Smuzhiyun int ret = t4vf_enable_vi(adapter, pi->viid, rx_en, tx_en);
1426*4882a593Smuzhiyun
1427*4882a593Smuzhiyun if (ret)
1428*4882a593Smuzhiyun return ret;
1429*4882a593Smuzhiyun t4vf_os_link_changed(adapter, pi->pidx,
1430*4882a593Smuzhiyun rx_en && tx_en && pi->link_cfg.link_ok);
1431*4882a593Smuzhiyun return 0;
1432*4882a593Smuzhiyun }
1433*4882a593Smuzhiyun
1434*4882a593Smuzhiyun /**
1435*4882a593Smuzhiyun * t4vf_identify_port - identify a VI's port by blinking its LED
1436*4882a593Smuzhiyun * @adapter: the adapter
1437*4882a593Smuzhiyun * @viid: the Virtual Interface ID
1438*4882a593Smuzhiyun * @nblinks: how many times to blink LED at 2.5 Hz
1439*4882a593Smuzhiyun *
1440*4882a593Smuzhiyun * Identifies a VI's port by blinking its LED.
1441*4882a593Smuzhiyun */
t4vf_identify_port(struct adapter * adapter,unsigned int viid,unsigned int nblinks)1442*4882a593Smuzhiyun int t4vf_identify_port(struct adapter *adapter, unsigned int viid,
1443*4882a593Smuzhiyun unsigned int nblinks)
1444*4882a593Smuzhiyun {
1445*4882a593Smuzhiyun struct fw_vi_enable_cmd cmd;
1446*4882a593Smuzhiyun
1447*4882a593Smuzhiyun memset(&cmd, 0, sizeof(cmd));
1448*4882a593Smuzhiyun cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
1449*4882a593Smuzhiyun FW_CMD_REQUEST_F |
1450*4882a593Smuzhiyun FW_CMD_EXEC_F |
1451*4882a593Smuzhiyun FW_VI_ENABLE_CMD_VIID_V(viid));
1452*4882a593Smuzhiyun cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F |
1453*4882a593Smuzhiyun FW_LEN16(cmd));
1454*4882a593Smuzhiyun cmd.blinkdur = cpu_to_be16(nblinks);
1455*4882a593Smuzhiyun return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
1456*4882a593Smuzhiyun }
1457*4882a593Smuzhiyun
1458*4882a593Smuzhiyun /**
1459*4882a593Smuzhiyun * t4vf_set_rxmode - set Rx properties of a virtual interface
1460*4882a593Smuzhiyun * @adapter: the adapter
1461*4882a593Smuzhiyun * @viid: the VI id
1462*4882a593Smuzhiyun * @mtu: the new MTU or -1 for no change
1463*4882a593Smuzhiyun * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
1464*4882a593Smuzhiyun * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
1465*4882a593Smuzhiyun * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
1466*4882a593Smuzhiyun * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
1467*4882a593Smuzhiyun * -1 no change
1468*4882a593Smuzhiyun * @sleep_ok: call is allowed to sleep
1469*4882a593Smuzhiyun *
1470*4882a593Smuzhiyun * Sets Rx properties of a virtual interface.
1471*4882a593Smuzhiyun */
t4vf_set_rxmode(struct adapter * adapter,unsigned int viid,int mtu,int promisc,int all_multi,int bcast,int vlanex,bool sleep_ok)1472*4882a593Smuzhiyun int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid,
1473*4882a593Smuzhiyun int mtu, int promisc, int all_multi, int bcast, int vlanex,
1474*4882a593Smuzhiyun bool sleep_ok)
1475*4882a593Smuzhiyun {
1476*4882a593Smuzhiyun struct fw_vi_rxmode_cmd cmd;
1477*4882a593Smuzhiyun
1478*4882a593Smuzhiyun /* convert to FW values */
1479*4882a593Smuzhiyun if (mtu < 0)
1480*4882a593Smuzhiyun mtu = FW_VI_RXMODE_CMD_MTU_M;
1481*4882a593Smuzhiyun if (promisc < 0)
1482*4882a593Smuzhiyun promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
1483*4882a593Smuzhiyun if (all_multi < 0)
1484*4882a593Smuzhiyun all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
1485*4882a593Smuzhiyun if (bcast < 0)
1486*4882a593Smuzhiyun bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
1487*4882a593Smuzhiyun if (vlanex < 0)
1488*4882a593Smuzhiyun vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
1489*4882a593Smuzhiyun
1490*4882a593Smuzhiyun memset(&cmd, 0, sizeof(cmd));
1491*4882a593Smuzhiyun cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
1492*4882a593Smuzhiyun FW_CMD_REQUEST_F |
1493*4882a593Smuzhiyun FW_CMD_WRITE_F |
1494*4882a593Smuzhiyun FW_VI_RXMODE_CMD_VIID_V(viid));
1495*4882a593Smuzhiyun cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
1496*4882a593Smuzhiyun cmd.mtu_to_vlanexen =
1497*4882a593Smuzhiyun cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
1498*4882a593Smuzhiyun FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
1499*4882a593Smuzhiyun FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
1500*4882a593Smuzhiyun FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
1501*4882a593Smuzhiyun FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
1502*4882a593Smuzhiyun return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
1503*4882a593Smuzhiyun }
1504*4882a593Smuzhiyun
1505*4882a593Smuzhiyun /**
1506*4882a593Smuzhiyun * t4vf_alloc_mac_filt - allocates exact-match filters for MAC addresses
1507*4882a593Smuzhiyun * @adapter: the adapter
1508*4882a593Smuzhiyun * @viid: the Virtual Interface Identifier
1509*4882a593Smuzhiyun * @free: if true any existing filters for this VI id are first removed
1510*4882a593Smuzhiyun * @naddr: the number of MAC addresses to allocate filters for (up to 7)
1511*4882a593Smuzhiyun * @addr: the MAC address(es)
1512*4882a593Smuzhiyun * @idx: where to store the index of each allocated filter
1513*4882a593Smuzhiyun * @hash: pointer to hash address filter bitmap
1514*4882a593Smuzhiyun * @sleep_ok: call is allowed to sleep
1515*4882a593Smuzhiyun *
1516*4882a593Smuzhiyun * Allocates an exact-match filter for each of the supplied addresses and
1517*4882a593Smuzhiyun * sets it to the corresponding address. If @idx is not %NULL it should
1518*4882a593Smuzhiyun * have at least @naddr entries, each of which will be set to the index of
1519*4882a593Smuzhiyun * the filter allocated for the corresponding MAC address. If a filter
1520*4882a593Smuzhiyun * could not be allocated for an address its index is set to 0xffff.
1521*4882a593Smuzhiyun * If @hash is not %NULL addresses that fail to allocate an exact filter
1522*4882a593Smuzhiyun * are hashed and update the hash filter bitmap pointed at by @hash.
1523*4882a593Smuzhiyun *
1524*4882a593Smuzhiyun * Returns a negative error number or the number of filters allocated.
1525*4882a593Smuzhiyun */
t4vf_alloc_mac_filt(struct adapter * adapter,unsigned int viid,bool free,unsigned int naddr,const u8 ** addr,u16 * idx,u64 * hash,bool sleep_ok)1526*4882a593Smuzhiyun int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
1527*4882a593Smuzhiyun unsigned int naddr, const u8 **addr, u16 *idx,
1528*4882a593Smuzhiyun u64 *hash, bool sleep_ok)
1529*4882a593Smuzhiyun {
1530*4882a593Smuzhiyun int offset, ret = 0;
1531*4882a593Smuzhiyun unsigned nfilters = 0;
1532*4882a593Smuzhiyun unsigned int rem = naddr;
1533*4882a593Smuzhiyun struct fw_vi_mac_cmd cmd, rpl;
1534*4882a593Smuzhiyun unsigned int max_naddr = adapter->params.arch.mps_tcam_size;
1535*4882a593Smuzhiyun
1536*4882a593Smuzhiyun if (naddr > max_naddr)
1537*4882a593Smuzhiyun return -EINVAL;
1538*4882a593Smuzhiyun
1539*4882a593Smuzhiyun for (offset = 0; offset < naddr; /**/) {
1540*4882a593Smuzhiyun unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
1541*4882a593Smuzhiyun ? rem
1542*4882a593Smuzhiyun : ARRAY_SIZE(cmd.u.exact));
1543*4882a593Smuzhiyun size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1544*4882a593Smuzhiyun u.exact[fw_naddr]), 16);
1545*4882a593Smuzhiyun struct fw_vi_mac_exact *p;
1546*4882a593Smuzhiyun int i;
1547*4882a593Smuzhiyun
1548*4882a593Smuzhiyun memset(&cmd, 0, sizeof(cmd));
1549*4882a593Smuzhiyun cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
1550*4882a593Smuzhiyun FW_CMD_REQUEST_F |
1551*4882a593Smuzhiyun FW_CMD_WRITE_F |
1552*4882a593Smuzhiyun (free ? FW_CMD_EXEC_F : 0) |
1553*4882a593Smuzhiyun FW_VI_MAC_CMD_VIID_V(viid));
1554*4882a593Smuzhiyun cmd.freemacs_to_len16 =
1555*4882a593Smuzhiyun cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
1556*4882a593Smuzhiyun FW_CMD_LEN16_V(len16));
1557*4882a593Smuzhiyun
1558*4882a593Smuzhiyun for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
1559*4882a593Smuzhiyun p->valid_to_idx = cpu_to_be16(
1560*4882a593Smuzhiyun FW_VI_MAC_CMD_VALID_F |
1561*4882a593Smuzhiyun FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
1562*4882a593Smuzhiyun memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
1563*4882a593Smuzhiyun }
1564*4882a593Smuzhiyun
1565*4882a593Smuzhiyun
1566*4882a593Smuzhiyun ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
1567*4882a593Smuzhiyun sleep_ok);
1568*4882a593Smuzhiyun if (ret && ret != -ENOMEM)
1569*4882a593Smuzhiyun break;
1570*4882a593Smuzhiyun
1571*4882a593Smuzhiyun for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
1572*4882a593Smuzhiyun u16 index = FW_VI_MAC_CMD_IDX_G(
1573*4882a593Smuzhiyun be16_to_cpu(p->valid_to_idx));
1574*4882a593Smuzhiyun
1575*4882a593Smuzhiyun if (idx)
1576*4882a593Smuzhiyun idx[offset+i] =
1577*4882a593Smuzhiyun (index >= max_naddr
1578*4882a593Smuzhiyun ? 0xffff
1579*4882a593Smuzhiyun : index);
1580*4882a593Smuzhiyun if (index < max_naddr)
1581*4882a593Smuzhiyun nfilters++;
1582*4882a593Smuzhiyun else if (hash)
1583*4882a593Smuzhiyun *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
1584*4882a593Smuzhiyun }
1585*4882a593Smuzhiyun
1586*4882a593Smuzhiyun free = false;
1587*4882a593Smuzhiyun offset += fw_naddr;
1588*4882a593Smuzhiyun rem -= fw_naddr;
1589*4882a593Smuzhiyun }
1590*4882a593Smuzhiyun
1591*4882a593Smuzhiyun /*
1592*4882a593Smuzhiyun * If there were no errors or we merely ran out of room in our MAC
1593*4882a593Smuzhiyun * address arena, return the number of filters actually written.
1594*4882a593Smuzhiyun */
1595*4882a593Smuzhiyun if (ret == 0 || ret == -ENOMEM)
1596*4882a593Smuzhiyun ret = nfilters;
1597*4882a593Smuzhiyun return ret;
1598*4882a593Smuzhiyun }
1599*4882a593Smuzhiyun
1600*4882a593Smuzhiyun /**
1601*4882a593Smuzhiyun * t4vf_free_mac_filt - frees exact-match filters of given MAC addresses
1602*4882a593Smuzhiyun * @adapter: the adapter
1603*4882a593Smuzhiyun * @viid: the VI id
1604*4882a593Smuzhiyun * @naddr: the number of MAC addresses to allocate filters for (up to 7)
1605*4882a593Smuzhiyun * @addr: the MAC address(es)
1606*4882a593Smuzhiyun * @sleep_ok: call is allowed to sleep
1607*4882a593Smuzhiyun *
1608*4882a593Smuzhiyun * Frees the exact-match filter for each of the supplied addresses
1609*4882a593Smuzhiyun *
1610*4882a593Smuzhiyun * Returns a negative error number or the number of filters freed.
1611*4882a593Smuzhiyun */
t4vf_free_mac_filt(struct adapter * adapter,unsigned int viid,unsigned int naddr,const u8 ** addr,bool sleep_ok)1612*4882a593Smuzhiyun int t4vf_free_mac_filt(struct adapter *adapter, unsigned int viid,
1613*4882a593Smuzhiyun unsigned int naddr, const u8 **addr, bool sleep_ok)
1614*4882a593Smuzhiyun {
1615*4882a593Smuzhiyun int offset, ret = 0;
1616*4882a593Smuzhiyun struct fw_vi_mac_cmd cmd;
1617*4882a593Smuzhiyun unsigned int nfilters = 0;
1618*4882a593Smuzhiyun unsigned int max_naddr = adapter->params.arch.mps_tcam_size;
1619*4882a593Smuzhiyun unsigned int rem = naddr;
1620*4882a593Smuzhiyun
1621*4882a593Smuzhiyun if (naddr > max_naddr)
1622*4882a593Smuzhiyun return -EINVAL;
1623*4882a593Smuzhiyun
1624*4882a593Smuzhiyun for (offset = 0; offset < (int)naddr ; /**/) {
1625*4882a593Smuzhiyun unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact) ?
1626*4882a593Smuzhiyun rem : ARRAY_SIZE(cmd.u.exact));
1627*4882a593Smuzhiyun size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1628*4882a593Smuzhiyun u.exact[fw_naddr]), 16);
1629*4882a593Smuzhiyun struct fw_vi_mac_exact *p;
1630*4882a593Smuzhiyun int i;
1631*4882a593Smuzhiyun
1632*4882a593Smuzhiyun memset(&cmd, 0, sizeof(cmd));
1633*4882a593Smuzhiyun cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
1634*4882a593Smuzhiyun FW_CMD_REQUEST_F |
1635*4882a593Smuzhiyun FW_CMD_WRITE_F |
1636*4882a593Smuzhiyun FW_CMD_EXEC_V(0) |
1637*4882a593Smuzhiyun FW_VI_MAC_CMD_VIID_V(viid));
1638*4882a593Smuzhiyun cmd.freemacs_to_len16 =
1639*4882a593Smuzhiyun cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
1640*4882a593Smuzhiyun FW_CMD_LEN16_V(len16));
1641*4882a593Smuzhiyun
1642*4882a593Smuzhiyun for (i = 0, p = cmd.u.exact; i < (int)fw_naddr; i++, p++) {
1643*4882a593Smuzhiyun p->valid_to_idx = cpu_to_be16(
1644*4882a593Smuzhiyun FW_VI_MAC_CMD_VALID_F |
1645*4882a593Smuzhiyun FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
1646*4882a593Smuzhiyun memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
1647*4882a593Smuzhiyun }
1648*4882a593Smuzhiyun
1649*4882a593Smuzhiyun ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &cmd,
1650*4882a593Smuzhiyun sleep_ok);
1651*4882a593Smuzhiyun if (ret)
1652*4882a593Smuzhiyun break;
1653*4882a593Smuzhiyun
1654*4882a593Smuzhiyun for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
1655*4882a593Smuzhiyun u16 index = FW_VI_MAC_CMD_IDX_G(
1656*4882a593Smuzhiyun be16_to_cpu(p->valid_to_idx));
1657*4882a593Smuzhiyun
1658*4882a593Smuzhiyun if (index < max_naddr)
1659*4882a593Smuzhiyun nfilters++;
1660*4882a593Smuzhiyun }
1661*4882a593Smuzhiyun
1662*4882a593Smuzhiyun offset += fw_naddr;
1663*4882a593Smuzhiyun rem -= fw_naddr;
1664*4882a593Smuzhiyun }
1665*4882a593Smuzhiyun
1666*4882a593Smuzhiyun if (ret == 0)
1667*4882a593Smuzhiyun ret = nfilters;
1668*4882a593Smuzhiyun return ret;
1669*4882a593Smuzhiyun }
1670*4882a593Smuzhiyun
1671*4882a593Smuzhiyun /**
1672*4882a593Smuzhiyun * t4vf_change_mac - modifies the exact-match filter for a MAC address
1673*4882a593Smuzhiyun * @adapter: the adapter
1674*4882a593Smuzhiyun * @viid: the Virtual Interface ID
1675*4882a593Smuzhiyun * @idx: index of existing filter for old value of MAC address, or -1
1676*4882a593Smuzhiyun * @addr: the new MAC address value
1677*4882a593Smuzhiyun * @persist: if idx < 0, the new MAC allocation should be persistent
1678*4882a593Smuzhiyun *
1679*4882a593Smuzhiyun * Modifies an exact-match filter and sets it to the new MAC address.
1680*4882a593Smuzhiyun * Note that in general it is not possible to modify the value of a given
1681*4882a593Smuzhiyun * filter so the generic way to modify an address filter is to free the
1682*4882a593Smuzhiyun * one being used by the old address value and allocate a new filter for
1683*4882a593Smuzhiyun * the new address value. @idx can be -1 if the address is a new
1684*4882a593Smuzhiyun * addition.
1685*4882a593Smuzhiyun *
1686*4882a593Smuzhiyun * Returns a negative error number or the index of the filter with the new
1687*4882a593Smuzhiyun * MAC value.
1688*4882a593Smuzhiyun */
t4vf_change_mac(struct adapter * adapter,unsigned int viid,int idx,const u8 * addr,bool persist)1689*4882a593Smuzhiyun int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
1690*4882a593Smuzhiyun int idx, const u8 *addr, bool persist)
1691*4882a593Smuzhiyun {
1692*4882a593Smuzhiyun int ret;
1693*4882a593Smuzhiyun struct fw_vi_mac_cmd cmd, rpl;
1694*4882a593Smuzhiyun struct fw_vi_mac_exact *p = &cmd.u.exact[0];
1695*4882a593Smuzhiyun size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1696*4882a593Smuzhiyun u.exact[1]), 16);
1697*4882a593Smuzhiyun unsigned int max_mac_addr = adapter->params.arch.mps_tcam_size;
1698*4882a593Smuzhiyun
1699*4882a593Smuzhiyun /*
1700*4882a593Smuzhiyun * If this is a new allocation, determine whether it should be
1701*4882a593Smuzhiyun * persistent (across a "freemacs" operation) or not.
1702*4882a593Smuzhiyun */
1703*4882a593Smuzhiyun if (idx < 0)
1704*4882a593Smuzhiyun idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
1705*4882a593Smuzhiyun
1706*4882a593Smuzhiyun memset(&cmd, 0, sizeof(cmd));
1707*4882a593Smuzhiyun cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
1708*4882a593Smuzhiyun FW_CMD_REQUEST_F |
1709*4882a593Smuzhiyun FW_CMD_WRITE_F |
1710*4882a593Smuzhiyun FW_VI_MAC_CMD_VIID_V(viid));
1711*4882a593Smuzhiyun cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
1712*4882a593Smuzhiyun p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
1713*4882a593Smuzhiyun FW_VI_MAC_CMD_IDX_V(idx));
1714*4882a593Smuzhiyun memcpy(p->macaddr, addr, sizeof(p->macaddr));
1715*4882a593Smuzhiyun
1716*4882a593Smuzhiyun ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
1717*4882a593Smuzhiyun if (ret == 0) {
1718*4882a593Smuzhiyun p = &rpl.u.exact[0];
1719*4882a593Smuzhiyun ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
1720*4882a593Smuzhiyun if (ret >= max_mac_addr)
1721*4882a593Smuzhiyun ret = -ENOMEM;
1722*4882a593Smuzhiyun }
1723*4882a593Smuzhiyun return ret;
1724*4882a593Smuzhiyun }
1725*4882a593Smuzhiyun
1726*4882a593Smuzhiyun /**
1727*4882a593Smuzhiyun * t4vf_set_addr_hash - program the MAC inexact-match hash filter
1728*4882a593Smuzhiyun * @adapter: the adapter
1729*4882a593Smuzhiyun * @viid: the Virtual Interface Identifier
1730*4882a593Smuzhiyun * @ucast: whether the hash filter should also match unicast addresses
1731*4882a593Smuzhiyun * @vec: the value to be written to the hash filter
1732*4882a593Smuzhiyun * @sleep_ok: call is allowed to sleep
1733*4882a593Smuzhiyun *
1734*4882a593Smuzhiyun * Sets the 64-bit inexact-match hash filter for a virtual interface.
1735*4882a593Smuzhiyun */
t4vf_set_addr_hash(struct adapter * adapter,unsigned int viid,bool ucast,u64 vec,bool sleep_ok)1736*4882a593Smuzhiyun int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid,
1737*4882a593Smuzhiyun bool ucast, u64 vec, bool sleep_ok)
1738*4882a593Smuzhiyun {
1739*4882a593Smuzhiyun struct fw_vi_mac_cmd cmd;
1740*4882a593Smuzhiyun size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1741*4882a593Smuzhiyun u.exact[0]), 16);
1742*4882a593Smuzhiyun
1743*4882a593Smuzhiyun memset(&cmd, 0, sizeof(cmd));
1744*4882a593Smuzhiyun cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
1745*4882a593Smuzhiyun FW_CMD_REQUEST_F |
1746*4882a593Smuzhiyun FW_CMD_WRITE_F |
1747*4882a593Smuzhiyun FW_VI_ENABLE_CMD_VIID_V(viid));
1748*4882a593Smuzhiyun cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
1749*4882a593Smuzhiyun FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
1750*4882a593Smuzhiyun FW_CMD_LEN16_V(len16));
1751*4882a593Smuzhiyun cmd.u.hash.hashvec = cpu_to_be64(vec);
1752*4882a593Smuzhiyun return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
1753*4882a593Smuzhiyun }
1754*4882a593Smuzhiyun
/**
 *	t4vf_get_port_stats - collect "port" statistics
 *	@adapter: the adapter
 *	@pidx: the port index
 *	@s: the stats structure to fill
 *
 *	Collect statistics for the "port"'s Virtual Interface.  Returns 0
 *	on success or a negative error code from the mailbox.
 */
int t4vf_get_port_stats(struct adapter *adapter, int pidx,
			struct t4vf_port_stats *s)
{
	struct port_info *pi = adap2pinfo(adapter, pidx);
	/* Firmware-format statistics buffer, filled 6 counters at a time
	 * and walked via fwsp.
	 */
	struct fw_vi_stats_vf fwstats;
	unsigned int rem = VI_VF_NUM_STATS;
	__be64 *fwsp = (__be64 *)&fwstats;

	/*
	 * Grab the Virtual Interface statistics a chunk at a time via mailbox
	 * commands.  We could use a Work Request and get all of them at once
	 * but that's an asynchronous interface which is awkward to use.
	 */
	while (rem) {
		unsigned int ix = VI_VF_NUM_STATS - rem;
		unsigned int nstats = min(6U, rem);
		struct fw_vi_stats_cmd cmd, rpl;
		/* Only the command header plus one control sub-structure is
		 * sent; len/len16 reflect that, not sizeof(cmd).
		 */
		size_t len = (offsetof(struct fw_vi_stats_cmd, u) +
			      sizeof(struct fw_vi_stats_ctl));
		size_t len16 = DIV_ROUND_UP(len, 16);
		int ret;

		memset(&cmd, 0, sizeof(cmd));
		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_STATS_CMD) |
					     FW_VI_STATS_CMD_VIID_V(pi->viid) |
					     FW_CMD_REQUEST_F |
					     FW_CMD_READ_F);
		cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
		/* Ask for "nstats" counters starting at index "ix". */
		cmd.u.ctl.nstats_ix =
			cpu_to_be16(FW_VI_STATS_CMD_IX_V(ix) |
				    FW_VI_STATS_CMD_NSTATS_V(nstats));
		ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl);
		if (ret)
			return ret;

		memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats);

		rem -= nstats;
		fwsp += nstats;
	}

	/*
	 * Translate firmware statistics into host native statistics.
	 */
	s->tx_bcast_bytes = be64_to_cpu(fwstats.tx_bcast_bytes);
	s->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames);
	s->tx_mcast_bytes = be64_to_cpu(fwstats.tx_mcast_bytes);
	s->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames);
	s->tx_ucast_bytes = be64_to_cpu(fwstats.tx_ucast_bytes);
	s->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames);
	s->tx_drop_frames = be64_to_cpu(fwstats.tx_drop_frames);
	s->tx_offload_bytes = be64_to_cpu(fwstats.tx_offload_bytes);
	s->tx_offload_frames = be64_to_cpu(fwstats.tx_offload_frames);

	s->rx_bcast_bytes = be64_to_cpu(fwstats.rx_bcast_bytes);
	s->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames);
	s->rx_mcast_bytes = be64_to_cpu(fwstats.rx_mcast_bytes);
	s->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames);
	s->rx_ucast_bytes = be64_to_cpu(fwstats.rx_ucast_bytes);
	s->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames);

	s->rx_err_frames = be64_to_cpu(fwstats.rx_err_frames);

	return 0;
}
1828*4882a593Smuzhiyun
1829*4882a593Smuzhiyun /**
1830*4882a593Smuzhiyun * t4vf_iq_free - free an ingress queue and its free lists
1831*4882a593Smuzhiyun * @adapter: the adapter
1832*4882a593Smuzhiyun * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
1833*4882a593Smuzhiyun * @iqid: ingress queue ID
1834*4882a593Smuzhiyun * @fl0id: FL0 queue ID or 0xffff if no attached FL0
1835*4882a593Smuzhiyun * @fl1id: FL1 queue ID or 0xffff if no attached FL1
1836*4882a593Smuzhiyun *
1837*4882a593Smuzhiyun * Frees an ingress queue and its associated free lists, if any.
1838*4882a593Smuzhiyun */
t4vf_iq_free(struct adapter * adapter,unsigned int iqtype,unsigned int iqid,unsigned int fl0id,unsigned int fl1id)1839*4882a593Smuzhiyun int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype,
1840*4882a593Smuzhiyun unsigned int iqid, unsigned int fl0id, unsigned int fl1id)
1841*4882a593Smuzhiyun {
1842*4882a593Smuzhiyun struct fw_iq_cmd cmd;
1843*4882a593Smuzhiyun
1844*4882a593Smuzhiyun memset(&cmd, 0, sizeof(cmd));
1845*4882a593Smuzhiyun cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
1846*4882a593Smuzhiyun FW_CMD_REQUEST_F |
1847*4882a593Smuzhiyun FW_CMD_EXEC_F);
1848*4882a593Smuzhiyun cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F |
1849*4882a593Smuzhiyun FW_LEN16(cmd));
1850*4882a593Smuzhiyun cmd.type_to_iqandstindex =
1851*4882a593Smuzhiyun cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
1852*4882a593Smuzhiyun
1853*4882a593Smuzhiyun cmd.iqid = cpu_to_be16(iqid);
1854*4882a593Smuzhiyun cmd.fl0id = cpu_to_be16(fl0id);
1855*4882a593Smuzhiyun cmd.fl1id = cpu_to_be16(fl1id);
1856*4882a593Smuzhiyun return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
1857*4882a593Smuzhiyun }
1858*4882a593Smuzhiyun
1859*4882a593Smuzhiyun /**
1860*4882a593Smuzhiyun * t4vf_eth_eq_free - free an Ethernet egress queue
1861*4882a593Smuzhiyun * @adapter: the adapter
1862*4882a593Smuzhiyun * @eqid: egress queue ID
1863*4882a593Smuzhiyun *
1864*4882a593Smuzhiyun * Frees an Ethernet egress queue.
1865*4882a593Smuzhiyun */
t4vf_eth_eq_free(struct adapter * adapter,unsigned int eqid)1866*4882a593Smuzhiyun int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
1867*4882a593Smuzhiyun {
1868*4882a593Smuzhiyun struct fw_eq_eth_cmd cmd;
1869*4882a593Smuzhiyun
1870*4882a593Smuzhiyun memset(&cmd, 0, sizeof(cmd));
1871*4882a593Smuzhiyun cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
1872*4882a593Smuzhiyun FW_CMD_REQUEST_F |
1873*4882a593Smuzhiyun FW_CMD_EXEC_F);
1874*4882a593Smuzhiyun cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F |
1875*4882a593Smuzhiyun FW_LEN16(cmd));
1876*4882a593Smuzhiyun cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
1877*4882a593Smuzhiyun return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
1878*4882a593Smuzhiyun }
1879*4882a593Smuzhiyun
1880*4882a593Smuzhiyun /**
1881*4882a593Smuzhiyun * t4vf_link_down_rc_str - return a string for a Link Down Reason Code
1882*4882a593Smuzhiyun * @link_down_rc: Link Down Reason Code
1883*4882a593Smuzhiyun *
1884*4882a593Smuzhiyun * Returns a string representation of the Link Down Reason Code.
1885*4882a593Smuzhiyun */
t4vf_link_down_rc_str(unsigned char link_down_rc)1886*4882a593Smuzhiyun static const char *t4vf_link_down_rc_str(unsigned char link_down_rc)
1887*4882a593Smuzhiyun {
1888*4882a593Smuzhiyun static const char * const reason[] = {
1889*4882a593Smuzhiyun "Link Down",
1890*4882a593Smuzhiyun "Remote Fault",
1891*4882a593Smuzhiyun "Auto-negotiation Failure",
1892*4882a593Smuzhiyun "Reserved",
1893*4882a593Smuzhiyun "Insufficient Airflow",
1894*4882a593Smuzhiyun "Unable To Determine Reason",
1895*4882a593Smuzhiyun "No RX Signal Detected",
1896*4882a593Smuzhiyun "Reserved",
1897*4882a593Smuzhiyun };
1898*4882a593Smuzhiyun
1899*4882a593Smuzhiyun if (link_down_rc >= ARRAY_SIZE(reason))
1900*4882a593Smuzhiyun return "Bad Reason Code";
1901*4882a593Smuzhiyun
1902*4882a593Smuzhiyun return reason[link_down_rc];
1903*4882a593Smuzhiyun }
1904*4882a593Smuzhiyun
1905*4882a593Smuzhiyun /**
1906*4882a593Smuzhiyun * t4vf_handle_get_port_info - process a FW reply message
1907*4882a593Smuzhiyun * @pi: the port info
1908*4882a593Smuzhiyun * @cmd: start of the FW message
1909*4882a593Smuzhiyun *
1910*4882a593Smuzhiyun * Processes a GET_PORT_INFO FW reply message.
1911*4882a593Smuzhiyun */
t4vf_handle_get_port_info(struct port_info * pi,const struct fw_port_cmd * cmd)1912*4882a593Smuzhiyun static void t4vf_handle_get_port_info(struct port_info *pi,
1913*4882a593Smuzhiyun const struct fw_port_cmd *cmd)
1914*4882a593Smuzhiyun {
1915*4882a593Smuzhiyun fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
1916*4882a593Smuzhiyun struct link_config *lc = &pi->link_cfg;
1917*4882a593Smuzhiyun struct adapter *adapter = pi->adapter;
1918*4882a593Smuzhiyun unsigned int speed, fc, fec, adv_fc;
1919*4882a593Smuzhiyun enum fw_port_module_type mod_type;
1920*4882a593Smuzhiyun int action, link_ok, linkdnrc;
1921*4882a593Smuzhiyun enum fw_port_type port_type;
1922*4882a593Smuzhiyun
1923*4882a593Smuzhiyun /* Extract the various fields from the Port Information message. */
1924*4882a593Smuzhiyun action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
1925*4882a593Smuzhiyun switch (action) {
1926*4882a593Smuzhiyun case FW_PORT_ACTION_GET_PORT_INFO: {
1927*4882a593Smuzhiyun u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
1928*4882a593Smuzhiyun
1929*4882a593Smuzhiyun link_ok = (lstatus & FW_PORT_CMD_LSTATUS_F) != 0;
1930*4882a593Smuzhiyun linkdnrc = FW_PORT_CMD_LINKDNRC_G(lstatus);
1931*4882a593Smuzhiyun port_type = FW_PORT_CMD_PTYPE_G(lstatus);
1932*4882a593Smuzhiyun mod_type = FW_PORT_CMD_MODTYPE_G(lstatus);
1933*4882a593Smuzhiyun pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
1934*4882a593Smuzhiyun acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
1935*4882a593Smuzhiyun lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap));
1936*4882a593Smuzhiyun
1937*4882a593Smuzhiyun /* Unfortunately the format of the Link Status in the old
1938*4882a593Smuzhiyun * 16-bit Port Information message isn't the same as the
1939*4882a593Smuzhiyun * 16-bit Port Capabilities bitfield used everywhere else ...
1940*4882a593Smuzhiyun */
1941*4882a593Smuzhiyun linkattr = 0;
1942*4882a593Smuzhiyun if (lstatus & FW_PORT_CMD_RXPAUSE_F)
1943*4882a593Smuzhiyun linkattr |= FW_PORT_CAP32_FC_RX;
1944*4882a593Smuzhiyun if (lstatus & FW_PORT_CMD_TXPAUSE_F)
1945*4882a593Smuzhiyun linkattr |= FW_PORT_CAP32_FC_TX;
1946*4882a593Smuzhiyun if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
1947*4882a593Smuzhiyun linkattr |= FW_PORT_CAP32_SPEED_100M;
1948*4882a593Smuzhiyun if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
1949*4882a593Smuzhiyun linkattr |= FW_PORT_CAP32_SPEED_1G;
1950*4882a593Smuzhiyun if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
1951*4882a593Smuzhiyun linkattr |= FW_PORT_CAP32_SPEED_10G;
1952*4882a593Smuzhiyun if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
1953*4882a593Smuzhiyun linkattr |= FW_PORT_CAP32_SPEED_25G;
1954*4882a593Smuzhiyun if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
1955*4882a593Smuzhiyun linkattr |= FW_PORT_CAP32_SPEED_40G;
1956*4882a593Smuzhiyun if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
1957*4882a593Smuzhiyun linkattr |= FW_PORT_CAP32_SPEED_100G;
1958*4882a593Smuzhiyun
1959*4882a593Smuzhiyun break;
1960*4882a593Smuzhiyun }
1961*4882a593Smuzhiyun
1962*4882a593Smuzhiyun case FW_PORT_ACTION_GET_PORT_INFO32: {
1963*4882a593Smuzhiyun u32 lstatus32;
1964*4882a593Smuzhiyun
1965*4882a593Smuzhiyun lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
1966*4882a593Smuzhiyun link_ok = (lstatus32 & FW_PORT_CMD_LSTATUS32_F) != 0;
1967*4882a593Smuzhiyun linkdnrc = FW_PORT_CMD_LINKDNRC32_G(lstatus32);
1968*4882a593Smuzhiyun port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
1969*4882a593Smuzhiyun mod_type = FW_PORT_CMD_MODTYPE32_G(lstatus32);
1970*4882a593Smuzhiyun pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
1971*4882a593Smuzhiyun acaps = be32_to_cpu(cmd->u.info32.acaps32);
1972*4882a593Smuzhiyun lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32);
1973*4882a593Smuzhiyun linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
1974*4882a593Smuzhiyun break;
1975*4882a593Smuzhiyun }
1976*4882a593Smuzhiyun
1977*4882a593Smuzhiyun default:
1978*4882a593Smuzhiyun dev_err(adapter->pdev_dev, "Handle Port Information: Bad Command/Action %#x\n",
1979*4882a593Smuzhiyun be32_to_cpu(cmd->action_to_len16));
1980*4882a593Smuzhiyun return;
1981*4882a593Smuzhiyun }
1982*4882a593Smuzhiyun
1983*4882a593Smuzhiyun fec = fwcap_to_cc_fec(acaps);
1984*4882a593Smuzhiyun adv_fc = fwcap_to_cc_pause(acaps);
1985*4882a593Smuzhiyun fc = fwcap_to_cc_pause(linkattr);
1986*4882a593Smuzhiyun speed = fwcap_to_speed(linkattr);
1987*4882a593Smuzhiyun
1988*4882a593Smuzhiyun if (mod_type != pi->mod_type) {
1989*4882a593Smuzhiyun /* When a new Transceiver Module is inserted, the Firmware
1990*4882a593Smuzhiyun * will examine any Forward Error Correction parameters
1991*4882a593Smuzhiyun * present in the Transceiver Module i2c EPROM and determine
1992*4882a593Smuzhiyun * the supported and recommended FEC settings from those
1993*4882a593Smuzhiyun * based on IEEE 802.3 standards. We always record the
1994*4882a593Smuzhiyun * IEEE 802.3 recommended "automatic" settings.
1995*4882a593Smuzhiyun */
1996*4882a593Smuzhiyun lc->auto_fec = fec;
1997*4882a593Smuzhiyun
1998*4882a593Smuzhiyun /* Some versions of the early T6 Firmware "cheated" when
1999*4882a593Smuzhiyun * handling different Transceiver Modules by changing the
2000*4882a593Smuzhiyun * underlaying Port Type reported to the Host Drivers. As
2001*4882a593Smuzhiyun * such we need to capture whatever Port Type the Firmware
2002*4882a593Smuzhiyun * sends us and record it in case it's different from what we
2003*4882a593Smuzhiyun * were told earlier. Unfortunately, since Firmware is
2004*4882a593Smuzhiyun * forever, we'll need to keep this code here forever, but in
2005*4882a593Smuzhiyun * later T6 Firmware it should just be an assignment of the
2006*4882a593Smuzhiyun * same value already recorded.
2007*4882a593Smuzhiyun */
2008*4882a593Smuzhiyun pi->port_type = port_type;
2009*4882a593Smuzhiyun
2010*4882a593Smuzhiyun pi->mod_type = mod_type;
2011*4882a593Smuzhiyun t4vf_os_portmod_changed(adapter, pi->pidx);
2012*4882a593Smuzhiyun }
2013*4882a593Smuzhiyun
2014*4882a593Smuzhiyun if (link_ok != lc->link_ok || speed != lc->speed ||
2015*4882a593Smuzhiyun fc != lc->fc || adv_fc != lc->advertised_fc ||
2016*4882a593Smuzhiyun fec != lc->fec) {
2017*4882a593Smuzhiyun /* something changed */
2018*4882a593Smuzhiyun if (!link_ok && lc->link_ok) {
2019*4882a593Smuzhiyun lc->link_down_rc = linkdnrc;
2020*4882a593Smuzhiyun dev_warn_ratelimited(adapter->pdev_dev,
2021*4882a593Smuzhiyun "Port %d link down, reason: %s\n",
2022*4882a593Smuzhiyun pi->port_id,
2023*4882a593Smuzhiyun t4vf_link_down_rc_str(linkdnrc));
2024*4882a593Smuzhiyun }
2025*4882a593Smuzhiyun lc->link_ok = link_ok;
2026*4882a593Smuzhiyun lc->speed = speed;
2027*4882a593Smuzhiyun lc->advertised_fc = adv_fc;
2028*4882a593Smuzhiyun lc->fc = fc;
2029*4882a593Smuzhiyun lc->fec = fec;
2030*4882a593Smuzhiyun
2031*4882a593Smuzhiyun lc->pcaps = pcaps;
2032*4882a593Smuzhiyun lc->lpacaps = lpacaps;
2033*4882a593Smuzhiyun lc->acaps = acaps & ADVERT_MASK;
2034*4882a593Smuzhiyun
2035*4882a593Smuzhiyun /* If we're not physically capable of Auto-Negotiation, note
2036*4882a593Smuzhiyun * this as Auto-Negotiation disabled. Otherwise, we track
2037*4882a593Smuzhiyun * what Auto-Negotiation settings we have. Note parallel
2038*4882a593Smuzhiyun * structure in init_link_config().
2039*4882a593Smuzhiyun */
2040*4882a593Smuzhiyun if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
2041*4882a593Smuzhiyun lc->autoneg = AUTONEG_DISABLE;
2042*4882a593Smuzhiyun } else if (lc->acaps & FW_PORT_CAP32_ANEG) {
2043*4882a593Smuzhiyun lc->autoneg = AUTONEG_ENABLE;
2044*4882a593Smuzhiyun } else {
2045*4882a593Smuzhiyun /* When Autoneg is disabled, user needs to set
2046*4882a593Smuzhiyun * single speed.
2047*4882a593Smuzhiyun * Similar to cxgb4_ethtool.c: set_link_ksettings
2048*4882a593Smuzhiyun */
2049*4882a593Smuzhiyun lc->acaps = 0;
2050*4882a593Smuzhiyun lc->speed_caps = fwcap_to_speed(acaps);
2051*4882a593Smuzhiyun lc->autoneg = AUTONEG_DISABLE;
2052*4882a593Smuzhiyun }
2053*4882a593Smuzhiyun
2054*4882a593Smuzhiyun t4vf_os_link_changed(adapter, pi->pidx, link_ok);
2055*4882a593Smuzhiyun }
2056*4882a593Smuzhiyun }
2057*4882a593Smuzhiyun
2058*4882a593Smuzhiyun /**
2059*4882a593Smuzhiyun * t4vf_update_port_info - retrieve and update port information if changed
2060*4882a593Smuzhiyun * @pi: the port_info
2061*4882a593Smuzhiyun *
2062*4882a593Smuzhiyun * We issue a Get Port Information Command to the Firmware and, if
2063*4882a593Smuzhiyun * successful, we check to see if anything is different from what we
2064*4882a593Smuzhiyun * last recorded and update things accordingly.
2065*4882a593Smuzhiyun */
t4vf_update_port_info(struct port_info * pi)2066*4882a593Smuzhiyun int t4vf_update_port_info(struct port_info *pi)
2067*4882a593Smuzhiyun {
2068*4882a593Smuzhiyun unsigned int fw_caps = pi->adapter->params.fw_caps_support;
2069*4882a593Smuzhiyun struct fw_port_cmd port_cmd;
2070*4882a593Smuzhiyun int ret;
2071*4882a593Smuzhiyun
2072*4882a593Smuzhiyun memset(&port_cmd, 0, sizeof(port_cmd));
2073*4882a593Smuzhiyun port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
2074*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_READ_F |
2075*4882a593Smuzhiyun FW_PORT_CMD_PORTID_V(pi->port_id));
2076*4882a593Smuzhiyun port_cmd.action_to_len16 = cpu_to_be32(
2077*4882a593Smuzhiyun FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
2078*4882a593Smuzhiyun ? FW_PORT_ACTION_GET_PORT_INFO
2079*4882a593Smuzhiyun : FW_PORT_ACTION_GET_PORT_INFO32) |
2080*4882a593Smuzhiyun FW_LEN16(port_cmd));
2081*4882a593Smuzhiyun ret = t4vf_wr_mbox(pi->adapter, &port_cmd, sizeof(port_cmd),
2082*4882a593Smuzhiyun &port_cmd);
2083*4882a593Smuzhiyun if (ret)
2084*4882a593Smuzhiyun return ret;
2085*4882a593Smuzhiyun t4vf_handle_get_port_info(pi, &port_cmd);
2086*4882a593Smuzhiyun return 0;
2087*4882a593Smuzhiyun }
2088*4882a593Smuzhiyun
2089*4882a593Smuzhiyun /**
2090*4882a593Smuzhiyun * t4vf_handle_fw_rpl - process a firmware reply message
2091*4882a593Smuzhiyun * @adapter: the adapter
2092*4882a593Smuzhiyun * @rpl: start of the firmware message
2093*4882a593Smuzhiyun *
2094*4882a593Smuzhiyun * Processes a firmware message, such as link state change messages.
2095*4882a593Smuzhiyun */
t4vf_handle_fw_rpl(struct adapter * adapter,const __be64 * rpl)2096*4882a593Smuzhiyun int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
2097*4882a593Smuzhiyun {
2098*4882a593Smuzhiyun const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
2099*4882a593Smuzhiyun u8 opcode = FW_CMD_OP_G(be32_to_cpu(cmd_hdr->hi));
2100*4882a593Smuzhiyun
2101*4882a593Smuzhiyun switch (opcode) {
2102*4882a593Smuzhiyun case FW_PORT_CMD: {
2103*4882a593Smuzhiyun /*
2104*4882a593Smuzhiyun * Link/module state change message.
2105*4882a593Smuzhiyun */
2106*4882a593Smuzhiyun const struct fw_port_cmd *port_cmd =
2107*4882a593Smuzhiyun (const struct fw_port_cmd *)rpl;
2108*4882a593Smuzhiyun int action = FW_PORT_CMD_ACTION_G(
2109*4882a593Smuzhiyun be32_to_cpu(port_cmd->action_to_len16));
2110*4882a593Smuzhiyun int port_id, pidx;
2111*4882a593Smuzhiyun
2112*4882a593Smuzhiyun if (action != FW_PORT_ACTION_GET_PORT_INFO &&
2113*4882a593Smuzhiyun action != FW_PORT_ACTION_GET_PORT_INFO32) {
2114*4882a593Smuzhiyun dev_err(adapter->pdev_dev,
2115*4882a593Smuzhiyun "Unknown firmware PORT reply action %x\n",
2116*4882a593Smuzhiyun action);
2117*4882a593Smuzhiyun break;
2118*4882a593Smuzhiyun }
2119*4882a593Smuzhiyun
2120*4882a593Smuzhiyun port_id = FW_PORT_CMD_PORTID_G(
2121*4882a593Smuzhiyun be32_to_cpu(port_cmd->op_to_portid));
2122*4882a593Smuzhiyun for_each_port(adapter, pidx) {
2123*4882a593Smuzhiyun struct port_info *pi = adap2pinfo(adapter, pidx);
2124*4882a593Smuzhiyun
2125*4882a593Smuzhiyun if (pi->port_id != port_id)
2126*4882a593Smuzhiyun continue;
2127*4882a593Smuzhiyun t4vf_handle_get_port_info(pi, port_cmd);
2128*4882a593Smuzhiyun }
2129*4882a593Smuzhiyun break;
2130*4882a593Smuzhiyun }
2131*4882a593Smuzhiyun
2132*4882a593Smuzhiyun default:
2133*4882a593Smuzhiyun dev_err(adapter->pdev_dev, "Unknown firmware reply %X\n",
2134*4882a593Smuzhiyun opcode);
2135*4882a593Smuzhiyun }
2136*4882a593Smuzhiyun return 0;
2137*4882a593Smuzhiyun }
2138*4882a593Smuzhiyun
t4vf_prep_adapter(struct adapter * adapter)2139*4882a593Smuzhiyun int t4vf_prep_adapter(struct adapter *adapter)
2140*4882a593Smuzhiyun {
2141*4882a593Smuzhiyun int err;
2142*4882a593Smuzhiyun unsigned int chipid;
2143*4882a593Smuzhiyun
2144*4882a593Smuzhiyun /* Wait for the device to become ready before proceeding ...
2145*4882a593Smuzhiyun */
2146*4882a593Smuzhiyun err = t4vf_wait_dev_ready(adapter);
2147*4882a593Smuzhiyun if (err)
2148*4882a593Smuzhiyun return err;
2149*4882a593Smuzhiyun
2150*4882a593Smuzhiyun /* Default port and clock for debugging in case we can't reach
2151*4882a593Smuzhiyun * firmware.
2152*4882a593Smuzhiyun */
2153*4882a593Smuzhiyun adapter->params.nports = 1;
2154*4882a593Smuzhiyun adapter->params.vfres.pmask = 1;
2155*4882a593Smuzhiyun adapter->params.vpd.cclk = 50000;
2156*4882a593Smuzhiyun
2157*4882a593Smuzhiyun adapter->params.chip = 0;
2158*4882a593Smuzhiyun switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) {
2159*4882a593Smuzhiyun case CHELSIO_T4:
2160*4882a593Smuzhiyun adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
2161*4882a593Smuzhiyun adapter->params.arch.sge_fl_db = DBPRIO_F;
2162*4882a593Smuzhiyun adapter->params.arch.mps_tcam_size =
2163*4882a593Smuzhiyun NUM_MPS_CLS_SRAM_L_INSTANCES;
2164*4882a593Smuzhiyun break;
2165*4882a593Smuzhiyun
2166*4882a593Smuzhiyun case CHELSIO_T5:
2167*4882a593Smuzhiyun chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
2168*4882a593Smuzhiyun adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
2169*4882a593Smuzhiyun adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
2170*4882a593Smuzhiyun adapter->params.arch.mps_tcam_size =
2171*4882a593Smuzhiyun NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
2172*4882a593Smuzhiyun break;
2173*4882a593Smuzhiyun
2174*4882a593Smuzhiyun case CHELSIO_T6:
2175*4882a593Smuzhiyun chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
2176*4882a593Smuzhiyun adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, chipid);
2177*4882a593Smuzhiyun adapter->params.arch.sge_fl_db = 0;
2178*4882a593Smuzhiyun adapter->params.arch.mps_tcam_size =
2179*4882a593Smuzhiyun NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
2180*4882a593Smuzhiyun break;
2181*4882a593Smuzhiyun }
2182*4882a593Smuzhiyun
2183*4882a593Smuzhiyun return 0;
2184*4882a593Smuzhiyun }
2185*4882a593Smuzhiyun
2186*4882a593Smuzhiyun /**
2187*4882a593Smuzhiyun * t4vf_get_vf_mac_acl - Get the MAC address to be set to
2188*4882a593Smuzhiyun * the VI of this VF.
2189*4882a593Smuzhiyun * @adapter: The adapter
2190*4882a593Smuzhiyun * @port: The port associated with vf
2191*4882a593Smuzhiyun * @naddr: the number of ACL MAC addresses returned in addr
2192*4882a593Smuzhiyun * @addr: Placeholder for MAC addresses
2193*4882a593Smuzhiyun *
2194*4882a593Smuzhiyun * Find the MAC address to be set to the VF's VI. The requested MAC address
2195*4882a593Smuzhiyun * is from the host OS via callback in the PF driver.
2196*4882a593Smuzhiyun */
t4vf_get_vf_mac_acl(struct adapter * adapter,unsigned int port,unsigned int * naddr,u8 * addr)2197*4882a593Smuzhiyun int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int port,
2198*4882a593Smuzhiyun unsigned int *naddr, u8 *addr)
2199*4882a593Smuzhiyun {
2200*4882a593Smuzhiyun struct fw_acl_mac_cmd cmd;
2201*4882a593Smuzhiyun int ret;
2202*4882a593Smuzhiyun
2203*4882a593Smuzhiyun memset(&cmd, 0, sizeof(cmd));
2204*4882a593Smuzhiyun cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) |
2205*4882a593Smuzhiyun FW_CMD_REQUEST_F |
2206*4882a593Smuzhiyun FW_CMD_READ_F);
2207*4882a593Smuzhiyun cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
2208*4882a593Smuzhiyun ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &cmd);
2209*4882a593Smuzhiyun if (ret)
2210*4882a593Smuzhiyun return ret;
2211*4882a593Smuzhiyun
2212*4882a593Smuzhiyun if (cmd.nmac < *naddr)
2213*4882a593Smuzhiyun *naddr = cmd.nmac;
2214*4882a593Smuzhiyun
2215*4882a593Smuzhiyun switch (port) {
2216*4882a593Smuzhiyun case 3:
2217*4882a593Smuzhiyun memcpy(addr, cmd.macaddr3, sizeof(cmd.macaddr3));
2218*4882a593Smuzhiyun break;
2219*4882a593Smuzhiyun case 2:
2220*4882a593Smuzhiyun memcpy(addr, cmd.macaddr2, sizeof(cmd.macaddr2));
2221*4882a593Smuzhiyun break;
2222*4882a593Smuzhiyun case 1:
2223*4882a593Smuzhiyun memcpy(addr, cmd.macaddr1, sizeof(cmd.macaddr1));
2224*4882a593Smuzhiyun break;
2225*4882a593Smuzhiyun case 0:
2226*4882a593Smuzhiyun memcpy(addr, cmd.macaddr0, sizeof(cmd.macaddr0));
2227*4882a593Smuzhiyun break;
2228*4882a593Smuzhiyun }
2229*4882a593Smuzhiyun
2230*4882a593Smuzhiyun return ret;
2231*4882a593Smuzhiyun }
2232*4882a593Smuzhiyun
2233*4882a593Smuzhiyun /**
2234*4882a593Smuzhiyun * t4vf_get_vf_vlan_acl - Get the VLAN ID to be set to
2235*4882a593Smuzhiyun * the VI of this VF.
2236*4882a593Smuzhiyun * @adapter: The adapter
2237*4882a593Smuzhiyun *
2238*4882a593Smuzhiyun * Find the VLAN ID to be set to the VF's VI. The requested VLAN ID
2239*4882a593Smuzhiyun * is from the host OS via callback in the PF driver.
2240*4882a593Smuzhiyun */
t4vf_get_vf_vlan_acl(struct adapter * adapter)2241*4882a593Smuzhiyun int t4vf_get_vf_vlan_acl(struct adapter *adapter)
2242*4882a593Smuzhiyun {
2243*4882a593Smuzhiyun struct fw_acl_vlan_cmd cmd;
2244*4882a593Smuzhiyun int vlan = 0;
2245*4882a593Smuzhiyun int ret = 0;
2246*4882a593Smuzhiyun
2247*4882a593Smuzhiyun cmd.op_to_vfn = htonl(FW_CMD_OP_V(FW_ACL_VLAN_CMD) |
2248*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_READ_F);
2249*4882a593Smuzhiyun
2250*4882a593Smuzhiyun /* Note: Do not enable the ACL */
2251*4882a593Smuzhiyun cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
2252*4882a593Smuzhiyun
2253*4882a593Smuzhiyun ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &cmd);
2254*4882a593Smuzhiyun
2255*4882a593Smuzhiyun if (!ret)
2256*4882a593Smuzhiyun vlan = be16_to_cpu(cmd.vlanid[0]);
2257*4882a593Smuzhiyun
2258*4882a593Smuzhiyun return vlan;
2259*4882a593Smuzhiyun }
2260