1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3*4882a593Smuzhiyun * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
4*4882a593Smuzhiyun * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * This software is available to you under a choice of one of two
7*4882a593Smuzhiyun * licenses. You may choose to be licensed under the terms of the GNU
8*4882a593Smuzhiyun * General Public License (GPL) Version 2, available from the file
9*4882a593Smuzhiyun * COPYING in the main directory of this source tree, or the
10*4882a593Smuzhiyun * OpenIB.org BSD license below:
11*4882a593Smuzhiyun *
12*4882a593Smuzhiyun * Redistribution and use in source and binary forms, with or
13*4882a593Smuzhiyun * without modification, are permitted provided that the following
14*4882a593Smuzhiyun * conditions are met:
15*4882a593Smuzhiyun *
16*4882a593Smuzhiyun * - Redistributions of source code must retain the above
17*4882a593Smuzhiyun * copyright notice, this list of conditions and the following
18*4882a593Smuzhiyun * disclaimer.
19*4882a593Smuzhiyun *
20*4882a593Smuzhiyun * - Redistributions in binary form must reproduce the above
21*4882a593Smuzhiyun * copyright notice, this list of conditions and the following
22*4882a593Smuzhiyun * disclaimer in the documentation and/or other materials
23*4882a593Smuzhiyun * provided with the distribution.
24*4882a593Smuzhiyun *
25*4882a593Smuzhiyun * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26*4882a593Smuzhiyun * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27*4882a593Smuzhiyun * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28*4882a593Smuzhiyun * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29*4882a593Smuzhiyun * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30*4882a593Smuzhiyun * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31*4882a593Smuzhiyun * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32*4882a593Smuzhiyun * SOFTWARE.
33*4882a593Smuzhiyun */
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun #include <linux/sched.h>
36*4882a593Smuzhiyun #include <linux/slab.h>
37*4882a593Smuzhiyun #include <linux/export.h>
38*4882a593Smuzhiyun #include <linux/pci.h>
39*4882a593Smuzhiyun #include <linux/errno.h>
40*4882a593Smuzhiyun
41*4882a593Smuzhiyun #include <linux/mlx4/cmd.h>
42*4882a593Smuzhiyun #include <linux/mlx4/device.h>
43*4882a593Smuzhiyun #include <linux/semaphore.h>
44*4882a593Smuzhiyun #include <rdma/ib_smi.h>
45*4882a593Smuzhiyun #include <linux/delay.h>
46*4882a593Smuzhiyun #include <linux/etherdevice.h>
47*4882a593Smuzhiyun
48*4882a593Smuzhiyun #include <asm/io.h>
49*4882a593Smuzhiyun
50*4882a593Smuzhiyun #include "mlx4.h"
51*4882a593Smuzhiyun #include "fw.h"
52*4882a593Smuzhiyun #include "fw_qos.h"
53*4882a593Smuzhiyun #include "mlx4_stats.h"
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun #define CMD_POLL_TOKEN 0xffff
56*4882a593Smuzhiyun #define INBOX_MASK 0xffffffffffffff00ULL
57*4882a593Smuzhiyun
58*4882a593Smuzhiyun #define CMD_CHAN_VER 1
59*4882a593Smuzhiyun #define CMD_CHAN_IF_REV 1
60*4882a593Smuzhiyun
/* Firmware status codes returned in the HCR/VHCR status field.
 * mlx4_status_to_errno() translates these into Linux errno values.
 */
enum {
	/* command completed successfully: */
	CMD_STAT_OK		= 0x00,
	/* Internal error (such as a bus error) occurred while processing command: */
	CMD_STAT_INTERNAL_ERR	= 0x01,
	/* Operation/command not supported or opcode modifier not supported: */
	CMD_STAT_BAD_OP		= 0x02,
	/* Parameter not supported or parameter out of range: */
	CMD_STAT_BAD_PARAM	= 0x03,
	/* System not enabled or bad system state: */
	CMD_STAT_BAD_SYS_STATE	= 0x04,
	/* Attempt to access reserved or unallocated resource: */
	CMD_STAT_BAD_RESOURCE	= 0x05,
	/* Requested resource is currently executing a command, or is otherwise busy: */
	CMD_STAT_RESOURCE_BUSY	= 0x06,
	/* Required capability exceeds device limits: */
	CMD_STAT_EXCEED_LIM	= 0x08,
	/* Resource is not in the appropriate state or ownership: */
	CMD_STAT_BAD_RES_STATE	= 0x09,
	/* Index out of range: */
	CMD_STAT_BAD_INDEX	= 0x0a,
	/* FW image corrupted: */
	CMD_STAT_BAD_NVMEM	= 0x0b,
	/* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
	CMD_STAT_ICM_ERROR	= 0x0c,
	/* Attempt to modify a QP/EE which is not in the presumed state: */
	CMD_STAT_BAD_QP_STATE	= 0x10,
	/* Bad segment parameters (Address/Size): */
	CMD_STAT_BAD_SEG_PARAM	= 0x20,
	/* Memory Region has Memory Windows bound to: */
	CMD_STAT_REG_BOUND	= 0x21,
	/* HCA local attached memory not present: */
	CMD_STAT_LAM_NOT_PRE	= 0x22,
	/* Bad management packet (silently discarded): */
	CMD_STAT_BAD_PKT	= 0x30,
	/* More outstanding CQEs in CQ than new CQ size: */
	CMD_STAT_BAD_SIZE	= 0x40,
	/* Multi Function device support required: */
	CMD_STAT_MULTI_FUNC_REQ	= 0x50,
};
101*4882a593Smuzhiyun
/* Host Command Register (HCR) layout: byte offsets of the command
 * fields, plus bit positions within the final (dispatch) dword.
 */
enum {
	HCR_IN_PARAM_OFFSET	= 0x00,
	HCR_IN_MODIFIER_OFFSET	= 0x08,
	HCR_OUT_PARAM_OFFSET	= 0x0c,
	HCR_TOKEN_OFFSET	= 0x14,
	HCR_STATUS_OFFSET	= 0x18,

	HCR_OPMOD_SHIFT		= 12,	/* opcode modifier field position */
	HCR_T_BIT		= 21,	/* toggle bit, flipped on each post */
	HCR_E_BIT		= 22,	/* set when completion is event-driven */
	HCR_GO_BIT		= 23	/* set while firmware owns the HCR */
};
114*4882a593Smuzhiyun
enum {
	/* how long mlx4_cmd_post() waits (event mode) for the GO bit to clear */
	GO_BIT_TIMEOUT_MSECS	= 10000
};
118*4882a593Smuzhiyun
/* VLAN mode transition types between VST and VGT states */
enum mlx4_vlan_transition {
	MLX4_VLAN_TRANSITION_VST_VST = 0,
	MLX4_VLAN_TRANSITION_VST_VGT = 1,
	MLX4_VLAN_TRANSITION_VGT_VST = 2,
	MLX4_VLAN_TRANSITION_VGT_VGT = 3,
};
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun
/* Per-command bookkeeping for event (completion-driven) command mode. */
struct mlx4_cmd_context {
	struct completion	done;		/* signalled by mlx4_cmd_event() */
	int			result;		/* errno translated from fw_status */
	int			next;		/* next index in the free-context list */
	u64			out_param;	/* immediate output parameter, if any */
	u16			token;		/* matches completions to commands */
	u8			fw_status;	/* raw firmware status byte */
};
135*4882a593Smuzhiyun
136*4882a593Smuzhiyun static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
137*4882a593Smuzhiyun struct mlx4_vhcr_cmd *in_vhcr);
138*4882a593Smuzhiyun
/* Translate a firmware CMD_STAT_* status byte into a Linux errno.
 * CMD_STAT_OK maps to 0; any unknown or unmapped status is reported
 * as -EIO.
 */
static int mlx4_status_to_errno(u8 status)
{
	switch (status) {
	case CMD_STAT_OK:		return 0;
	case CMD_STAT_INTERNAL_ERR:	return -EIO;
	case CMD_STAT_BAD_OP:		return -EPERM;
	case CMD_STAT_BAD_PARAM:	return -EINVAL;
	case CMD_STAT_BAD_SYS_STATE:	return -ENXIO;
	case CMD_STAT_BAD_RESOURCE:	return -EBADF;
	case CMD_STAT_RESOURCE_BUSY:	return -EBUSY;
	case CMD_STAT_EXCEED_LIM:	return -ENOMEM;
	case CMD_STAT_BAD_RES_STATE:	return -EBADF;
	case CMD_STAT_BAD_INDEX:	return -EBADF;
	case CMD_STAT_BAD_NVMEM:	return -EFAULT;
	case CMD_STAT_ICM_ERROR:	return -ENFILE;
	case CMD_STAT_BAD_QP_STATE:	return -EINVAL;
	case CMD_STAT_BAD_SEG_PARAM:	return -EFAULT;
	case CMD_STAT_REG_BOUND:	return -EBUSY;
	case CMD_STAT_LAM_NOT_PRE:	return -EAGAIN;
	case CMD_STAT_BAD_PKT:		return -EINVAL;
	case CMD_STAT_BAD_SIZE:		return -ENOMEM;
	case CMD_STAT_MULTI_FUNC_REQ:	return -EACCES;
	default:			return -EIO;
	}
}
168*4882a593Smuzhiyun
mlx4_errno_to_status(int errno)169*4882a593Smuzhiyun static u8 mlx4_errno_to_status(int errno)
170*4882a593Smuzhiyun {
171*4882a593Smuzhiyun switch (errno) {
172*4882a593Smuzhiyun case -EPERM:
173*4882a593Smuzhiyun return CMD_STAT_BAD_OP;
174*4882a593Smuzhiyun case -EINVAL:
175*4882a593Smuzhiyun return CMD_STAT_BAD_PARAM;
176*4882a593Smuzhiyun case -ENXIO:
177*4882a593Smuzhiyun return CMD_STAT_BAD_SYS_STATE;
178*4882a593Smuzhiyun case -EBUSY:
179*4882a593Smuzhiyun return CMD_STAT_RESOURCE_BUSY;
180*4882a593Smuzhiyun case -ENOMEM:
181*4882a593Smuzhiyun return CMD_STAT_EXCEED_LIM;
182*4882a593Smuzhiyun case -ENFILE:
183*4882a593Smuzhiyun return CMD_STAT_ICM_ERROR;
184*4882a593Smuzhiyun default:
185*4882a593Smuzhiyun return CMD_STAT_INTERNAL_ERR;
186*4882a593Smuzhiyun }
187*4882a593Smuzhiyun }
188*4882a593Smuzhiyun
/* Pick the return value reported to the caller when a command is
 * aborted by the internal-error reset flow.  Teardown-type commands
 * report success so that cleanup can proceed; everything else reports
 * the internal-error errno.
 */
static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
				       u8 op_modifier)
{
	/* QP_ATTACH with op_modifier 0 is a detach; treat it like the
	 * teardown commands below and report success.
	 */
	if (op == MLX4_CMD_QP_ATTACH)
		return op_modifier == 0 ? CMD_STAT_OK :
			mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);

	switch (op) {
	case MLX4_CMD_UNMAP_ICM:
	case MLX4_CMD_UNMAP_ICM_AUX:
	case MLX4_CMD_UNMAP_FA:
	case MLX4_CMD_2RST_QP:
	case MLX4_CMD_HW2SW_EQ:
	case MLX4_CMD_HW2SW_CQ:
	case MLX4_CMD_HW2SW_SRQ:
	case MLX4_CMD_HW2SW_MPT:
	case MLX4_CMD_CLOSE_HCA:
	case MLX4_QP_FLOW_STEERING_DETACH:
	case MLX4_CMD_FREE_RES:
	case MLX4_CMD_CLOSE_PORT:
		return CMD_STAT_OK;
	default:
		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
	}
}
217*4882a593Smuzhiyun
/* Decide whether a failure of a closing/teardown command is fatal
 * (i.e. should push the device into the internal-error state).
 */
static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
{
	switch (op) {
	case MLX4_CMD_CLOSE_HCA:
	case MLX4_CMD_HW2SW_EQ:
	case MLX4_CMD_HW2SW_CQ:
	case MLX4_CMD_2RST_QP:
	case MLX4_CMD_HW2SW_SRQ:
	case MLX4_CMD_SYNC_TPT:
	case MLX4_CMD_UNMAP_ICM:
	case MLX4_CMD_UNMAP_ICM_AUX:
	case MLX4_CMD_UNMAP_FA:
		/* Any error during these closing commands is fatal */
		return 1;
	case MLX4_CMD_HW2SW_MPT:
		/* Fatal, except for CMD_STAT_REG_BOUND: the memory region
		 * still has memory windows bound to it, which may result
		 * from invalid user space usage and is not fatal.
		 */
		return fw_status != CMD_STAT_REG_BOUND;
	default:
		return 0;
	}
}
240*4882a593Smuzhiyun
/* Handle a failed command with respect to the internal-error reset
 * flow.  If the reset flow is active, enter the error state and derive
 * the return code from the command; otherwise pass the caller's error
 * through untouched.
 */
static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
			       int err)
{
	if (!mlx4_internal_err_reset)
		return err;

	mlx4_enter_error_state(dev->persist);
	return mlx4_internal_err_ret_value(dev, op, op_modifier);
}
254*4882a593Smuzhiyun
comm_pending(struct mlx4_dev * dev)255*4882a593Smuzhiyun static int comm_pending(struct mlx4_dev *dev)
256*4882a593Smuzhiyun {
257*4882a593Smuzhiyun struct mlx4_priv *priv = mlx4_priv(dev);
258*4882a593Smuzhiyun u32 status = readl(&priv->mfunc.comm->slave_read);
259*4882a593Smuzhiyun
260*4882a593Smuzhiyun return (swab32(status) >> 31) != priv->cmd.comm_toggle;
261*4882a593Smuzhiyun }
262*4882a593Smuzhiyun
/* Write one command to the comm-channel doorbell.
 *
 * Returns 0 on success, -EIO when the device is in the INTERNAL_ERROR
 * state.  The check and the write are both done under
 * device_state_mutex, so we never touch the doorbell after the device
 * state changed to internal error and the function was reset.
 */
static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err = -EIO;
	u32 val;

	mutex_lock(&dev->persist->device_state_mutex);
	if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
		priv->cmd.comm_toggle ^= 1;
		val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
		__raw_writel((__force u32) cpu_to_be32(val),
			     &priv->mfunc.comm->slave_write);
		err = 0;
	}
	mutex_unlock(&dev->persist->device_state_mutex);

	return err;
}
287*4882a593Smuzhiyun
/* Issue a comm-channel command and poll (with rescheduling) until the
 * master acknowledges it by flipping the toggle back, or the timeout
 * expires.  Serialized by poll_sem.
 */
static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
			      unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	unsigned long end;
	int err = 0;

	/* First, verify that the master reports correct status */
	if (comm_pending(dev)) {
		mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
			  priv->cmd.comm_toggle, cmd);
		return -EAGAIN;
	}

	/* Write command */
	down(&priv->cmd.poll_sem);
	if (mlx4_comm_cmd_post(dev, cmd, param)) {
		/* Only in case the device state is INTERNAL_ERROR,
		 * mlx4_comm_cmd_post returns with an error
		 */
		err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
		goto out;
	}

	end = jiffies + msecs_to_jiffies(timeout);
	while (comm_pending(dev) && time_before(jiffies, end))
		cond_resched();

	if (comm_pending(dev)) {
		/* check if the slave is trying to boot in the middle of
		 * FLR process. The only non-zero result in the RESET command
		 * is MLX4_DELAY_RESET_SLAVE
		 */
		if (cmd == MLX4_COMM_CMD_RESET) {
			err = MLX4_DELAY_RESET_SLAVE;
			goto out;
		}
		mlx4_warn(dev, "Communication channel command 0x%x timed out\n",
			  cmd);
		err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
	}

	if (err)
		mlx4_enter_error_state(dev->persist);
out:
	up(&priv->cmd.poll_sem);
	return err;
}
337*4882a593Smuzhiyun
/* Issue a comm-channel command in event (completion-driven) mode.
 *
 * A command context is taken from the free list; the comm-channel event
 * handler signals the context's completion (via mlx4_cmd_event()) and
 * matches it using the token.  Returns 0 on success or a negative
 * errno.  A timeout, or a fatal error on a closing command, moves the
 * device into the internal-error state.
 */
static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 vhcr_cmd,
			      u16 param, u16 op, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	unsigned long end;
	int err = 0;

	/* event_sem counts free contexts, so one is guaranteed below */
	down(&cmd->event_sem);

	spin_lock(&cmd->context_lock);
	BUG_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	/* advance the generation bits of the token so that a stale
	 * completion for a previous use of this slot is ignored
	 */
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	reinit_completion(&context->done);

	if (mlx4_comm_cmd_post(dev, vhcr_cmd, param)) {
		/* Only in case the device state is INTERNAL_ERROR,
		 * mlx4_comm_cmd_post returns with an error
		 */
		err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
		goto out;
	}

	if (!wait_for_completion_timeout(&context->done,
					 msecs_to_jiffies(timeout))) {
		mlx4_warn(dev, "communication channel command 0x%x (op=0x%x) timed out\n",
			  vhcr_cmd, op);
		goto out_reset;
	}

	err = context->result;
	if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 vhcr_cmd, context->fw_status);
		if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
			goto out_reset;
	}

	/* wait for comm channel ready
	 * this is necessary for prevention the race
	 * when switching between event to polling mode
	 * Skipping this section in case the device is in FATAL_ERROR state,
	 * In this state, no commands are sent via the comm channel until
	 * the device has returned from reset.
	 */
	if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
		end = msecs_to_jiffies(timeout) + jiffies;
		while (comm_pending(dev) && time_before(jiffies, end))
			cond_resched();
	}
	goto out;

out_reset:
	err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
	mlx4_enter_error_state(dev->persist);
out:
	/* return the context to the free list and release our slot */
	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}
406*4882a593Smuzhiyun
/* Entry point for comm-channel commands: dispatch to event or polling
 * mode.  Refuses to issue anything once the device is in the
 * INTERNAL_ERROR state.
 */
int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
		  u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);

	return priv->cmd.use_events ?
		mlx4_comm_cmd_wait(dev, cmd, param, op, timeout) :
		mlx4_comm_cmd_poll(dev, cmd, param, timeout);
}
417*4882a593Smuzhiyun
cmd_pending(struct mlx4_dev * dev)418*4882a593Smuzhiyun static int cmd_pending(struct mlx4_dev *dev)
419*4882a593Smuzhiyun {
420*4882a593Smuzhiyun u32 status;
421*4882a593Smuzhiyun
422*4882a593Smuzhiyun if (pci_channel_offline(dev->persist->pdev))
423*4882a593Smuzhiyun return -EIO;
424*4882a593Smuzhiyun
425*4882a593Smuzhiyun status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
426*4882a593Smuzhiyun
427*4882a593Smuzhiyun return (status & swab32(1 << HCR_GO_BIT)) ||
428*4882a593Smuzhiyun (mlx4_priv(dev)->cmd.toggle ==
429*4882a593Smuzhiyun !!(status & swab32(1 << HCR_T_BIT)));
430*4882a593Smuzhiyun }
431*4882a593Smuzhiyun
/* Post a command to the HCR.
 *
 * Waits for the previous command to be accepted (up to
 * GO_BIT_TIMEOUT_MSECS in event mode; polling mode does not wait beyond
 * an immediate check), writes the command parameters, and finally sets
 * the GO bit to hand the HCR over to firmware.
 *
 * Returns 0 if the command was posted, -EIO if the device is going
 * through error recovery or the GO bit never cleared.
 */
static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
			 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
			 int event)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	u32 __iomem *hcr = cmd->hcr;
	int ret = -EIO;
	unsigned long end;

	mutex_lock(&dev->persist->device_state_mutex);
	/* To avoid writing to unknown addresses after the device state was
	 * changed to internal error and the chip was reset,
	 * check the INTERNAL_ERROR flag which is updated under
	 * device_state_mutex lock.
	 */
	if (pci_channel_offline(dev->persist->pdev) ||
	    (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
		/*
		 * Device is going through error recovery
		 * and cannot accept commands.
		 */
		goto out;
	}

	end = jiffies;
	if (event)
		end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);

	while (cmd_pending(dev)) {
		if (pci_channel_offline(dev->persist->pdev)) {
			/*
			 * Device is going through error recovery
			 * and cannot accept commands.
			 */
			goto out;
		}

		if (time_after_eq(jiffies, end)) {
			mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
			goto out;
		}
		cond_resched();
	}

	/*
	 * We use writel (instead of something like memcpy_toio)
	 * because writes of less than 32 bits to the HCR don't work
	 * (and some architectures such as ia64 implement memcpy_toio
	 * in terms of writeb).
	 */
	__raw_writel((__force u32) cpu_to_be32(in_param >> 32),		hcr + 0);
	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),	hcr + 1);
	__raw_writel((__force u32) cpu_to_be32(in_modifier),		hcr + 2);
	__raw_writel((__force u32) cpu_to_be32(out_param >> 32),	hcr + 3);
	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful),	hcr + 4);
	__raw_writel((__force u32) cpu_to_be32(token << 16),		hcr + 5);

	/* __raw_writel may not order writes. */
	wmb();

	/* the GO-bit write must come last: it transfers HCR ownership */
	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)		|
					       (cmd->toggle << HCR_T_BIT)	|
					       (event ? (1 << HCR_E_BIT) : 0)	|
					       (op_modifier << HCR_OPMOD_SHIFT) |
					       op), hcr + 6);

	cmd->toggle = cmd->toggle ^ 1;

	ret = 0;

out:
	if (ret)
		mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n",
			  op, ret, in_param, in_modifier, op_modifier);
	mutex_unlock(&dev->persist->device_state_mutex);

	return ret;
}
510*4882a593Smuzhiyun
/* Translate the VHCR status to an errno and, for commands with an
 * immediate output parameter, copy it back to the caller.  Shared by
 * the master and slave paths of mlx4_slave_cmd() (the original code
 * duplicated this block verbatim in both branches).
 */
static int mlx4_vhcr_complete(struct mlx4_dev *dev,
			      struct mlx4_vhcr_cmd *vhcr,
			      u64 *out_param, int out_is_imm, u16 op)
{
	if (out_is_imm) {
		if (out_param)
			*out_param = be64_to_cpu(vhcr->out_param);
		else {
			mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
				 op);
			vhcr->status = CMD_STAT_BAD_PARAM;
		}
	}
	return mlx4_status_to_errno(vhcr->status);
}

/* Execute a command through the virtual HCR (VHCR).
 *
 * On the master the VHCR is processed directly; on a slave the
 * command is forwarded over the comm channel via VHCR_POST.  The
 * whole exchange is serialized by slave_cmd_mutex.  Returns 0 on
 * success or a negative errno; if the device entered the
 * INTERNAL_ERROR state, the errno is replaced by the per-command
 * internal-error return value.
 */
static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			  int out_is_imm, u32 in_modifier, u8 op_modifier,
			  u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
	int ret;

	mutex_lock(&priv->cmd.slave_cmd_mutex);

	vhcr->in_param = cpu_to_be64(in_param);
	vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
	vhcr->in_modifier = cpu_to_be32(in_modifier);
	vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
	vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
	vhcr->status = 0;
	vhcr->flags = !!(priv->cmd.use_events) << 6;

	if (mlx4_is_master(dev)) {
		ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
		if (!ret)
			ret = mlx4_vhcr_complete(dev, vhcr, out_param,
						 out_is_imm, op);
		/* even a "successful" VHCR may carry a bad status; map it
		 * to the internal-error return value when in error state
		 */
		if (ret &&
		    dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
			ret = mlx4_internal_err_ret_value(dev, op,
							  op_modifier);
	} else {
		ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, op,
				    MLX4_COMM_TIME + timeout);
		if (!ret) {
			ret = mlx4_vhcr_complete(dev, vhcr, out_param,
						 out_is_imm, op);
		} else if (dev->persist->state &
			   MLX4_DEVICE_STATE_INTERNAL_ERROR) {
			ret = mlx4_internal_err_ret_value(dev, op,
							  op_modifier);
		} else {
			mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", op);
		}
	}

	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return ret;
}
575*4882a593Smuzhiyun
/* Execute a command in polling mode: post it to the HCR, busy-wait
 * (with cond_resched()) for the GO bit to clear, then read the status
 * byte and - for immediate commands - the output parameter straight
 * from the HCR.  Serialized by poll_sem.  On a failed post, timeout, or
 * fatal closing-command error, the result is run through
 * mlx4_cmd_reset_flow().
 */
static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	void __iomem *hcr = priv->cmd.hcr;
	int err = 0;
	unsigned long end;
	u32 stat;

	down(&priv->cmd.poll_sem);

	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		/*
		 * Device is going through error recovery
		 * and cannot accept commands.
		 */
		err = mlx4_internal_err_ret_value(dev, op, op_modifier);
		goto out;
	}

	if (out_is_imm && !out_param) {
		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
			 op);
		err = -EINVAL;
		goto out;
	}

	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
	if (err)
		goto out_reset;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (cmd_pending(dev) && time_before(jiffies, end)) {
		if (pci_channel_offline(dev->persist->pdev)) {
			/*
			 * Device is going through error recovery
			 * and cannot accept commands.
			 */
			err = -EIO;
			goto out_reset;
		}

		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
			err = mlx4_internal_err_ret_value(dev, op, op_modifier);
			goto out;
		}

		cond_resched();
	}

	if (cmd_pending(dev)) {
		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
			  op);
		err = -EIO;
		goto out_reset;
	}

	/* read the 64-bit immediate result as two big-endian dwords */
	if (out_is_imm)
		*out_param =
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
	/* status is the top byte of the status dword */
	stat = be32_to_cpu((__force __be32)
			   __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
	err = mlx4_status_to_errno(stat);
	if (err) {
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 op, stat);
		if (mlx4_closing_cmd_fatal_error(op, stat))
			goto out_reset;
		goto out;
	}

out_reset:
	if (err)
		err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
out:
	up(&priv->cmd.poll_sem);
	return err;
}
659*4882a593Smuzhiyun
/*
 * Completion callback for event-mode firmware commands, invoked from the
 * command-EQ handling path.  Looks up the waiting command context by
 * token, records the firmware status/output, and wakes the sleeper in
 * mlx4_cmd_wait().
 */
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *ctx;

	ctx = &cmd->context[token & cmd->token_mask];

	/* Stale completion: the command already timed out and this context
	 * was recycled under a new token.  Drop it silently. */
	if (ctx->token != token)
		return;

	ctx->fw_status = status;
	ctx->result    = mlx4_status_to_errno(status);
	ctx->out_param = out_param;

	complete(&ctx->done);
}
676*4882a593Smuzhiyun
/*
 * Execute a firmware command in event (interrupt-driven) mode: post the
 * command to the HCR, then sleep on a completion that mlx4_cmd_event()
 * signals when the command's completion event arrives.
 *
 * Returns 0 on success or a negative errno (translated from the firmware
 * status).  On fatal errors the command-reset flow is triggered via
 * mlx4_cmd_reset_flow().
 */
static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	long ret_wait;
	int err = 0;

	/* event_sem throttles the number of event-mode commands in flight
	 * to the number of available command contexts. */
	down(&cmd->event_sem);

	/* Pop a free context off the free list.  Bumping the token by
	 * (token_mask + 1) keeps the low bits (the context index) intact
	 * while making a late completion of a previously timed-out command
	 * on this context recognizable as stale in mlx4_cmd_event(). */
	spin_lock(&cmd->context_lock);
	BUG_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	/* An immediate-output command must have somewhere to put the result. */
	if (out_is_imm && !out_param) {
		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
			 op);
		err = -EINVAL;
		goto out;
	}

	reinit_completion(&context->done);

	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
			    in_modifier, op_modifier, op, context->token, 1);
	if (err)
		goto out_reset;

	if (op == MLX4_CMD_SENSE_PORT) {
		/* SENSE_PORT may be interrupted (e.g. by a signal); treat an
		 * interruption as a benign empty result rather than an error. */
		ret_wait =
			wait_for_completion_interruptible_timeout(&context->done,
								  msecs_to_jiffies(timeout));
		if (ret_wait < 0) {
			context->fw_status = 0;
			context->out_param = 0;
			context->result = 0;
		}
	} else {
		ret_wait = (long)wait_for_completion_timeout(&context->done,
							     msecs_to_jiffies(timeout));
	}
	if (!ret_wait) {
		/* Timed out: no completion event arrived in time. */
		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
			  op);
		if (op == MLX4_CMD_NOP) {
			/* NOP timeouts are used to probe interrupt delivery;
			 * don't trigger the reset flow for them. */
			err = -EBUSY;
			goto out;
		} else {
			err = -EIO;
			goto out_reset;
		}
	}

	err = context->result;
	if (err) {
		/* Since we do not want to have this error message always
		 * displayed at driver start when there are ConnectX2 HCAs
		 * on the host, we deprecate the error message for this
		 * specific command/input_mod/opcode_mod/fw-status to be debug.
		 */
		if (op == MLX4_CMD_SET_PORT &&
		    (in_modifier == 1 || in_modifier == 2) &&
		    op_modifier == MLX4_SET_PORT_IB_OPCODE &&
		    context->fw_status == CMD_STAT_BAD_SIZE)
			mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
				 op, context->fw_status);
		else
			mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
				 op, context->fw_status);
		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
			err = mlx4_internal_err_ret_value(dev, op, op_modifier);
		else if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
			goto out_reset;

		goto out;
	}

	if (out_is_imm)
		*out_param = context->out_param;

out_reset:
	if (err)
		err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
out:
	/* Return the context to the free list and release the in-flight slot. */
	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}
773*4882a593Smuzhiyun
/*
 * Top-level dispatcher for firmware commands.
 *
 * Slave (VF) functions tunnel the command to the PF over the comm
 * channel; native/master callers issue it directly to the HCA, in
 * either event mode (interrupt driven) or polling mode depending on
 * the current driver state.
 */
int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
	       int out_is_imm, u32 in_modifier, u8 op_modifier,
	       u16 op, unsigned long timeout, int native)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int ret;

	/* PCI error recovery in progress: fail through the reset flow. */
	if (pci_channel_offline(dev->persist->pdev))
		return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);

	/* Multi-function and not a native call on the master: tunnel the
	 * command through the comm channel to the PF. */
	if (mlx4_is_mfunc(dev) && !(native && mlx4_is_master(dev)))
		return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
				      in_modifier, op_modifier, op, timeout);

	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
		return mlx4_internal_err_ret_value(dev, op, op_modifier);

	/* switch_sem guards against a concurrent switch between event
	 * and polling modes. */
	down_read(&priv->cmd.switch_sem);
	if (priv->cmd.use_events)
		ret = mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
				    in_modifier, op_modifier, op, timeout);
	else
		ret = mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
				    in_modifier, op_modifier, op, timeout);
	up_read(&priv->cmd.switch_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(__mlx4_cmd);
804*4882a593Smuzhiyun
805*4882a593Smuzhiyun
mlx4_ARM_COMM_CHANNEL(struct mlx4_dev * dev)806*4882a593Smuzhiyun int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
807*4882a593Smuzhiyun {
808*4882a593Smuzhiyun return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
809*4882a593Smuzhiyun MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
810*4882a593Smuzhiyun }
811*4882a593Smuzhiyun
/*
 * Move a memory block between the master (PF) and a slave (VF) using the
 * ACCESS_MEM firmware command.  Both addresses must be 4KB aligned, the
 * slave id must fit in 7 bits, and the size must be a multiple of 256
 * bytes (low 8 bits clear).
 */
static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
			   int slave, u64 slave_addr,
			   int size, int is_read)
{
	u64 src;
	u64 dst;

	/* Validate alignment, slave-id range and size granularity in one
	 * combined check. */
	if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
	    (slave & ~0x7f) | (size & 0xff)) {
		mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
			 slave_addr, master_addr, slave, size);
		return -EINVAL;
	}

	/* Encode the function id into the low bits of each address;
	 * direction selects which side is the source. */
	if (is_read) {
		src = (u64) slave | slave_addr;
		dst = (u64) dev->caps.function | master_addr;
	} else {
		src = (u64) dev->caps.function | master_addr;
		dst = (u64) slave | slave_addr;
	}

	return mlx4_cmd_imm(dev, src, &dst, size, 0,
			    MLX4_CMD_ACCESS_MEM,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
838*4882a593Smuzhiyun
/*
 * Read one 32-entry block of @port's physical pkey table via a MAD_IFC
 * query.  @index must be a multiple of 32; the entries are returned
 * host-endian in @pkey.
 */
static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox)
{
	struct ib_smp *in_mad = inbox->buf;
	struct ib_smp *out_mad = outbox->buf;
	__be16 *resp;
	int err;
	int i;

	/* Pkey blocks are addressed in units of 32 entries. */
	if (index & 0x1f)
		return -EINVAL;

	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (err)
		return err;

	resp = (__be16 *)out_mad->data;
	for (i = 0; i < 32; i++)
		pkey[i] = be16_to_cpu(resp[i]);

	return 0;
}
864*4882a593Smuzhiyun
/* Fetch the entire physical pkey table of @port into @table, one
 * 32-entry block at a time, stopping on the first error. */
static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox)
{
	int block;
	int err;

	for (block = 0; block < dev->caps.pkey_table_len[port]; block += 32) {
		err = query_pkey_block(dev, port, block, table + block,
				       inbox, outbox);
		if (err)
			return err;
	}

	return 0;
}
880*4882a593Smuzhiyun #define PORT_CAPABILITY_LOCATION_IN_SMP 20
881*4882a593Smuzhiyun #define PORT_STATE_OFFSET 32
882*4882a593Smuzhiyun
vf_port_state(struct mlx4_dev * dev,int port,int vf)883*4882a593Smuzhiyun static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
884*4882a593Smuzhiyun {
885*4882a593Smuzhiyun if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
886*4882a593Smuzhiyun return IB_PORT_ACTIVE;
887*4882a593Smuzhiyun else
888*4882a593Smuzhiyun return IB_PORT_DOWN;
889*4882a593Smuzhiyun }
890*4882a593Smuzhiyun
/*
 * Paravirtualize MAD_IFC commands issued on behalf of slaves.
 *
 * LID-routed subnet-management 'Get' queries for the pkey table, port
 * info, GUID info and node info are intercepted and their responses are
 * rewritten so each slave sees only its own virtualized view.  Other
 * subnet-management MADs are rejected for unprivileged slaves; anything
 * allowed is forwarded to the firmware unchanged.
 */
static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	struct ib_smp *smp = inbox->buf;
	u32 index;
	u8 port, slave_port;
	u8 opcode_modifier;
	u16 *table;
	int err;
	int vidx, pidx;
	int network_view;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct ib_smp *outsmp = outbox->buf;
	__be16 *outtab = (__be16 *)(outsmp->data);
	__be32 slave_cap_mask;
	__be64 slave_node_guid;

	/* Translate the slave's port number to the physical port. */
	slave_port = vhcr->in_modifier;
	port = mlx4_slave_convert_port(dev, slave, slave_port);

	/* network-view bit is for driver use only, and should not be passed to FW */
	opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
	network_view = !!(vhcr->op_modifier & 0x8);

	if (smp->base_version == 1 &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
	    smp->class_version == 1) {
		/* host view is paravirtualized */
		if (!network_view && smp->method == IB_MGMT_METHOD_GET) {
			if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
				index = be32_to_cpu(smp->attr_mod);
				if (port < 1 || port > dev->caps.num_ports)
					return -EINVAL;
				table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1,
						sizeof(*table) * 32, GFP_KERNEL);

				if (!table)
					return -ENOMEM;
				/* need to get the full pkey table because the paravirtualized
				 * pkeys may be scattered among several pkey blocks.
				 */
				err = get_full_pkey_table(dev, port, table, inbox, outbox);
				if (!err) {
					/* Map each virtual pkey index in the requested
					 * block to its physical index and copy the
					 * corresponding entry to the response. */
					for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
						pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
						outtab[vidx % 32] = cpu_to_be16(table[pidx]);
					}
				}
				kfree(table);
				return err;
			}
			if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
				/*get the slave specific caps:*/
				/*do the command */
				smp->attr_mod = cpu_to_be32(port);
				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
					    port, opcode_modifier,
					    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
				/* modify the response for slaves */
				if (!err && slave != mlx4_master_func_num(dev)) {
					u8 *state = outsmp->data + PORT_STATE_OFFSET;

					/* Overwrite the low nibble (port state) with
					 * this VF's virtualized state, and splice in
					 * the slave's IB capability mask. */
					*state = (*state & 0xf0) | vf_port_state(dev, port, slave);
					slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
					memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
				}
				return err;
			}
			if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
				__be64 guid = mlx4_get_admin_guid(dev, slave,
								  port);

				/* set the PF admin guid to the FW/HW burned
				 * GUID, if it wasn't yet set
				 */
				if (slave == 0 && guid == 0) {
					smp->attr_mod = 0;
					err = mlx4_cmd_box(dev,
							   inbox->dma,
							   outbox->dma,
							   vhcr->in_modifier,
							   opcode_modifier,
							   vhcr->op,
							   MLX4_CMD_TIME_CLASS_C,
							   MLX4_CMD_NATIVE);
					if (err)
						return err;
					mlx4_set_admin_guid(dev,
							    *(__be64 *)outsmp->
							    data, slave, port);
				} else {
					memcpy(outsmp->data, &guid, 8);
				}

				/* clean all other gids */
				memset(outsmp->data + 8, 0, 56);
				return 0;
			}
			if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
					     port, opcode_modifier,
					     vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
				if (!err) {
					/* Replace the node GUID with the slave's
					 * virtualized node GUID. */
					slave_node_guid =  mlx4_get_slave_node_guid(dev, slave);
					memcpy(outsmp->data + 12, &slave_node_guid, 8);
				}
				return err;
			}
		}
	}

	/* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs.
	 * These are the MADs used by ib verbs (such as ib_query_gids).
	 */
	if (slave != mlx4_master_func_num(dev) &&
	    !mlx4_vf_smi_enabled(dev, slave, port)) {
		if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
		      smp->method == IB_MGMT_METHOD_GET) || network_view) {
			mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
				 slave, smp->mgmt_class, smp->method,
				 network_view ? "Network" : "Host",
				 be16_to_cpu(smp->attr_id));
			return -EPERM;
		}
	}

	/* Allowed: pass the MAD through to the firmware unchanged. */
	return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
				    vhcr->in_modifier, opcode_modifier,
				    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
}
1024*4882a593Smuzhiyun
mlx4_CMD_EPERM_wrapper(struct mlx4_dev * dev,int slave,struct mlx4_vhcr * vhcr,struct mlx4_cmd_mailbox * inbox,struct mlx4_cmd_mailbox * outbox,struct mlx4_cmd_info * cmd)1025*4882a593Smuzhiyun static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
1026*4882a593Smuzhiyun struct mlx4_vhcr *vhcr,
1027*4882a593Smuzhiyun struct mlx4_cmd_mailbox *inbox,
1028*4882a593Smuzhiyun struct mlx4_cmd_mailbox *outbox,
1029*4882a593Smuzhiyun struct mlx4_cmd_info *cmd)
1030*4882a593Smuzhiyun {
1031*4882a593Smuzhiyun return -EPERM;
1032*4882a593Smuzhiyun }
1033*4882a593Smuzhiyun
mlx4_DMA_wrapper(struct mlx4_dev * dev,int slave,struct mlx4_vhcr * vhcr,struct mlx4_cmd_mailbox * inbox,struct mlx4_cmd_mailbox * outbox,struct mlx4_cmd_info * cmd)1034*4882a593Smuzhiyun int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
1035*4882a593Smuzhiyun struct mlx4_vhcr *vhcr,
1036*4882a593Smuzhiyun struct mlx4_cmd_mailbox *inbox,
1037*4882a593Smuzhiyun struct mlx4_cmd_mailbox *outbox,
1038*4882a593Smuzhiyun struct mlx4_cmd_info *cmd)
1039*4882a593Smuzhiyun {
1040*4882a593Smuzhiyun u64 in_param;
1041*4882a593Smuzhiyun u64 out_param;
1042*4882a593Smuzhiyun int err;
1043*4882a593Smuzhiyun
1044*4882a593Smuzhiyun in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
1045*4882a593Smuzhiyun out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
1046*4882a593Smuzhiyun if (cmd->encode_slave_id) {
1047*4882a593Smuzhiyun in_param &= 0xffffffffffffff00ll;
1048*4882a593Smuzhiyun in_param |= slave;
1049*4882a593Smuzhiyun }
1050*4882a593Smuzhiyun
1051*4882a593Smuzhiyun err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
1052*4882a593Smuzhiyun vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
1053*4882a593Smuzhiyun MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1054*4882a593Smuzhiyun
1055*4882a593Smuzhiyun if (cmd->out_is_imm)
1056*4882a593Smuzhiyun vhcr->out_param = out_param;
1057*4882a593Smuzhiyun
1058*4882a593Smuzhiyun return err;
1059*4882a593Smuzhiyun }
1060*4882a593Smuzhiyun
1061*4882a593Smuzhiyun static struct mlx4_cmd_info cmd_info[] = {
1062*4882a593Smuzhiyun {
1063*4882a593Smuzhiyun .opcode = MLX4_CMD_QUERY_FW,
1064*4882a593Smuzhiyun .has_inbox = false,
1065*4882a593Smuzhiyun .has_outbox = true,
1066*4882a593Smuzhiyun .out_is_imm = false,
1067*4882a593Smuzhiyun .encode_slave_id = false,
1068*4882a593Smuzhiyun .verify = NULL,
1069*4882a593Smuzhiyun .wrapper = mlx4_QUERY_FW_wrapper
1070*4882a593Smuzhiyun },
1071*4882a593Smuzhiyun {
1072*4882a593Smuzhiyun .opcode = MLX4_CMD_QUERY_HCA,
1073*4882a593Smuzhiyun .has_inbox = false,
1074*4882a593Smuzhiyun .has_outbox = true,
1075*4882a593Smuzhiyun .out_is_imm = false,
1076*4882a593Smuzhiyun .encode_slave_id = false,
1077*4882a593Smuzhiyun .verify = NULL,
1078*4882a593Smuzhiyun .wrapper = NULL
1079*4882a593Smuzhiyun },
1080*4882a593Smuzhiyun {
1081*4882a593Smuzhiyun .opcode = MLX4_CMD_QUERY_DEV_CAP,
1082*4882a593Smuzhiyun .has_inbox = false,
1083*4882a593Smuzhiyun .has_outbox = true,
1084*4882a593Smuzhiyun .out_is_imm = false,
1085*4882a593Smuzhiyun .encode_slave_id = false,
1086*4882a593Smuzhiyun .verify = NULL,
1087*4882a593Smuzhiyun .wrapper = mlx4_QUERY_DEV_CAP_wrapper
1088*4882a593Smuzhiyun },
1089*4882a593Smuzhiyun {
1090*4882a593Smuzhiyun .opcode = MLX4_CMD_QUERY_FUNC_CAP,
1091*4882a593Smuzhiyun .has_inbox = false,
1092*4882a593Smuzhiyun .has_outbox = true,
1093*4882a593Smuzhiyun .out_is_imm = false,
1094*4882a593Smuzhiyun .encode_slave_id = false,
1095*4882a593Smuzhiyun .verify = NULL,
1096*4882a593Smuzhiyun .wrapper = mlx4_QUERY_FUNC_CAP_wrapper
1097*4882a593Smuzhiyun },
1098*4882a593Smuzhiyun {
1099*4882a593Smuzhiyun .opcode = MLX4_CMD_QUERY_ADAPTER,
1100*4882a593Smuzhiyun .has_inbox = false,
1101*4882a593Smuzhiyun .has_outbox = true,
1102*4882a593Smuzhiyun .out_is_imm = false,
1103*4882a593Smuzhiyun .encode_slave_id = false,
1104*4882a593Smuzhiyun .verify = NULL,
1105*4882a593Smuzhiyun .wrapper = NULL
1106*4882a593Smuzhiyun },
1107*4882a593Smuzhiyun {
1108*4882a593Smuzhiyun .opcode = MLX4_CMD_INIT_PORT,
1109*4882a593Smuzhiyun .has_inbox = false,
1110*4882a593Smuzhiyun .has_outbox = false,
1111*4882a593Smuzhiyun .out_is_imm = false,
1112*4882a593Smuzhiyun .encode_slave_id = false,
1113*4882a593Smuzhiyun .verify = NULL,
1114*4882a593Smuzhiyun .wrapper = mlx4_INIT_PORT_wrapper
1115*4882a593Smuzhiyun },
1116*4882a593Smuzhiyun {
1117*4882a593Smuzhiyun .opcode = MLX4_CMD_CLOSE_PORT,
1118*4882a593Smuzhiyun .has_inbox = false,
1119*4882a593Smuzhiyun .has_outbox = false,
1120*4882a593Smuzhiyun .out_is_imm = false,
1121*4882a593Smuzhiyun .encode_slave_id = false,
1122*4882a593Smuzhiyun .verify = NULL,
1123*4882a593Smuzhiyun .wrapper = mlx4_CLOSE_PORT_wrapper
1124*4882a593Smuzhiyun },
1125*4882a593Smuzhiyun {
1126*4882a593Smuzhiyun .opcode = MLX4_CMD_QUERY_PORT,
1127*4882a593Smuzhiyun .has_inbox = false,
1128*4882a593Smuzhiyun .has_outbox = true,
1129*4882a593Smuzhiyun .out_is_imm = false,
1130*4882a593Smuzhiyun .encode_slave_id = false,
1131*4882a593Smuzhiyun .verify = NULL,
1132*4882a593Smuzhiyun .wrapper = mlx4_QUERY_PORT_wrapper
1133*4882a593Smuzhiyun },
1134*4882a593Smuzhiyun {
1135*4882a593Smuzhiyun .opcode = MLX4_CMD_SET_PORT,
1136*4882a593Smuzhiyun .has_inbox = true,
1137*4882a593Smuzhiyun .has_outbox = false,
1138*4882a593Smuzhiyun .out_is_imm = false,
1139*4882a593Smuzhiyun .encode_slave_id = false,
1140*4882a593Smuzhiyun .verify = NULL,
1141*4882a593Smuzhiyun .wrapper = mlx4_SET_PORT_wrapper
1142*4882a593Smuzhiyun },
1143*4882a593Smuzhiyun {
1144*4882a593Smuzhiyun .opcode = MLX4_CMD_MAP_EQ,
1145*4882a593Smuzhiyun .has_inbox = false,
1146*4882a593Smuzhiyun .has_outbox = false,
1147*4882a593Smuzhiyun .out_is_imm = false,
1148*4882a593Smuzhiyun .encode_slave_id = false,
1149*4882a593Smuzhiyun .verify = NULL,
1150*4882a593Smuzhiyun .wrapper = mlx4_MAP_EQ_wrapper
1151*4882a593Smuzhiyun },
1152*4882a593Smuzhiyun {
1153*4882a593Smuzhiyun .opcode = MLX4_CMD_SW2HW_EQ,
1154*4882a593Smuzhiyun .has_inbox = true,
1155*4882a593Smuzhiyun .has_outbox = false,
1156*4882a593Smuzhiyun .out_is_imm = false,
1157*4882a593Smuzhiyun .encode_slave_id = true,
1158*4882a593Smuzhiyun .verify = NULL,
1159*4882a593Smuzhiyun .wrapper = mlx4_SW2HW_EQ_wrapper
1160*4882a593Smuzhiyun },
1161*4882a593Smuzhiyun {
1162*4882a593Smuzhiyun .opcode = MLX4_CMD_HW_HEALTH_CHECK,
1163*4882a593Smuzhiyun .has_inbox = false,
1164*4882a593Smuzhiyun .has_outbox = false,
1165*4882a593Smuzhiyun .out_is_imm = false,
1166*4882a593Smuzhiyun .encode_slave_id = false,
1167*4882a593Smuzhiyun .verify = NULL,
1168*4882a593Smuzhiyun .wrapper = NULL
1169*4882a593Smuzhiyun },
1170*4882a593Smuzhiyun {
1171*4882a593Smuzhiyun .opcode = MLX4_CMD_NOP,
1172*4882a593Smuzhiyun .has_inbox = false,
1173*4882a593Smuzhiyun .has_outbox = false,
1174*4882a593Smuzhiyun .out_is_imm = false,
1175*4882a593Smuzhiyun .encode_slave_id = false,
1176*4882a593Smuzhiyun .verify = NULL,
1177*4882a593Smuzhiyun .wrapper = NULL
1178*4882a593Smuzhiyun },
1179*4882a593Smuzhiyun {
1180*4882a593Smuzhiyun .opcode = MLX4_CMD_CONFIG_DEV,
1181*4882a593Smuzhiyun .has_inbox = false,
1182*4882a593Smuzhiyun .has_outbox = true,
1183*4882a593Smuzhiyun .out_is_imm = false,
1184*4882a593Smuzhiyun .encode_slave_id = false,
1185*4882a593Smuzhiyun .verify = NULL,
1186*4882a593Smuzhiyun .wrapper = mlx4_CONFIG_DEV_wrapper
1187*4882a593Smuzhiyun },
1188*4882a593Smuzhiyun {
1189*4882a593Smuzhiyun .opcode = MLX4_CMD_ALLOC_RES,
1190*4882a593Smuzhiyun .has_inbox = false,
1191*4882a593Smuzhiyun .has_outbox = false,
1192*4882a593Smuzhiyun .out_is_imm = true,
1193*4882a593Smuzhiyun .encode_slave_id = false,
1194*4882a593Smuzhiyun .verify = NULL,
1195*4882a593Smuzhiyun .wrapper = mlx4_ALLOC_RES_wrapper
1196*4882a593Smuzhiyun },
1197*4882a593Smuzhiyun {
1198*4882a593Smuzhiyun .opcode = MLX4_CMD_FREE_RES,
1199*4882a593Smuzhiyun .has_inbox = false,
1200*4882a593Smuzhiyun .has_outbox = false,
1201*4882a593Smuzhiyun .out_is_imm = false,
1202*4882a593Smuzhiyun .encode_slave_id = false,
1203*4882a593Smuzhiyun .verify = NULL,
1204*4882a593Smuzhiyun .wrapper = mlx4_FREE_RES_wrapper
1205*4882a593Smuzhiyun },
1206*4882a593Smuzhiyun {
1207*4882a593Smuzhiyun .opcode = MLX4_CMD_SW2HW_MPT,
1208*4882a593Smuzhiyun .has_inbox = true,
1209*4882a593Smuzhiyun .has_outbox = false,
1210*4882a593Smuzhiyun .out_is_imm = false,
1211*4882a593Smuzhiyun .encode_slave_id = true,
1212*4882a593Smuzhiyun .verify = NULL,
1213*4882a593Smuzhiyun .wrapper = mlx4_SW2HW_MPT_wrapper
1214*4882a593Smuzhiyun },
1215*4882a593Smuzhiyun {
1216*4882a593Smuzhiyun .opcode = MLX4_CMD_QUERY_MPT,
1217*4882a593Smuzhiyun .has_inbox = false,
1218*4882a593Smuzhiyun .has_outbox = true,
1219*4882a593Smuzhiyun .out_is_imm = false,
1220*4882a593Smuzhiyun .encode_slave_id = false,
1221*4882a593Smuzhiyun .verify = NULL,
1222*4882a593Smuzhiyun .wrapper = mlx4_QUERY_MPT_wrapper
1223*4882a593Smuzhiyun },
1224*4882a593Smuzhiyun {
1225*4882a593Smuzhiyun .opcode = MLX4_CMD_HW2SW_MPT,
1226*4882a593Smuzhiyun .has_inbox = false,
1227*4882a593Smuzhiyun .has_outbox = false,
1228*4882a593Smuzhiyun .out_is_imm = false,
1229*4882a593Smuzhiyun .encode_slave_id = false,
1230*4882a593Smuzhiyun .verify = NULL,
1231*4882a593Smuzhiyun .wrapper = mlx4_HW2SW_MPT_wrapper
1232*4882a593Smuzhiyun },
1233*4882a593Smuzhiyun {
1234*4882a593Smuzhiyun .opcode = MLX4_CMD_READ_MTT,
1235*4882a593Smuzhiyun .has_inbox = false,
1236*4882a593Smuzhiyun .has_outbox = true,
1237*4882a593Smuzhiyun .out_is_imm = false,
1238*4882a593Smuzhiyun .encode_slave_id = false,
1239*4882a593Smuzhiyun .verify = NULL,
1240*4882a593Smuzhiyun .wrapper = NULL
1241*4882a593Smuzhiyun },
1242*4882a593Smuzhiyun {
1243*4882a593Smuzhiyun .opcode = MLX4_CMD_WRITE_MTT,
1244*4882a593Smuzhiyun .has_inbox = true,
1245*4882a593Smuzhiyun .has_outbox = false,
1246*4882a593Smuzhiyun .out_is_imm = false,
1247*4882a593Smuzhiyun .encode_slave_id = false,
1248*4882a593Smuzhiyun .verify = NULL,
1249*4882a593Smuzhiyun .wrapper = mlx4_WRITE_MTT_wrapper
1250*4882a593Smuzhiyun },
1251*4882a593Smuzhiyun {
1252*4882a593Smuzhiyun .opcode = MLX4_CMD_SYNC_TPT,
1253*4882a593Smuzhiyun .has_inbox = true,
1254*4882a593Smuzhiyun .has_outbox = false,
1255*4882a593Smuzhiyun .out_is_imm = false,
1256*4882a593Smuzhiyun .encode_slave_id = false,
1257*4882a593Smuzhiyun .verify = NULL,
1258*4882a593Smuzhiyun .wrapper = NULL
1259*4882a593Smuzhiyun },
1260*4882a593Smuzhiyun {
1261*4882a593Smuzhiyun .opcode = MLX4_CMD_HW2SW_EQ,
1262*4882a593Smuzhiyun .has_inbox = false,
1263*4882a593Smuzhiyun .has_outbox = false,
1264*4882a593Smuzhiyun .out_is_imm = false,
1265*4882a593Smuzhiyun .encode_slave_id = true,
1266*4882a593Smuzhiyun .verify = NULL,
1267*4882a593Smuzhiyun .wrapper = mlx4_HW2SW_EQ_wrapper
1268*4882a593Smuzhiyun },
1269*4882a593Smuzhiyun {
1270*4882a593Smuzhiyun .opcode = MLX4_CMD_QUERY_EQ,
1271*4882a593Smuzhiyun .has_inbox = false,
1272*4882a593Smuzhiyun .has_outbox = true,
1273*4882a593Smuzhiyun .out_is_imm = false,
1274*4882a593Smuzhiyun .encode_slave_id = true,
1275*4882a593Smuzhiyun .verify = NULL,
1276*4882a593Smuzhiyun .wrapper = mlx4_QUERY_EQ_wrapper
1277*4882a593Smuzhiyun },
1278*4882a593Smuzhiyun {
1279*4882a593Smuzhiyun .opcode = MLX4_CMD_SW2HW_CQ,
1280*4882a593Smuzhiyun .has_inbox = true,
1281*4882a593Smuzhiyun .has_outbox = false,
1282*4882a593Smuzhiyun .out_is_imm = false,
1283*4882a593Smuzhiyun .encode_slave_id = true,
1284*4882a593Smuzhiyun .verify = NULL,
1285*4882a593Smuzhiyun .wrapper = mlx4_SW2HW_CQ_wrapper
1286*4882a593Smuzhiyun },
1287*4882a593Smuzhiyun {
1288*4882a593Smuzhiyun .opcode = MLX4_CMD_HW2SW_CQ,
1289*4882a593Smuzhiyun .has_inbox = false,
1290*4882a593Smuzhiyun .has_outbox = false,
1291*4882a593Smuzhiyun .out_is_imm = false,
1292*4882a593Smuzhiyun .encode_slave_id = false,
1293*4882a593Smuzhiyun .verify = NULL,
1294*4882a593Smuzhiyun .wrapper = mlx4_HW2SW_CQ_wrapper
1295*4882a593Smuzhiyun },
1296*4882a593Smuzhiyun {
1297*4882a593Smuzhiyun .opcode = MLX4_CMD_QUERY_CQ,
1298*4882a593Smuzhiyun .has_inbox = false,
1299*4882a593Smuzhiyun .has_outbox = true,
1300*4882a593Smuzhiyun .out_is_imm = false,
1301*4882a593Smuzhiyun .encode_slave_id = false,
1302*4882a593Smuzhiyun .verify = NULL,
1303*4882a593Smuzhiyun .wrapper = mlx4_QUERY_CQ_wrapper
1304*4882a593Smuzhiyun },
1305*4882a593Smuzhiyun {
1306*4882a593Smuzhiyun .opcode = MLX4_CMD_MODIFY_CQ,
1307*4882a593Smuzhiyun .has_inbox = true,
1308*4882a593Smuzhiyun .has_outbox = false,
1309*4882a593Smuzhiyun .out_is_imm = true,
1310*4882a593Smuzhiyun .encode_slave_id = false,
1311*4882a593Smuzhiyun .verify = NULL,
1312*4882a593Smuzhiyun .wrapper = mlx4_MODIFY_CQ_wrapper
1313*4882a593Smuzhiyun },
1314*4882a593Smuzhiyun {
1315*4882a593Smuzhiyun .opcode = MLX4_CMD_SW2HW_SRQ,
1316*4882a593Smuzhiyun .has_inbox = true,
1317*4882a593Smuzhiyun .has_outbox = false,
1318*4882a593Smuzhiyun .out_is_imm = false,
1319*4882a593Smuzhiyun .encode_slave_id = true,
1320*4882a593Smuzhiyun .verify = NULL,
1321*4882a593Smuzhiyun .wrapper = mlx4_SW2HW_SRQ_wrapper
1322*4882a593Smuzhiyun },
1323*4882a593Smuzhiyun {
1324*4882a593Smuzhiyun .opcode = MLX4_CMD_HW2SW_SRQ,
1325*4882a593Smuzhiyun .has_inbox = false,
1326*4882a593Smuzhiyun .has_outbox = false,
1327*4882a593Smuzhiyun .out_is_imm = false,
1328*4882a593Smuzhiyun .encode_slave_id = false,
1329*4882a593Smuzhiyun .verify = NULL,
1330*4882a593Smuzhiyun .wrapper = mlx4_HW2SW_SRQ_wrapper
1331*4882a593Smuzhiyun },
1332*4882a593Smuzhiyun {
1333*4882a593Smuzhiyun .opcode = MLX4_CMD_QUERY_SRQ,
1334*4882a593Smuzhiyun .has_inbox = false,
1335*4882a593Smuzhiyun .has_outbox = true,
1336*4882a593Smuzhiyun .out_is_imm = false,
1337*4882a593Smuzhiyun .encode_slave_id = false,
1338*4882a593Smuzhiyun .verify = NULL,
1339*4882a593Smuzhiyun .wrapper = mlx4_QUERY_SRQ_wrapper
1340*4882a593Smuzhiyun },
1341*4882a593Smuzhiyun {
1342*4882a593Smuzhiyun .opcode = MLX4_CMD_ARM_SRQ,
1343*4882a593Smuzhiyun .has_inbox = false,
1344*4882a593Smuzhiyun .has_outbox = false,
1345*4882a593Smuzhiyun .out_is_imm = false,
1346*4882a593Smuzhiyun .encode_slave_id = false,
1347*4882a593Smuzhiyun .verify = NULL,
1348*4882a593Smuzhiyun .wrapper = mlx4_ARM_SRQ_wrapper
1349*4882a593Smuzhiyun },
1350*4882a593Smuzhiyun {
1351*4882a593Smuzhiyun .opcode = MLX4_CMD_RST2INIT_QP,
1352*4882a593Smuzhiyun .has_inbox = true,
1353*4882a593Smuzhiyun .has_outbox = false,
1354*4882a593Smuzhiyun .out_is_imm = false,
1355*4882a593Smuzhiyun .encode_slave_id = true,
1356*4882a593Smuzhiyun .verify = NULL,
1357*4882a593Smuzhiyun .wrapper = mlx4_RST2INIT_QP_wrapper
1358*4882a593Smuzhiyun },
1359*4882a593Smuzhiyun {
1360*4882a593Smuzhiyun .opcode = MLX4_CMD_INIT2INIT_QP,
1361*4882a593Smuzhiyun .has_inbox = true,
1362*4882a593Smuzhiyun .has_outbox = false,
1363*4882a593Smuzhiyun .out_is_imm = false,
1364*4882a593Smuzhiyun .encode_slave_id = false,
1365*4882a593Smuzhiyun .verify = NULL,
1366*4882a593Smuzhiyun .wrapper = mlx4_INIT2INIT_QP_wrapper
1367*4882a593Smuzhiyun },
1368*4882a593Smuzhiyun {
1369*4882a593Smuzhiyun .opcode = MLX4_CMD_INIT2RTR_QP,
1370*4882a593Smuzhiyun .has_inbox = true,
1371*4882a593Smuzhiyun .has_outbox = false,
1372*4882a593Smuzhiyun .out_is_imm = false,
1373*4882a593Smuzhiyun .encode_slave_id = false,
1374*4882a593Smuzhiyun .verify = NULL,
1375*4882a593Smuzhiyun .wrapper = mlx4_INIT2RTR_QP_wrapper
1376*4882a593Smuzhiyun },
1377*4882a593Smuzhiyun {
1378*4882a593Smuzhiyun .opcode = MLX4_CMD_RTR2RTS_QP,
1379*4882a593Smuzhiyun .has_inbox = true,
1380*4882a593Smuzhiyun .has_outbox = false,
1381*4882a593Smuzhiyun .out_is_imm = false,
1382*4882a593Smuzhiyun .encode_slave_id = false,
1383*4882a593Smuzhiyun .verify = NULL,
1384*4882a593Smuzhiyun .wrapper = mlx4_RTR2RTS_QP_wrapper
1385*4882a593Smuzhiyun },
1386*4882a593Smuzhiyun {
1387*4882a593Smuzhiyun .opcode = MLX4_CMD_RTS2RTS_QP,
1388*4882a593Smuzhiyun .has_inbox = true,
1389*4882a593Smuzhiyun .has_outbox = false,
1390*4882a593Smuzhiyun .out_is_imm = false,
1391*4882a593Smuzhiyun .encode_slave_id = false,
1392*4882a593Smuzhiyun .verify = NULL,
1393*4882a593Smuzhiyun .wrapper = mlx4_RTS2RTS_QP_wrapper
1394*4882a593Smuzhiyun },
1395*4882a593Smuzhiyun {
1396*4882a593Smuzhiyun .opcode = MLX4_CMD_SQERR2RTS_QP,
1397*4882a593Smuzhiyun .has_inbox = true,
1398*4882a593Smuzhiyun .has_outbox = false,
1399*4882a593Smuzhiyun .out_is_imm = false,
1400*4882a593Smuzhiyun .encode_slave_id = false,
1401*4882a593Smuzhiyun .verify = NULL,
1402*4882a593Smuzhiyun .wrapper = mlx4_SQERR2RTS_QP_wrapper
1403*4882a593Smuzhiyun },
1404*4882a593Smuzhiyun {
1405*4882a593Smuzhiyun .opcode = MLX4_CMD_2ERR_QP,
1406*4882a593Smuzhiyun .has_inbox = false,
1407*4882a593Smuzhiyun .has_outbox = false,
1408*4882a593Smuzhiyun .out_is_imm = false,
1409*4882a593Smuzhiyun .encode_slave_id = false,
1410*4882a593Smuzhiyun .verify = NULL,
1411*4882a593Smuzhiyun .wrapper = mlx4_GEN_QP_wrapper
1412*4882a593Smuzhiyun },
1413*4882a593Smuzhiyun {
1414*4882a593Smuzhiyun .opcode = MLX4_CMD_RTS2SQD_QP,
1415*4882a593Smuzhiyun .has_inbox = false,
1416*4882a593Smuzhiyun .has_outbox = false,
1417*4882a593Smuzhiyun .out_is_imm = false,
1418*4882a593Smuzhiyun .encode_slave_id = false,
1419*4882a593Smuzhiyun .verify = NULL,
1420*4882a593Smuzhiyun .wrapper = mlx4_GEN_QP_wrapper
1421*4882a593Smuzhiyun },
1422*4882a593Smuzhiyun {
1423*4882a593Smuzhiyun .opcode = MLX4_CMD_SQD2SQD_QP,
1424*4882a593Smuzhiyun .has_inbox = true,
1425*4882a593Smuzhiyun .has_outbox = false,
1426*4882a593Smuzhiyun .out_is_imm = false,
1427*4882a593Smuzhiyun .encode_slave_id = false,
1428*4882a593Smuzhiyun .verify = NULL,
1429*4882a593Smuzhiyun .wrapper = mlx4_SQD2SQD_QP_wrapper
1430*4882a593Smuzhiyun },
1431*4882a593Smuzhiyun {
1432*4882a593Smuzhiyun .opcode = MLX4_CMD_SQD2RTS_QP,
1433*4882a593Smuzhiyun .has_inbox = true,
1434*4882a593Smuzhiyun .has_outbox = false,
1435*4882a593Smuzhiyun .out_is_imm = false,
1436*4882a593Smuzhiyun .encode_slave_id = false,
1437*4882a593Smuzhiyun .verify = NULL,
1438*4882a593Smuzhiyun .wrapper = mlx4_SQD2RTS_QP_wrapper
1439*4882a593Smuzhiyun },
1440*4882a593Smuzhiyun {
1441*4882a593Smuzhiyun .opcode = MLX4_CMD_2RST_QP,
1442*4882a593Smuzhiyun .has_inbox = false,
1443*4882a593Smuzhiyun .has_outbox = false,
1444*4882a593Smuzhiyun .out_is_imm = false,
1445*4882a593Smuzhiyun .encode_slave_id = false,
1446*4882a593Smuzhiyun .verify = NULL,
1447*4882a593Smuzhiyun .wrapper = mlx4_2RST_QP_wrapper
1448*4882a593Smuzhiyun },
1449*4882a593Smuzhiyun {
1450*4882a593Smuzhiyun .opcode = MLX4_CMD_QUERY_QP,
1451*4882a593Smuzhiyun .has_inbox = false,
1452*4882a593Smuzhiyun .has_outbox = true,
1453*4882a593Smuzhiyun .out_is_imm = false,
1454*4882a593Smuzhiyun .encode_slave_id = false,
1455*4882a593Smuzhiyun .verify = NULL,
1456*4882a593Smuzhiyun .wrapper = mlx4_GEN_QP_wrapper
1457*4882a593Smuzhiyun },
1458*4882a593Smuzhiyun {
1459*4882a593Smuzhiyun .opcode = MLX4_CMD_SUSPEND_QP,
1460*4882a593Smuzhiyun .has_inbox = false,
1461*4882a593Smuzhiyun .has_outbox = false,
1462*4882a593Smuzhiyun .out_is_imm = false,
1463*4882a593Smuzhiyun .encode_slave_id = false,
1464*4882a593Smuzhiyun .verify = NULL,
1465*4882a593Smuzhiyun .wrapper = mlx4_GEN_QP_wrapper
1466*4882a593Smuzhiyun },
1467*4882a593Smuzhiyun {
1468*4882a593Smuzhiyun .opcode = MLX4_CMD_UNSUSPEND_QP,
1469*4882a593Smuzhiyun .has_inbox = false,
1470*4882a593Smuzhiyun .has_outbox = false,
1471*4882a593Smuzhiyun .out_is_imm = false,
1472*4882a593Smuzhiyun .encode_slave_id = false,
1473*4882a593Smuzhiyun .verify = NULL,
1474*4882a593Smuzhiyun .wrapper = mlx4_GEN_QP_wrapper
1475*4882a593Smuzhiyun },
1476*4882a593Smuzhiyun {
1477*4882a593Smuzhiyun .opcode = MLX4_CMD_UPDATE_QP,
1478*4882a593Smuzhiyun .has_inbox = true,
1479*4882a593Smuzhiyun .has_outbox = false,
1480*4882a593Smuzhiyun .out_is_imm = false,
1481*4882a593Smuzhiyun .encode_slave_id = false,
1482*4882a593Smuzhiyun .verify = NULL,
1483*4882a593Smuzhiyun .wrapper = mlx4_UPDATE_QP_wrapper
1484*4882a593Smuzhiyun },
1485*4882a593Smuzhiyun {
1486*4882a593Smuzhiyun .opcode = MLX4_CMD_GET_OP_REQ,
1487*4882a593Smuzhiyun .has_inbox = false,
1488*4882a593Smuzhiyun .has_outbox = false,
1489*4882a593Smuzhiyun .out_is_imm = false,
1490*4882a593Smuzhiyun .encode_slave_id = false,
1491*4882a593Smuzhiyun .verify = NULL,
1492*4882a593Smuzhiyun .wrapper = mlx4_CMD_EPERM_wrapper,
1493*4882a593Smuzhiyun },
1494*4882a593Smuzhiyun {
1495*4882a593Smuzhiyun .opcode = MLX4_CMD_ALLOCATE_VPP,
1496*4882a593Smuzhiyun .has_inbox = false,
1497*4882a593Smuzhiyun .has_outbox = true,
1498*4882a593Smuzhiyun .out_is_imm = false,
1499*4882a593Smuzhiyun .encode_slave_id = false,
1500*4882a593Smuzhiyun .verify = NULL,
1501*4882a593Smuzhiyun .wrapper = mlx4_CMD_EPERM_wrapper,
1502*4882a593Smuzhiyun },
1503*4882a593Smuzhiyun {
1504*4882a593Smuzhiyun .opcode = MLX4_CMD_SET_VPORT_QOS,
1505*4882a593Smuzhiyun .has_inbox = false,
1506*4882a593Smuzhiyun .has_outbox = true,
1507*4882a593Smuzhiyun .out_is_imm = false,
1508*4882a593Smuzhiyun .encode_slave_id = false,
1509*4882a593Smuzhiyun .verify = NULL,
1510*4882a593Smuzhiyun .wrapper = mlx4_CMD_EPERM_wrapper,
1511*4882a593Smuzhiyun },
1512*4882a593Smuzhiyun {
1513*4882a593Smuzhiyun .opcode = MLX4_CMD_CONF_SPECIAL_QP,
1514*4882a593Smuzhiyun .has_inbox = false,
1515*4882a593Smuzhiyun .has_outbox = false,
1516*4882a593Smuzhiyun .out_is_imm = false,
1517*4882a593Smuzhiyun .encode_slave_id = false,
1518*4882a593Smuzhiyun .verify = NULL, /* XXX verify: only demux can do this */
1519*4882a593Smuzhiyun .wrapper = NULL
1520*4882a593Smuzhiyun },
1521*4882a593Smuzhiyun {
1522*4882a593Smuzhiyun .opcode = MLX4_CMD_MAD_IFC,
1523*4882a593Smuzhiyun .has_inbox = true,
1524*4882a593Smuzhiyun .has_outbox = true,
1525*4882a593Smuzhiyun .out_is_imm = false,
1526*4882a593Smuzhiyun .encode_slave_id = false,
1527*4882a593Smuzhiyun .verify = NULL,
1528*4882a593Smuzhiyun .wrapper = mlx4_MAD_IFC_wrapper
1529*4882a593Smuzhiyun },
1530*4882a593Smuzhiyun {
1531*4882a593Smuzhiyun .opcode = MLX4_CMD_MAD_DEMUX,
1532*4882a593Smuzhiyun .has_inbox = false,
1533*4882a593Smuzhiyun .has_outbox = false,
1534*4882a593Smuzhiyun .out_is_imm = false,
1535*4882a593Smuzhiyun .encode_slave_id = false,
1536*4882a593Smuzhiyun .verify = NULL,
1537*4882a593Smuzhiyun .wrapper = mlx4_CMD_EPERM_wrapper
1538*4882a593Smuzhiyun },
1539*4882a593Smuzhiyun {
1540*4882a593Smuzhiyun .opcode = MLX4_CMD_QUERY_IF_STAT,
1541*4882a593Smuzhiyun .has_inbox = false,
1542*4882a593Smuzhiyun .has_outbox = true,
1543*4882a593Smuzhiyun .out_is_imm = false,
1544*4882a593Smuzhiyun .encode_slave_id = false,
1545*4882a593Smuzhiyun .verify = NULL,
1546*4882a593Smuzhiyun .wrapper = mlx4_QUERY_IF_STAT_wrapper
1547*4882a593Smuzhiyun },
1548*4882a593Smuzhiyun {
1549*4882a593Smuzhiyun .opcode = MLX4_CMD_ACCESS_REG,
1550*4882a593Smuzhiyun .has_inbox = true,
1551*4882a593Smuzhiyun .has_outbox = true,
1552*4882a593Smuzhiyun .out_is_imm = false,
1553*4882a593Smuzhiyun .encode_slave_id = false,
1554*4882a593Smuzhiyun .verify = NULL,
1555*4882a593Smuzhiyun .wrapper = mlx4_ACCESS_REG_wrapper,
1556*4882a593Smuzhiyun },
1557*4882a593Smuzhiyun {
1558*4882a593Smuzhiyun .opcode = MLX4_CMD_CONGESTION_CTRL_OPCODE,
1559*4882a593Smuzhiyun .has_inbox = false,
1560*4882a593Smuzhiyun .has_outbox = false,
1561*4882a593Smuzhiyun .out_is_imm = false,
1562*4882a593Smuzhiyun .encode_slave_id = false,
1563*4882a593Smuzhiyun .verify = NULL,
1564*4882a593Smuzhiyun .wrapper = mlx4_CMD_EPERM_wrapper,
1565*4882a593Smuzhiyun },
1566*4882a593Smuzhiyun /* Native multicast commands are not available for guests */
1567*4882a593Smuzhiyun {
1568*4882a593Smuzhiyun .opcode = MLX4_CMD_QP_ATTACH,
1569*4882a593Smuzhiyun .has_inbox = true,
1570*4882a593Smuzhiyun .has_outbox = false,
1571*4882a593Smuzhiyun .out_is_imm = false,
1572*4882a593Smuzhiyun .encode_slave_id = false,
1573*4882a593Smuzhiyun .verify = NULL,
1574*4882a593Smuzhiyun .wrapper = mlx4_QP_ATTACH_wrapper
1575*4882a593Smuzhiyun },
1576*4882a593Smuzhiyun {
1577*4882a593Smuzhiyun .opcode = MLX4_CMD_PROMISC,
1578*4882a593Smuzhiyun .has_inbox = false,
1579*4882a593Smuzhiyun .has_outbox = false,
1580*4882a593Smuzhiyun .out_is_imm = false,
1581*4882a593Smuzhiyun .encode_slave_id = false,
1582*4882a593Smuzhiyun .verify = NULL,
1583*4882a593Smuzhiyun .wrapper = mlx4_PROMISC_wrapper
1584*4882a593Smuzhiyun },
1585*4882a593Smuzhiyun /* Ethernet specific commands */
1586*4882a593Smuzhiyun {
1587*4882a593Smuzhiyun .opcode = MLX4_CMD_SET_VLAN_FLTR,
1588*4882a593Smuzhiyun .has_inbox = true,
1589*4882a593Smuzhiyun .has_outbox = false,
1590*4882a593Smuzhiyun .out_is_imm = false,
1591*4882a593Smuzhiyun .encode_slave_id = false,
1592*4882a593Smuzhiyun .verify = NULL,
1593*4882a593Smuzhiyun .wrapper = mlx4_SET_VLAN_FLTR_wrapper
1594*4882a593Smuzhiyun },
1595*4882a593Smuzhiyun {
1596*4882a593Smuzhiyun .opcode = MLX4_CMD_SET_MCAST_FLTR,
1597*4882a593Smuzhiyun .has_inbox = false,
1598*4882a593Smuzhiyun .has_outbox = false,
1599*4882a593Smuzhiyun .out_is_imm = false,
1600*4882a593Smuzhiyun .encode_slave_id = false,
1601*4882a593Smuzhiyun .verify = NULL,
1602*4882a593Smuzhiyun .wrapper = mlx4_SET_MCAST_FLTR_wrapper
1603*4882a593Smuzhiyun },
1604*4882a593Smuzhiyun {
1605*4882a593Smuzhiyun .opcode = MLX4_CMD_DUMP_ETH_STATS,
1606*4882a593Smuzhiyun .has_inbox = false,
1607*4882a593Smuzhiyun .has_outbox = true,
1608*4882a593Smuzhiyun .out_is_imm = false,
1609*4882a593Smuzhiyun .encode_slave_id = false,
1610*4882a593Smuzhiyun .verify = NULL,
1611*4882a593Smuzhiyun .wrapper = mlx4_DUMP_ETH_STATS_wrapper
1612*4882a593Smuzhiyun },
1613*4882a593Smuzhiyun {
1614*4882a593Smuzhiyun .opcode = MLX4_CMD_INFORM_FLR_DONE,
1615*4882a593Smuzhiyun .has_inbox = false,
1616*4882a593Smuzhiyun .has_outbox = false,
1617*4882a593Smuzhiyun .out_is_imm = false,
1618*4882a593Smuzhiyun .encode_slave_id = false,
1619*4882a593Smuzhiyun .verify = NULL,
1620*4882a593Smuzhiyun .wrapper = NULL
1621*4882a593Smuzhiyun },
1622*4882a593Smuzhiyun /* flow steering commands */
1623*4882a593Smuzhiyun {
1624*4882a593Smuzhiyun .opcode = MLX4_QP_FLOW_STEERING_ATTACH,
1625*4882a593Smuzhiyun .has_inbox = true,
1626*4882a593Smuzhiyun .has_outbox = false,
1627*4882a593Smuzhiyun .out_is_imm = true,
1628*4882a593Smuzhiyun .encode_slave_id = false,
1629*4882a593Smuzhiyun .verify = NULL,
1630*4882a593Smuzhiyun .wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
1631*4882a593Smuzhiyun },
1632*4882a593Smuzhiyun {
1633*4882a593Smuzhiyun .opcode = MLX4_QP_FLOW_STEERING_DETACH,
1634*4882a593Smuzhiyun .has_inbox = false,
1635*4882a593Smuzhiyun .has_outbox = false,
1636*4882a593Smuzhiyun .out_is_imm = false,
1637*4882a593Smuzhiyun .encode_slave_id = false,
1638*4882a593Smuzhiyun .verify = NULL,
1639*4882a593Smuzhiyun .wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
1640*4882a593Smuzhiyun },
1641*4882a593Smuzhiyun {
1642*4882a593Smuzhiyun .opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
1643*4882a593Smuzhiyun .has_inbox = false,
1644*4882a593Smuzhiyun .has_outbox = false,
1645*4882a593Smuzhiyun .out_is_imm = false,
1646*4882a593Smuzhiyun .encode_slave_id = false,
1647*4882a593Smuzhiyun .verify = NULL,
1648*4882a593Smuzhiyun .wrapper = mlx4_CMD_EPERM_wrapper
1649*4882a593Smuzhiyun },
1650*4882a593Smuzhiyun {
1651*4882a593Smuzhiyun .opcode = MLX4_CMD_VIRT_PORT_MAP,
1652*4882a593Smuzhiyun .has_inbox = false,
1653*4882a593Smuzhiyun .has_outbox = false,
1654*4882a593Smuzhiyun .out_is_imm = false,
1655*4882a593Smuzhiyun .encode_slave_id = false,
1656*4882a593Smuzhiyun .verify = NULL,
1657*4882a593Smuzhiyun .wrapper = mlx4_CMD_EPERM_wrapper
1658*4882a593Smuzhiyun },
1659*4882a593Smuzhiyun };
1660*4882a593Smuzhiyun
/* Execute one virtualized command on behalf of a slave (VF).
 *
 * The slave's command description lives in a Virtual HCR (vHCR): either
 * handed in directly via @in_vhcr, or (when @in_vhcr is NULL) DMA'd in
 * from the slave's memory.  The master looks the opcode up in cmd_info[],
 * copies in/out mailboxes between slave and master memory as required,
 * runs the command (through a registered wrapper if one exists, otherwise
 * natively via __mlx4_cmd()), and finally DMAs the vHCR - including the
 * completion status - back to the slave.
 *
 * Command-level failures are reported to the slave through
 * vhcr_cmd->status; the function's return value reflects the transport
 * itself (DMA of vHCR/mailboxes).  A non-zero return after a successful
 * command execution (failed outbox write-back) leaves the slave in an
 * undefined state - the caller must fail that slave.
 */
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
				    struct mlx4_vhcr_cmd *in_vhcr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_info *cmd = NULL;
	/* Use the caller-supplied vHCR when given, else the shared copy
	 * that the DMA below fills in.
	 */
	struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
	struct mlx4_vhcr *vhcr;
	struct mlx4_cmd_mailbox *inbox = NULL;
	struct mlx4_cmd_mailbox *outbox = NULL;
	u64 in_param;
	u64 out_param;
	int ret = 0;
	int i;
	int err = 0;

	/* Create sw representation of Virtual HCR */
	vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
	if (!vhcr)
		return -ENOMEM;

	/* DMA in the vHCR */
	if (!in_vhcr) {
		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
				      priv->mfunc.master.slave_state[slave].vhcr_dma,
				      ALIGN(sizeof(struct mlx4_vhcr_cmd),
					    MLX4_ACCESS_MEM_ALIGN), 1);
		if (ret) {
			/* Suppress the error print when the whole device is
			 * already in internal-error state - it is expected.
			 */
			if (!(dev->persist->state &
			      MLX4_DEVICE_STATE_INTERNAL_ERROR))
				mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
					 __func__, ret);
			kfree(vhcr);
			return ret;
		}
	}

	/* Fill SW VHCR fields */
	vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
	vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
	vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
	vhcr->token = be16_to_cpu(vhcr_cmd->token);
	/* opcode field packs the opcode in the low 12 bits and the
	 * op_modifier in the top 4 bits
	 */
	vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
	vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
	/* e_bit: slave asked for an event (EQE) on completion */
	vhcr->e_bit = vhcr_cmd->flags & (1 << 6);

	/* Lookup command */
	for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
		if (vhcr->op == cmd_info[i].opcode) {
			cmd = &cmd_info[i];
			break;
		}
	}
	if (!cmd) {
		/* Unknown opcodes are rejected, not forwarded to HW */
		mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
			 vhcr->op, slave);
		vhcr_cmd->status = CMD_STAT_BAD_PARAM;
		goto out_status;
	}

	/* Read inbox */
	if (cmd->has_inbox) {
		vhcr->in_param &= INBOX_MASK;
		inbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(inbox)) {
			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
			/* NULL so the unified cleanup can free safely */
			inbox = NULL;
			goto out_status;
		}

		/* Pull the slave's inbox mailbox into master memory */
		ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave,
				      vhcr->in_param,
				      MLX4_MAILBOX_SIZE, 1);
		if (ret) {
			if (!(dev->persist->state &
			      MLX4_DEVICE_STATE_INTERNAL_ERROR))
				mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
					 __func__, cmd->opcode);
			vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
			goto out_status;
		}
	}

	/* Apply permission and bound checks if applicable */
	if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
		mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
			  vhcr->op, slave, vhcr->in_modifier);
		vhcr_cmd->status = CMD_STAT_BAD_OP;
		goto out_status;
	}

	/* Allocate outbox */
	if (cmd->has_outbox) {
		outbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(outbox)) {
			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
			outbox = NULL;
			goto out_status;
		}
	}

	/* Execute the command! */
	if (cmd->wrapper) {
		/* Wrapper performs any paravirtualization needed */
		err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
				   cmd);
		if (cmd->out_is_imm)
			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
	} else {
		/* No wrapper: run natively, substituting the master's
		 * mailbox DMA addresses for the slave's parameters.
		 */
		in_param = cmd->has_inbox ? (u64) inbox->dma :
			vhcr->in_param;
		out_param = cmd->has_outbox ? (u64) outbox->dma :
			vhcr->out_param;
		err = __mlx4_cmd(dev, in_param, &out_param,
				 cmd->out_is_imm, vhcr->in_modifier,
				 vhcr->op_modifier, vhcr->op,
				 MLX4_CMD_TIME_CLASS_A,
				 MLX4_CMD_NATIVE);

		/* Immediate results come back through out_param */
		if (cmd->out_is_imm) {
			vhcr->out_param = out_param;
			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
		}
	}

	if (err) {
		if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
			/* Counter exhaustion is an expected per-slave quota
			 * condition - log quietly at debug level.
			 */
			if (vhcr->op == MLX4_CMD_ALLOC_RES &&
			    (vhcr->in_modifier & 0xff) == RES_COUNTER &&
			    err == -EDQUOT)
				mlx4_dbg(dev,
					 "Unable to allocate counter for slave %d (%d)\n",
					 slave, err);
			else
				mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
					  vhcr->op, slave, vhcr->errno, err);
		}
		vhcr_cmd->status = mlx4_errno_to_status(err);
		goto out_status;
	}


	/* Write outbox if command completed successfully */
	if (cmd->has_outbox && !vhcr_cmd->status) {
		ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
				      vhcr->out_param,
				      MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
		if (ret) {
			/* If we failed to write back the outbox after the
			 *command was successfully executed, we must fail this
			 * slave, as it is now in undefined state */
			if (!(dev->persist->state &
			      MLX4_DEVICE_STATE_INTERNAL_ERROR))
				mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
			/* Skip the vHCR status write-back: returning nonzero
			 * ret tells the caller to fail the slave instead.
			 */
			goto out;
		}
	}

out_status:
	/* DMA back vhcr result */
	if (!in_vhcr) {
		/* NOTE(review): write-back length is based on
		 * sizeof(struct mlx4_vhcr) (the SW representation) while the
		 * read above used sizeof(struct mlx4_vhcr_cmd); both are
		 * rounded up to MLX4_ACCESS_MEM_ALIGN - confirm the rounded
		 * sizes are identical.
		 */
		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
				      priv->mfunc.master.slave_state[slave].vhcr_dma,
				      ALIGN(sizeof(struct mlx4_vhcr),
					    MLX4_ACCESS_MEM_ALIGN),
				      MLX4_CMD_WRAPPED);
		if (ret)
			mlx4_err(dev, "%s:Failed writing vhcr result\n",
				 __func__);
		else if (vhcr->e_bit &&
			 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
			/* Slave asked for a completion event but we could
			 * not generate one - it will have to poll.
			 */
			mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
				  slave);
	}

out:
	kfree(vhcr);
	/* mlx4_free_cmd_mailbox() accepts NULL */
	mlx4_free_cmd_mailbox(dev, inbox);
	mlx4_free_cmd_mailbox(dev, outbox);
	return ret;
}
1840*4882a593Smuzhiyun
mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv * priv,int slave,int port)1841*4882a593Smuzhiyun static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1842*4882a593Smuzhiyun int slave, int port)
1843*4882a593Smuzhiyun {
1844*4882a593Smuzhiyun struct mlx4_vport_oper_state *vp_oper;
1845*4882a593Smuzhiyun struct mlx4_vport_state *vp_admin;
1846*4882a593Smuzhiyun struct mlx4_vf_immed_vlan_work *work;
1847*4882a593Smuzhiyun struct mlx4_dev *dev = &(priv->dev);
1848*4882a593Smuzhiyun int err;
1849*4882a593Smuzhiyun int admin_vlan_ix = NO_INDX;
1850*4882a593Smuzhiyun
1851*4882a593Smuzhiyun vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1852*4882a593Smuzhiyun vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1853*4882a593Smuzhiyun
1854*4882a593Smuzhiyun if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
1855*4882a593Smuzhiyun vp_oper->state.default_qos == vp_admin->default_qos &&
1856*4882a593Smuzhiyun vp_oper->state.vlan_proto == vp_admin->vlan_proto &&
1857*4882a593Smuzhiyun vp_oper->state.link_state == vp_admin->link_state &&
1858*4882a593Smuzhiyun vp_oper->state.qos_vport == vp_admin->qos_vport)
1859*4882a593Smuzhiyun return 0;
1860*4882a593Smuzhiyun
1861*4882a593Smuzhiyun if (!(priv->mfunc.master.slave_state[slave].active &&
1862*4882a593Smuzhiyun dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
1863*4882a593Smuzhiyun /* even if the UPDATE_QP command isn't supported, we still want
1864*4882a593Smuzhiyun * to set this VF link according to the admin directive
1865*4882a593Smuzhiyun */
1866*4882a593Smuzhiyun vp_oper->state.link_state = vp_admin->link_state;
1867*4882a593Smuzhiyun return -1;
1868*4882a593Smuzhiyun }
1869*4882a593Smuzhiyun
1870*4882a593Smuzhiyun mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
1871*4882a593Smuzhiyun slave, port);
1872*4882a593Smuzhiyun mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
1873*4882a593Smuzhiyun vp_admin->default_vlan, vp_admin->default_qos,
1874*4882a593Smuzhiyun vp_admin->link_state);
1875*4882a593Smuzhiyun
1876*4882a593Smuzhiyun work = kzalloc(sizeof(*work), GFP_KERNEL);
1877*4882a593Smuzhiyun if (!work)
1878*4882a593Smuzhiyun return -ENOMEM;
1879*4882a593Smuzhiyun
1880*4882a593Smuzhiyun if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
1881*4882a593Smuzhiyun if (MLX4_VGT != vp_admin->default_vlan) {
1882*4882a593Smuzhiyun err = __mlx4_register_vlan(&priv->dev, port,
1883*4882a593Smuzhiyun vp_admin->default_vlan,
1884*4882a593Smuzhiyun &admin_vlan_ix);
1885*4882a593Smuzhiyun if (err) {
1886*4882a593Smuzhiyun kfree(work);
1887*4882a593Smuzhiyun mlx4_warn(&priv->dev,
1888*4882a593Smuzhiyun "No vlan resources slave %d, port %d\n",
1889*4882a593Smuzhiyun slave, port);
1890*4882a593Smuzhiyun return err;
1891*4882a593Smuzhiyun }
1892*4882a593Smuzhiyun } else {
1893*4882a593Smuzhiyun admin_vlan_ix = NO_INDX;
1894*4882a593Smuzhiyun }
1895*4882a593Smuzhiyun work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
1896*4882a593Smuzhiyun mlx4_dbg(&priv->dev,
1897*4882a593Smuzhiyun "alloc vlan %d idx %d slave %d port %d\n",
1898*4882a593Smuzhiyun (int)(vp_admin->default_vlan),
1899*4882a593Smuzhiyun admin_vlan_ix, slave, port);
1900*4882a593Smuzhiyun }
1901*4882a593Smuzhiyun
1902*4882a593Smuzhiyun /* save original vlan ix and vlan id */
1903*4882a593Smuzhiyun work->orig_vlan_id = vp_oper->state.default_vlan;
1904*4882a593Smuzhiyun work->orig_vlan_ix = vp_oper->vlan_idx;
1905*4882a593Smuzhiyun
1906*4882a593Smuzhiyun /* handle new qos */
1907*4882a593Smuzhiyun if (vp_oper->state.default_qos != vp_admin->default_qos)
1908*4882a593Smuzhiyun work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;
1909*4882a593Smuzhiyun
1910*4882a593Smuzhiyun if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
1911*4882a593Smuzhiyun vp_oper->vlan_idx = admin_vlan_ix;
1912*4882a593Smuzhiyun
1913*4882a593Smuzhiyun vp_oper->state.default_vlan = vp_admin->default_vlan;
1914*4882a593Smuzhiyun vp_oper->state.default_qos = vp_admin->default_qos;
1915*4882a593Smuzhiyun vp_oper->state.vlan_proto = vp_admin->vlan_proto;
1916*4882a593Smuzhiyun vp_oper->state.link_state = vp_admin->link_state;
1917*4882a593Smuzhiyun vp_oper->state.qos_vport = vp_admin->qos_vport;
1918*4882a593Smuzhiyun
1919*4882a593Smuzhiyun if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE)
1920*4882a593Smuzhiyun work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;
1921*4882a593Smuzhiyun
1922*4882a593Smuzhiyun /* iterate over QPs owned by this slave, using UPDATE_QP */
1923*4882a593Smuzhiyun work->port = port;
1924*4882a593Smuzhiyun work->slave = slave;
1925*4882a593Smuzhiyun work->qos = vp_oper->state.default_qos;
1926*4882a593Smuzhiyun work->qos_vport = vp_oper->state.qos_vport;
1927*4882a593Smuzhiyun work->vlan_id = vp_oper->state.default_vlan;
1928*4882a593Smuzhiyun work->vlan_ix = vp_oper->vlan_idx;
1929*4882a593Smuzhiyun work->vlan_proto = vp_oper->state.vlan_proto;
1930*4882a593Smuzhiyun work->priv = priv;
1931*4882a593Smuzhiyun INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
1932*4882a593Smuzhiyun queue_work(priv->mfunc.master.comm_wq, &work->work);
1933*4882a593Smuzhiyun
1934*4882a593Smuzhiyun return 0;
1935*4882a593Smuzhiyun }
1936*4882a593Smuzhiyun
mlx4_set_default_port_qos(struct mlx4_dev * dev,int port)1937*4882a593Smuzhiyun static void mlx4_set_default_port_qos(struct mlx4_dev *dev, int port)
1938*4882a593Smuzhiyun {
1939*4882a593Smuzhiyun struct mlx4_qos_manager *port_qos_ctl;
1940*4882a593Smuzhiyun struct mlx4_priv *priv = mlx4_priv(dev);
1941*4882a593Smuzhiyun
1942*4882a593Smuzhiyun port_qos_ctl = &priv->mfunc.master.qos_ctl[port];
1943*4882a593Smuzhiyun bitmap_zero(port_qos_ctl->priority_bm, MLX4_NUM_UP);
1944*4882a593Smuzhiyun
1945*4882a593Smuzhiyun /* Enable only default prio at PF init routine */
1946*4882a593Smuzhiyun set_bit(MLX4_DEFAULT_QOS_PRIO, port_qos_ctl->priority_bm);
1947*4882a593Smuzhiyun }
1948*4882a593Smuzhiyun
mlx4_allocate_port_vpps(struct mlx4_dev * dev,int port)1949*4882a593Smuzhiyun static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port)
1950*4882a593Smuzhiyun {
1951*4882a593Smuzhiyun int i;
1952*4882a593Smuzhiyun int err;
1953*4882a593Smuzhiyun int num_vfs;
1954*4882a593Smuzhiyun u16 available_vpp;
1955*4882a593Smuzhiyun u8 vpp_param[MLX4_NUM_UP];
1956*4882a593Smuzhiyun struct mlx4_qos_manager *port_qos;
1957*4882a593Smuzhiyun struct mlx4_priv *priv = mlx4_priv(dev);
1958*4882a593Smuzhiyun
1959*4882a593Smuzhiyun err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param);
1960*4882a593Smuzhiyun if (err) {
1961*4882a593Smuzhiyun mlx4_info(dev, "Failed query available VPPs\n");
1962*4882a593Smuzhiyun return;
1963*4882a593Smuzhiyun }
1964*4882a593Smuzhiyun
1965*4882a593Smuzhiyun port_qos = &priv->mfunc.master.qos_ctl[port];
1966*4882a593Smuzhiyun num_vfs = (available_vpp /
1967*4882a593Smuzhiyun bitmap_weight(port_qos->priority_bm, MLX4_NUM_UP));
1968*4882a593Smuzhiyun
1969*4882a593Smuzhiyun for (i = 0; i < MLX4_NUM_UP; i++) {
1970*4882a593Smuzhiyun if (test_bit(i, port_qos->priority_bm))
1971*4882a593Smuzhiyun vpp_param[i] = num_vfs;
1972*4882a593Smuzhiyun }
1973*4882a593Smuzhiyun
1974*4882a593Smuzhiyun err = mlx4_ALLOCATE_VPP_set(dev, port, vpp_param);
1975*4882a593Smuzhiyun if (err) {
1976*4882a593Smuzhiyun mlx4_info(dev, "Failed allocating VPPs\n");
1977*4882a593Smuzhiyun return;
1978*4882a593Smuzhiyun }
1979*4882a593Smuzhiyun
1980*4882a593Smuzhiyun /* Query actual allocated VPP, just to make sure */
1981*4882a593Smuzhiyun err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param);
1982*4882a593Smuzhiyun if (err) {
1983*4882a593Smuzhiyun mlx4_info(dev, "Failed query available VPPs\n");
1984*4882a593Smuzhiyun return;
1985*4882a593Smuzhiyun }
1986*4882a593Smuzhiyun
1987*4882a593Smuzhiyun port_qos->num_of_qos_vfs = num_vfs;
1988*4882a593Smuzhiyun mlx4_dbg(dev, "Port %d Available VPPs %d\n", port, available_vpp);
1989*4882a593Smuzhiyun
1990*4882a593Smuzhiyun for (i = 0; i < MLX4_NUM_UP; i++)
1991*4882a593Smuzhiyun mlx4_dbg(dev, "Port %d UP %d Allocated %d VPPs\n", port, i,
1992*4882a593Smuzhiyun vpp_param[i]);
1993*4882a593Smuzhiyun }
1994*4882a593Smuzhiyun
/* Apply each active port's admin (requested) vport configuration to the
 * operational state for @slave and reserve the HW resources the
 * configuration needs: a VLAN table index when a default VLAN (VST mode)
 * is set, and a MAC table index when spoof-checking is enabled.
 *
 * Returns 0 on success or a negative errno from VLAN/MAC registration.
 * NOTE(review): on failure, resources registered for earlier ports in
 * this loop are not rolled back here — presumably the caller resets the
 * slave, which reaches mlx4_master_deactivate_admin_state(); confirm.
 */
static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
{
	int port, err;
	struct mlx4_vport_state *vp_admin;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_slave_state *slave_state =
		&priv->mfunc.master.slave_state[slave];
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
			&priv->dev, slave);
	/* Ports are 1-based; derive this slave's port range from its
	 * active-port bitmap (bit port-1 set => port active).
	 */
	int min_port = find_first_bit(actv_ports.ports,
				      priv->dev.caps.num_ports) + 1;
	int max_port = min_port - 1 +
		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);

	for (port = min_port; port <= max_port; port++) {
		if (!test_bit(port - 1, actv_ports.ports))
			continue;
		priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
			priv->mfunc.master.vf_admin[slave].enable_smi[port];
		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
		vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
		/* Only propagate 802.1ad (QinQ) VST settings when the slave
		 * advertised support for them; otherwise the operational
		 * VLAN settings are left unchanged.
		 */
		if (vp_admin->vlan_proto != htons(ETH_P_8021AD) ||
		    slave_state->vst_qinq_supported) {
			vp_oper->state.vlan_proto = vp_admin->vlan_proto;
			vp_oper->state.default_vlan = vp_admin->default_vlan;
			vp_oper->state.default_qos = vp_admin->default_qos;
		}
		vp_oper->state.link_state = vp_admin->link_state;
		vp_oper->state.mac = vp_admin->mac;
		vp_oper->state.spoofchk = vp_admin->spoofchk;
		vp_oper->state.tx_rate = vp_admin->tx_rate;
		vp_oper->state.qos_vport = vp_admin->qos_vport;
		vp_oper->state.guid = vp_admin->guid;

		/* VST mode: reserve a VLAN table entry for the default VLAN.
		 * On failure fall back to VGT (no VLAN enforced) and abort.
		 */
		if (MLX4_VGT != vp_admin->default_vlan) {
			err = __mlx4_register_vlan(&priv->dev, port,
						   vp_admin->default_vlan, &(vp_oper->vlan_idx));
			if (err) {
				vp_oper->vlan_idx = NO_INDX;
				vp_oper->state.default_vlan = MLX4_VGT;
				vp_oper->state.vlan_proto = htons(ETH_P_8021Q);
				mlx4_warn(&priv->dev,
					  "No vlan resources slave %d, port %d\n",
					  slave, port);
				return err;
			}
			mlx4_dbg(&priv->dev, "alloc vlan %d idx  %d slave %d port %d\n",
				 (int)(vp_oper->state.default_vlan),
				 vp_oper->vlan_idx, slave, port);
		}
		/* Spoof-checking requires the admin MAC pinned in the MAC
		 * table; __mlx4_register_mac() returns a negative errno or
		 * the allocated index.
		 */
		if (vp_admin->spoofchk) {
			vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
							       port,
							       vp_admin->mac);
			if (0 > vp_oper->mac_idx) {
				err = vp_oper->mac_idx;
				vp_oper->mac_idx = NO_INDX;
				mlx4_warn(&priv->dev,
					  "No mac resources slave %d, port %d\n",
					  slave, port);
				return err;
			}
			mlx4_dbg(&priv->dev, "alloc mac %llx idx  %d slave %d port %d\n",
				 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
		}
	}
	return 0;
}
2063*4882a593Smuzhiyun
mlx4_master_deactivate_admin_state(struct mlx4_priv * priv,int slave)2064*4882a593Smuzhiyun static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
2065*4882a593Smuzhiyun {
2066*4882a593Smuzhiyun int port;
2067*4882a593Smuzhiyun struct mlx4_vport_oper_state *vp_oper;
2068*4882a593Smuzhiyun struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
2069*4882a593Smuzhiyun &priv->dev, slave);
2070*4882a593Smuzhiyun int min_port = find_first_bit(actv_ports.ports,
2071*4882a593Smuzhiyun priv->dev.caps.num_ports) + 1;
2072*4882a593Smuzhiyun int max_port = min_port - 1 +
2073*4882a593Smuzhiyun bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
2074*4882a593Smuzhiyun
2075*4882a593Smuzhiyun
2076*4882a593Smuzhiyun for (port = min_port; port <= max_port; port++) {
2077*4882a593Smuzhiyun if (!test_bit(port - 1, actv_ports.ports))
2078*4882a593Smuzhiyun continue;
2079*4882a593Smuzhiyun priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
2080*4882a593Smuzhiyun MLX4_VF_SMI_DISABLED;
2081*4882a593Smuzhiyun vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2082*4882a593Smuzhiyun if (NO_INDX != vp_oper->vlan_idx) {
2083*4882a593Smuzhiyun __mlx4_unregister_vlan(&priv->dev,
2084*4882a593Smuzhiyun port, vp_oper->state.default_vlan);
2085*4882a593Smuzhiyun vp_oper->vlan_idx = NO_INDX;
2086*4882a593Smuzhiyun }
2087*4882a593Smuzhiyun if (NO_INDX != vp_oper->mac_idx) {
2088*4882a593Smuzhiyun __mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
2089*4882a593Smuzhiyun vp_oper->mac_idx = NO_INDX;
2090*4882a593Smuzhiyun }
2091*4882a593Smuzhiyun }
2092*4882a593Smuzhiyun return;
2093*4882a593Smuzhiyun }
2094*4882a593Smuzhiyun
/* Process one command received from @slave over the comm channel.
 *
 * Flow control uses a single toggle bit: each new slave command flips
 * the toggle in slave_write, and the master acknowledges by publishing
 * the matching toggle (bit 31) plus a 31-bit reply payload in
 * slave_read.  A toggle mismatch means the slave is out of sync and is
 * reset.  The slave delivers its VHCR DMA address 16 bits at a time via
 * the strictly-ordered VHCR0/VHCR1/VHCR2/VHCR_EN sequence; VHCR_POST
 * then executes the command block the slave posted in that page.
 */
static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
			       u16 param, u8 toggle)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	u32 reply;
	u8 is_going_down = 0;
	int i;
	unsigned long flags;

	/* Advance our toggle; the slave's toggle must now match it */
	slave_state[slave].comm_toggle ^= 1;
	reply = (u32) slave_state[slave].comm_toggle << 31;
	if (toggle != slave_state[slave].comm_toggle) {
		mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
			  toggle, slave);
		goto reset_slave;
	}
	if (cmd == MLX4_COMM_CMD_RESET) {
		mlx4_warn(dev, "Received reset from slave:%d\n", slave);
		/* Drop the slave's operational state and event-EQ mappings */
		slave_state[slave].active = false;
		slave_state[slave].old_vlan_api = false;
		slave_state[slave].vst_qinq_supported = false;
		mlx4_master_deactivate_admin_state(priv, slave);
		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
			slave_state[slave].event_eq[i].eqn = -1;
			slave_state[slave].event_eq[i].token = 0;
		}
		/*check if we are in the middle of FLR process,
		if so return "retry" status to the slave*/
		if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
			goto inform_slave_state;

		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);

		/* write the version in the event field */
		reply |= mlx4_comm_get_version();

		goto reset_slave;
	}
	/*command from slave in the middle of FLR*/
	if (cmd != MLX4_COMM_CMD_RESET &&
	    MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
		mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) in the middle of FLR\n",
			  slave, cmd);
		return;
	}

	switch (cmd) {
	case MLX4_COMM_CMD_VHCR0:
		/* VHCR DMA address bits 63..48; only valid after RESET */
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
			goto reset_slave;
		slave_state[slave].vhcr_dma = ((u64) param) << 48;
		priv->mfunc.master.slave_state[slave].cookie = 0;
		break;
	case MLX4_COMM_CMD_VHCR1:
		/* VHCR DMA address bits 47..32 */
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
			goto reset_slave;
		slave_state[slave].vhcr_dma |= ((u64) param) << 32;
		break;
	case MLX4_COMM_CMD_VHCR2:
		/* VHCR DMA address bits 31..16 */
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
			goto reset_slave;
		slave_state[slave].vhcr_dma |= ((u64) param) << 16;
		break;
	case MLX4_COMM_CMD_VHCR_EN:
		/* Final address bits; activate the slave's admin config */
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
			goto reset_slave;
		slave_state[slave].vhcr_dma |= param;
		if (mlx4_master_activate_admin_state(priv, slave))
			goto reset_slave;
		slave_state[slave].active = true;
		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
		break;
	case MLX4_COMM_CMD_VHCR_POST:
		if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
		    (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) {
			mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n",
				  slave, cmd, slave_state[slave].last_cmd);
			goto reset_slave;
		}

		/* Serialize VHCR execution across slaves */
		mutex_lock(&priv->cmd.slave_cmd_mutex);
		if (mlx4_master_process_vhcr(dev, slave, NULL)) {
			mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
				 slave);
			mutex_unlock(&priv->cmd.slave_cmd_mutex);
			goto reset_slave;
		}
		mutex_unlock(&priv->cmd.slave_cmd_mutex);
		break;
	default:
		mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
		goto reset_slave;
	}
	/* Record last_cmd unless the slave is being torn down concurrently */
	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
	if (!slave_state[slave].is_slave_going_down)
		slave_state[slave].last_cmd = cmd;
	else
		is_going_down = 1;
	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
	if (is_going_down) {
		mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
			  cmd, slave);
		return;
	}
	/* Ack the slave: publish our toggle plus reply data in slave_read */
	__raw_writel((__force u32) cpu_to_be32(reply),
		     &priv->mfunc.comm[slave].slave_read);

	return;

reset_slave:
	/* cleanup any slave resources */
	if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_delete_all_resources_for_slave(dev, slave);

	if (cmd != MLX4_COMM_CMD_RESET) {
		mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
			  slave, cmd);
		/* Turn on internal error letting slave reset itself immeditaly,
		 * otherwise it might take till timeout on command is passed
		 */
		reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
	}

	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
	if (!slave_state[slave].is_slave_going_down)
		slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
	/*with slave in the middle of flr, no need to clean resources again.*/
inform_slave_state:
	memset(&slave_state[slave].event_eq, 0,
	       sizeof(struct mlx4_slave_event_eq_info));
	__raw_writel((__force u32) cpu_to_be32(reply),
		     &priv->mfunc.comm[slave].slave_read);
	wmb();
}
2231*4882a593Smuzhiyun
/* Master-side comm-channel worker ("master command processing").
 *
 * Scans the armed-slave bit vector, dispatches every pending slave
 * command to mlx4_master_do_cmd(), then re-arms the channel.  A slave
 * has a pending command when the toggle (bit 31) it wrote to
 * slave_write differs from the toggle in slave_read (our last ack).
 */
void mlx4_master_comm_channel(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work,
			     struct mlx4_mfunc_master_ctx,
			     comm_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv =
		container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	__be32 *bit_vec;
	u32 comm_cmd;
	u32 vec;
	int i, j, slave;
	int toggle;
	int served = 0;
	int reported = 0;
	u32 slt;

	bit_vec = master->comm_arm_bit_vector;
	for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
		vec = be32_to_cpu(bit_vec[i]);
		for (j = 0; j < 32; j++) {
			if (!(vec & (1 << j)))
				continue;
			++reported;
			slave = (i * 32) + j;
			comm_cmd = swab32(readl(
					  &mfunc->comm[slave].slave_write));
			slt = swab32(readl(&mfunc->comm[slave].slave_read))
				>> 31;
			toggle = comm_cmd >> 31;
			if (toggle != slt) {
				/* Our cached toggle can lag the published
				 * ack; resync it from slave_read before
				 * handling the command.
				 */
				if (master->slave_state[slave].comm_toggle
				    != slt) {
					pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
						slave, slt,
						master->slave_state[slave].comm_toggle);
					master->slave_state[slave].comm_toggle =
						slt;
				}
				/* cmd = bits 23..16, param = bits 15..0 */
				mlx4_master_do_cmd(dev, slave,
						   comm_cmd >> 16 & 0xff,
						   comm_cmd & 0xffff, toggle);
				++served;
			}
		}
	}

	if (reported && reported != served)
		mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
			  reported, served);

	/* Re-arm so the EQ fires again on the next slave write */
	if (mlx4_ARM_COMM_CHANNEL(dev))
		mlx4_warn(dev, "Failed to arm comm channel events\n");
}
2290*4882a593Smuzhiyun
/* Slave-side: align the driver's command toggle with whatever state the
 * comm channel was left in (by firmware or a previous driver instance).
 *
 * Polls until the slave_write and slave_read toggles (bit 31) agree and
 * adopts that value.  An all-ones readback is treated as "PCI possibly
 * offline": retry for up to 30s (5s otherwise), aborting early with -EIO
 * if device removal has been requested.  If the toggles never converge,
 * the channel is assumed stale from a misbehaved VM and is zeroed.
 *
 * Returns 0 on success (comm_toggle synced), -EIO on requested removal.
 */
static int sync_toggles(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 wr_toggle;
	u32 rd_toggle;
	unsigned long end;

	wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write));
	if (wr_toggle == 0xffffffff)
		end = jiffies + msecs_to_jiffies(30000);
	else
		end = jiffies + msecs_to_jiffies(5000);

	while (time_before(jiffies, end)) {
		rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
		if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
			/* PCI might be offline */

			/* If device removal has been requested,
			 * do not continue retrying.
			 */
			if (dev->persist->interface_state &
			    MLX4_INTERFACE_STATE_NOWAIT) {
				mlx4_warn(dev,
					  "communication channel is offline\n");
				return -EIO;
			}

			msleep(100);
			wr_toggle = swab32(readl(&priv->mfunc.comm->
					   slave_write));
			continue;
		}

		if (rd_toggle >> 31 == wr_toggle >> 31) {
			priv->cmd.comm_toggle = rd_toggle >> 31;
			return 0;
		}

		cond_resched();
	}

	/*
	 * we could reach here if for example the previous VM using this
	 * function misbehaved and left the channel with unsynced state. We
	 * should fix this here and give this VM a chance to use a properly
	 * synced channel
	 */
	mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
	priv->cmd.comm_toggle = 0;

	return 0;
}
2346*4882a593Smuzhiyun
mlx4_multi_func_init(struct mlx4_dev * dev)2347*4882a593Smuzhiyun int mlx4_multi_func_init(struct mlx4_dev *dev)
2348*4882a593Smuzhiyun {
2349*4882a593Smuzhiyun struct mlx4_priv *priv = mlx4_priv(dev);
2350*4882a593Smuzhiyun struct mlx4_slave_state *s_state;
2351*4882a593Smuzhiyun int i, j, err, port;
2352*4882a593Smuzhiyun
2353*4882a593Smuzhiyun if (mlx4_is_master(dev))
2354*4882a593Smuzhiyun priv->mfunc.comm =
2355*4882a593Smuzhiyun ioremap(pci_resource_start(dev->persist->pdev,
2356*4882a593Smuzhiyun priv->fw.comm_bar) +
2357*4882a593Smuzhiyun priv->fw.comm_base, MLX4_COMM_PAGESIZE);
2358*4882a593Smuzhiyun else
2359*4882a593Smuzhiyun priv->mfunc.comm =
2360*4882a593Smuzhiyun ioremap(pci_resource_start(dev->persist->pdev, 2) +
2361*4882a593Smuzhiyun MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
2362*4882a593Smuzhiyun if (!priv->mfunc.comm) {
2363*4882a593Smuzhiyun mlx4_err(dev, "Couldn't map communication vector\n");
2364*4882a593Smuzhiyun goto err_vhcr;
2365*4882a593Smuzhiyun }
2366*4882a593Smuzhiyun
2367*4882a593Smuzhiyun if (mlx4_is_master(dev)) {
2368*4882a593Smuzhiyun struct mlx4_vf_oper_state *vf_oper;
2369*4882a593Smuzhiyun struct mlx4_vf_admin_state *vf_admin;
2370*4882a593Smuzhiyun
2371*4882a593Smuzhiyun priv->mfunc.master.slave_state =
2372*4882a593Smuzhiyun kcalloc(dev->num_slaves,
2373*4882a593Smuzhiyun sizeof(struct mlx4_slave_state),
2374*4882a593Smuzhiyun GFP_KERNEL);
2375*4882a593Smuzhiyun if (!priv->mfunc.master.slave_state)
2376*4882a593Smuzhiyun goto err_comm;
2377*4882a593Smuzhiyun
2378*4882a593Smuzhiyun priv->mfunc.master.vf_admin =
2379*4882a593Smuzhiyun kcalloc(dev->num_slaves,
2380*4882a593Smuzhiyun sizeof(struct mlx4_vf_admin_state),
2381*4882a593Smuzhiyun GFP_KERNEL);
2382*4882a593Smuzhiyun if (!priv->mfunc.master.vf_admin)
2383*4882a593Smuzhiyun goto err_comm_admin;
2384*4882a593Smuzhiyun
2385*4882a593Smuzhiyun priv->mfunc.master.vf_oper =
2386*4882a593Smuzhiyun kcalloc(dev->num_slaves,
2387*4882a593Smuzhiyun sizeof(struct mlx4_vf_oper_state),
2388*4882a593Smuzhiyun GFP_KERNEL);
2389*4882a593Smuzhiyun if (!priv->mfunc.master.vf_oper)
2390*4882a593Smuzhiyun goto err_comm_oper;
2391*4882a593Smuzhiyun
2392*4882a593Smuzhiyun for (i = 0; i < dev->num_slaves; ++i) {
2393*4882a593Smuzhiyun vf_admin = &priv->mfunc.master.vf_admin[i];
2394*4882a593Smuzhiyun vf_oper = &priv->mfunc.master.vf_oper[i];
2395*4882a593Smuzhiyun s_state = &priv->mfunc.master.slave_state[i];
2396*4882a593Smuzhiyun s_state->last_cmd = MLX4_COMM_CMD_RESET;
2397*4882a593Smuzhiyun s_state->vst_qinq_supported = false;
2398*4882a593Smuzhiyun mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
2399*4882a593Smuzhiyun for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
2400*4882a593Smuzhiyun s_state->event_eq[j].eqn = -1;
2401*4882a593Smuzhiyun __raw_writel((__force u32) 0,
2402*4882a593Smuzhiyun &priv->mfunc.comm[i].slave_write);
2403*4882a593Smuzhiyun __raw_writel((__force u32) 0,
2404*4882a593Smuzhiyun &priv->mfunc.comm[i].slave_read);
2405*4882a593Smuzhiyun for (port = 1; port <= MLX4_MAX_PORTS; port++) {
2406*4882a593Smuzhiyun struct mlx4_vport_state *admin_vport;
2407*4882a593Smuzhiyun struct mlx4_vport_state *oper_vport;
2408*4882a593Smuzhiyun
2409*4882a593Smuzhiyun s_state->vlan_filter[port] =
2410*4882a593Smuzhiyun kzalloc(sizeof(struct mlx4_vlan_fltr),
2411*4882a593Smuzhiyun GFP_KERNEL);
2412*4882a593Smuzhiyun if (!s_state->vlan_filter[port]) {
2413*4882a593Smuzhiyun if (--port)
2414*4882a593Smuzhiyun kfree(s_state->vlan_filter[port]);
2415*4882a593Smuzhiyun goto err_slaves;
2416*4882a593Smuzhiyun }
2417*4882a593Smuzhiyun
2418*4882a593Smuzhiyun admin_vport = &vf_admin->vport[port];
2419*4882a593Smuzhiyun oper_vport = &vf_oper->vport[port].state;
2420*4882a593Smuzhiyun INIT_LIST_HEAD(&s_state->mcast_filters[port]);
2421*4882a593Smuzhiyun admin_vport->default_vlan = MLX4_VGT;
2422*4882a593Smuzhiyun oper_vport->default_vlan = MLX4_VGT;
2423*4882a593Smuzhiyun admin_vport->qos_vport =
2424*4882a593Smuzhiyun MLX4_VPP_DEFAULT_VPORT;
2425*4882a593Smuzhiyun oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT;
2426*4882a593Smuzhiyun admin_vport->vlan_proto = htons(ETH_P_8021Q);
2427*4882a593Smuzhiyun oper_vport->vlan_proto = htons(ETH_P_8021Q);
2428*4882a593Smuzhiyun vf_oper->vport[port].vlan_idx = NO_INDX;
2429*4882a593Smuzhiyun vf_oper->vport[port].mac_idx = NO_INDX;
2430*4882a593Smuzhiyun mlx4_set_random_admin_guid(dev, i, port);
2431*4882a593Smuzhiyun }
2432*4882a593Smuzhiyun spin_lock_init(&s_state->lock);
2433*4882a593Smuzhiyun }
2434*4882a593Smuzhiyun
2435*4882a593Smuzhiyun if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP) {
2436*4882a593Smuzhiyun for (port = 1; port <= dev->caps.num_ports; port++) {
2437*4882a593Smuzhiyun if (mlx4_is_eth(dev, port)) {
2438*4882a593Smuzhiyun mlx4_set_default_port_qos(dev, port);
2439*4882a593Smuzhiyun mlx4_allocate_port_vpps(dev, port);
2440*4882a593Smuzhiyun }
2441*4882a593Smuzhiyun }
2442*4882a593Smuzhiyun }
2443*4882a593Smuzhiyun
2444*4882a593Smuzhiyun memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
2445*4882a593Smuzhiyun priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
2446*4882a593Smuzhiyun INIT_WORK(&priv->mfunc.master.comm_work,
2447*4882a593Smuzhiyun mlx4_master_comm_channel);
2448*4882a593Smuzhiyun INIT_WORK(&priv->mfunc.master.slave_event_work,
2449*4882a593Smuzhiyun mlx4_gen_slave_eqe);
2450*4882a593Smuzhiyun INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
2451*4882a593Smuzhiyun mlx4_master_handle_slave_flr);
2452*4882a593Smuzhiyun spin_lock_init(&priv->mfunc.master.slave_state_lock);
2453*4882a593Smuzhiyun spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
2454*4882a593Smuzhiyun priv->mfunc.master.comm_wq =
2455*4882a593Smuzhiyun create_singlethread_workqueue("mlx4_comm");
2456*4882a593Smuzhiyun if (!priv->mfunc.master.comm_wq)
2457*4882a593Smuzhiyun goto err_slaves;
2458*4882a593Smuzhiyun
2459*4882a593Smuzhiyun if (mlx4_init_resource_tracker(dev))
2460*4882a593Smuzhiyun goto err_thread;
2461*4882a593Smuzhiyun
2462*4882a593Smuzhiyun } else {
2463*4882a593Smuzhiyun err = sync_toggles(dev);
2464*4882a593Smuzhiyun if (err) {
2465*4882a593Smuzhiyun mlx4_err(dev, "Couldn't sync toggles\n");
2466*4882a593Smuzhiyun goto err_comm;
2467*4882a593Smuzhiyun }
2468*4882a593Smuzhiyun }
2469*4882a593Smuzhiyun return 0;
2470*4882a593Smuzhiyun
2471*4882a593Smuzhiyun err_thread:
2472*4882a593Smuzhiyun flush_workqueue(priv->mfunc.master.comm_wq);
2473*4882a593Smuzhiyun destroy_workqueue(priv->mfunc.master.comm_wq);
2474*4882a593Smuzhiyun err_slaves:
2475*4882a593Smuzhiyun while (i--) {
2476*4882a593Smuzhiyun for (port = 1; port <= MLX4_MAX_PORTS; port++)
2477*4882a593Smuzhiyun kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2478*4882a593Smuzhiyun }
2479*4882a593Smuzhiyun kfree(priv->mfunc.master.vf_oper);
2480*4882a593Smuzhiyun err_comm_oper:
2481*4882a593Smuzhiyun kfree(priv->mfunc.master.vf_admin);
2482*4882a593Smuzhiyun err_comm_admin:
2483*4882a593Smuzhiyun kfree(priv->mfunc.master.slave_state);
2484*4882a593Smuzhiyun err_comm:
2485*4882a593Smuzhiyun iounmap(priv->mfunc.comm);
2486*4882a593Smuzhiyun priv->mfunc.comm = NULL;
2487*4882a593Smuzhiyun err_vhcr:
2488*4882a593Smuzhiyun dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2489*4882a593Smuzhiyun priv->mfunc.vhcr,
2490*4882a593Smuzhiyun priv->mfunc.vhcr_dma);
2491*4882a593Smuzhiyun priv->mfunc.vhcr = NULL;
2492*4882a593Smuzhiyun return -ENOMEM;
2493*4882a593Smuzhiyun }
2494*4882a593Smuzhiyun
mlx4_cmd_init(struct mlx4_dev * dev)2495*4882a593Smuzhiyun int mlx4_cmd_init(struct mlx4_dev *dev)
2496*4882a593Smuzhiyun {
2497*4882a593Smuzhiyun struct mlx4_priv *priv = mlx4_priv(dev);
2498*4882a593Smuzhiyun int flags = 0;
2499*4882a593Smuzhiyun
2500*4882a593Smuzhiyun if (!priv->cmd.initialized) {
2501*4882a593Smuzhiyun init_rwsem(&priv->cmd.switch_sem);
2502*4882a593Smuzhiyun mutex_init(&priv->cmd.slave_cmd_mutex);
2503*4882a593Smuzhiyun sema_init(&priv->cmd.poll_sem, 1);
2504*4882a593Smuzhiyun priv->cmd.use_events = 0;
2505*4882a593Smuzhiyun priv->cmd.toggle = 1;
2506*4882a593Smuzhiyun priv->cmd.initialized = 1;
2507*4882a593Smuzhiyun flags |= MLX4_CMD_CLEANUP_STRUCT;
2508*4882a593Smuzhiyun }
2509*4882a593Smuzhiyun
2510*4882a593Smuzhiyun if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
2511*4882a593Smuzhiyun priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev,
2512*4882a593Smuzhiyun 0) + MLX4_HCR_BASE, MLX4_HCR_SIZE);
2513*4882a593Smuzhiyun if (!priv->cmd.hcr) {
2514*4882a593Smuzhiyun mlx4_err(dev, "Couldn't map command register\n");
2515*4882a593Smuzhiyun goto err;
2516*4882a593Smuzhiyun }
2517*4882a593Smuzhiyun flags |= MLX4_CMD_CLEANUP_HCR;
2518*4882a593Smuzhiyun }
2519*4882a593Smuzhiyun
2520*4882a593Smuzhiyun if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
2521*4882a593Smuzhiyun priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev,
2522*4882a593Smuzhiyun PAGE_SIZE,
2523*4882a593Smuzhiyun &priv->mfunc.vhcr_dma,
2524*4882a593Smuzhiyun GFP_KERNEL);
2525*4882a593Smuzhiyun if (!priv->mfunc.vhcr)
2526*4882a593Smuzhiyun goto err;
2527*4882a593Smuzhiyun
2528*4882a593Smuzhiyun flags |= MLX4_CMD_CLEANUP_VHCR;
2529*4882a593Smuzhiyun }
2530*4882a593Smuzhiyun
2531*4882a593Smuzhiyun if (!priv->cmd.pool) {
2532*4882a593Smuzhiyun priv->cmd.pool = dma_pool_create("mlx4_cmd",
2533*4882a593Smuzhiyun &dev->persist->pdev->dev,
2534*4882a593Smuzhiyun MLX4_MAILBOX_SIZE,
2535*4882a593Smuzhiyun MLX4_MAILBOX_SIZE, 0);
2536*4882a593Smuzhiyun if (!priv->cmd.pool)
2537*4882a593Smuzhiyun goto err;
2538*4882a593Smuzhiyun
2539*4882a593Smuzhiyun flags |= MLX4_CMD_CLEANUP_POOL;
2540*4882a593Smuzhiyun }
2541*4882a593Smuzhiyun
2542*4882a593Smuzhiyun return 0;
2543*4882a593Smuzhiyun
2544*4882a593Smuzhiyun err:
2545*4882a593Smuzhiyun mlx4_cmd_cleanup(dev, flags);
2546*4882a593Smuzhiyun return -ENOMEM;
2547*4882a593Smuzhiyun }
2548*4882a593Smuzhiyun
mlx4_report_internal_err_comm_event(struct mlx4_dev * dev)2549*4882a593Smuzhiyun void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
2550*4882a593Smuzhiyun {
2551*4882a593Smuzhiyun struct mlx4_priv *priv = mlx4_priv(dev);
2552*4882a593Smuzhiyun int slave;
2553*4882a593Smuzhiyun u32 slave_read;
2554*4882a593Smuzhiyun
2555*4882a593Smuzhiyun /* If the comm channel has not yet been initialized,
2556*4882a593Smuzhiyun * skip reporting the internal error event to all
2557*4882a593Smuzhiyun * the communication channels.
2558*4882a593Smuzhiyun */
2559*4882a593Smuzhiyun if (!priv->mfunc.comm)
2560*4882a593Smuzhiyun return;
2561*4882a593Smuzhiyun
2562*4882a593Smuzhiyun /* Report an internal error event to all
2563*4882a593Smuzhiyun * communication channels.
2564*4882a593Smuzhiyun */
2565*4882a593Smuzhiyun for (slave = 0; slave < dev->num_slaves; slave++) {
2566*4882a593Smuzhiyun slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read));
2567*4882a593Smuzhiyun slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
2568*4882a593Smuzhiyun __raw_writel((__force u32)cpu_to_be32(slave_read),
2569*4882a593Smuzhiyun &priv->mfunc.comm[slave].slave_read);
2570*4882a593Smuzhiyun }
2571*4882a593Smuzhiyun }
2572*4882a593Smuzhiyun
mlx4_multi_func_cleanup(struct mlx4_dev * dev)2573*4882a593Smuzhiyun void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
2574*4882a593Smuzhiyun {
2575*4882a593Smuzhiyun struct mlx4_priv *priv = mlx4_priv(dev);
2576*4882a593Smuzhiyun int i, port;
2577*4882a593Smuzhiyun
2578*4882a593Smuzhiyun if (mlx4_is_master(dev)) {
2579*4882a593Smuzhiyun flush_workqueue(priv->mfunc.master.comm_wq);
2580*4882a593Smuzhiyun destroy_workqueue(priv->mfunc.master.comm_wq);
2581*4882a593Smuzhiyun for (i = 0; i < dev->num_slaves; i++) {
2582*4882a593Smuzhiyun for (port = 1; port <= MLX4_MAX_PORTS; port++)
2583*4882a593Smuzhiyun kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2584*4882a593Smuzhiyun }
2585*4882a593Smuzhiyun kfree(priv->mfunc.master.slave_state);
2586*4882a593Smuzhiyun kfree(priv->mfunc.master.vf_admin);
2587*4882a593Smuzhiyun kfree(priv->mfunc.master.vf_oper);
2588*4882a593Smuzhiyun dev->num_slaves = 0;
2589*4882a593Smuzhiyun }
2590*4882a593Smuzhiyun
2591*4882a593Smuzhiyun iounmap(priv->mfunc.comm);
2592*4882a593Smuzhiyun priv->mfunc.comm = NULL;
2593*4882a593Smuzhiyun }
2594*4882a593Smuzhiyun
/* Tear down command-interface resources selected by @cleanup_mask.
 * Each MLX4_CMD_CLEANUP_* bit releases one resource class, so callers
 * can unwind a partial initialization by passing only the bits for the
 * resources that were actually set up.
 */
void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	/* DMA pool backing command mailboxes (mlx4_alloc_cmd_mailbox()). */
	if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) {
		dma_pool_destroy(priv->cmd.pool);
		priv->cmd.pool = NULL;
	}

	/* The HCR mapping exists only on the PF; slaves have no HCR. */
	if (!mlx4_is_slave(dev) && priv->cmd.hcr &&
	    (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) {
		iounmap(priv->cmd.hcr);
		priv->cmd.hcr = NULL;
	}
	/* Virtual HCR page used in multi-function (SR-IOV) mode. */
	if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
	    (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
		dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
		priv->mfunc.vhcr = NULL;
	}
	if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT))
		priv->cmd.initialized = 0;
}
2618*4882a593Smuzhiyun
/*
 * Switch to using events to issue FW commands (can only be called
 * after event queue for command events has been initialized).
 *
 * Returns 0 on success or -ENOMEM if the command-context array cannot
 * be allocated.
 */
int mlx4_cmd_use_events(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int err = 0;

	priv->cmd.context = kmalloc_array(priv->cmd.max_cmds,
					  sizeof(struct mlx4_cmd_context),
					  GFP_KERNEL);
	if (!priv->cmd.context)
		return -ENOMEM;

	/* Keep slave-originated commands out while the mode changes. */
	if (mlx4_is_mfunc(dev))
		mutex_lock(&priv->cmd.slave_cmd_mutex);
	/* Exclusive hold of switch_sem serializes mode switches against
	 * command issuers that take it for reading.
	 */
	down_write(&priv->cmd.switch_sem);
	for (i = 0; i < priv->cmd.max_cmds; ++i) {
		priv->cmd.context[i].token = i;
		priv->cmd.context[i].next = i + 1;
		/* To support fatal error flow, initialize all
		 * cmd contexts to allow simulating completions
		 * with complete() at any time.
		 */
		init_completion(&priv->cmd.context[i].done);
	}

	/* Contexts form a singly-linked free list; -1 terminates it. */
	priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
	priv->cmd.free_head = 0;

	/* event_sem caps concurrent event-mode commands at max_cmds. */
	sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);

	/* token_mask = (smallest power of two >= max_cmds) - 1. */
	for (priv->cmd.token_mask = 1;
	     priv->cmd.token_mask < priv->cmd.max_cmds;
	     priv->cmd.token_mask <<= 1)
		; /* nothing */
	--priv->cmd.token_mask;

	/* Hold poll_sem for the whole event-mode period; it is released
	 * again in mlx4_cmd_use_polling().
	 */
	down(&priv->cmd.poll_sem);
	priv->cmd.use_events = 1;
	up_write(&priv->cmd.switch_sem);
	if (mlx4_is_mfunc(dev))
		mutex_unlock(&priv->cmd.slave_cmd_mutex);

	return err;
}
2667*4882a593Smuzhiyun
/*
 * Switch back to polling (used when shutting down the device)
 */
void mlx4_cmd_use_polling(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	/* Same locking order as mlx4_cmd_use_events(). */
	if (mlx4_is_mfunc(dev))
		mutex_lock(&priv->cmd.slave_cmd_mutex);
	down_write(&priv->cmd.switch_sem);
	priv->cmd.use_events = 0;

	/* Acquire every event_sem slot so that all in-flight event-mode
	 * commands have completed before their contexts are freed.
	 */
	for (i = 0; i < priv->cmd.max_cmds; ++i)
		down(&priv->cmd.event_sem);

	kfree(priv->cmd.context);
	priv->cmd.context = NULL;

	/* Release the poll_sem taken in mlx4_cmd_use_events(). */
	up(&priv->cmd.poll_sem);
	up_write(&priv->cmd.switch_sem);
	if (mlx4_is_mfunc(dev))
		mutex_unlock(&priv->cmd.slave_cmd_mutex);
}
2692*4882a593Smuzhiyun
mlx4_alloc_cmd_mailbox(struct mlx4_dev * dev)2693*4882a593Smuzhiyun struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
2694*4882a593Smuzhiyun {
2695*4882a593Smuzhiyun struct mlx4_cmd_mailbox *mailbox;
2696*4882a593Smuzhiyun
2697*4882a593Smuzhiyun mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL);
2698*4882a593Smuzhiyun if (!mailbox)
2699*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
2700*4882a593Smuzhiyun
2701*4882a593Smuzhiyun mailbox->buf = dma_pool_zalloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
2702*4882a593Smuzhiyun &mailbox->dma);
2703*4882a593Smuzhiyun if (!mailbox->buf) {
2704*4882a593Smuzhiyun kfree(mailbox);
2705*4882a593Smuzhiyun return ERR_PTR(-ENOMEM);
2706*4882a593Smuzhiyun }
2707*4882a593Smuzhiyun
2708*4882a593Smuzhiyun return mailbox;
2709*4882a593Smuzhiyun }
2710*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
2711*4882a593Smuzhiyun
mlx4_free_cmd_mailbox(struct mlx4_dev * dev,struct mlx4_cmd_mailbox * mailbox)2712*4882a593Smuzhiyun void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
2713*4882a593Smuzhiyun struct mlx4_cmd_mailbox *mailbox)
2714*4882a593Smuzhiyun {
2715*4882a593Smuzhiyun if (!mailbox)
2716*4882a593Smuzhiyun return;
2717*4882a593Smuzhiyun
2718*4882a593Smuzhiyun dma_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
2719*4882a593Smuzhiyun kfree(mailbox);
2720*4882a593Smuzhiyun }
2721*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
2722*4882a593Smuzhiyun
mlx4_comm_get_version(void)2723*4882a593Smuzhiyun u32 mlx4_comm_get_version(void)
2724*4882a593Smuzhiyun {
2725*4882a593Smuzhiyun return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
2726*4882a593Smuzhiyun }
2727*4882a593Smuzhiyun
mlx4_get_slave_indx(struct mlx4_dev * dev,int vf)2728*4882a593Smuzhiyun static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
2729*4882a593Smuzhiyun {
2730*4882a593Smuzhiyun if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
2731*4882a593Smuzhiyun mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n",
2732*4882a593Smuzhiyun vf, dev->persist->num_vfs);
2733*4882a593Smuzhiyun return -EINVAL;
2734*4882a593Smuzhiyun }
2735*4882a593Smuzhiyun
2736*4882a593Smuzhiyun return vf+1;
2737*4882a593Smuzhiyun }
2738*4882a593Smuzhiyun
/* Map a slave index (1..num_vfs) back to a zero-based VF number.
 * Returns -EINVAL for slave 0 (the PF) or an out-of-range index.
 */
int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
{
	if (slave < 1 || slave > dev->persist->num_vfs) {
		/* NOTE(review): the range check uses persist->num_vfs but the
		 * message prints dev->num_slaves — confirm these always agree.
		 */
		mlx4_err(dev,
			 "Bad slave number:%d (number of activated slaves: %lu)\n",
			 slave, dev->num_slaves);
		return -EINVAL;
	}
	return slave - 1;
}
2749*4882a593Smuzhiyun
/* Force-complete every event-mode command context with an internal-error
 * status so that waiters are woken instead of blocking forever (fatal
 * device error flow; relies on the contexts being pre-initialized in
 * mlx4_cmd_use_events()).
 */
void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_context *context;
	int i;

	/* NOTE(review): context_lock presumably serializes against the
	 * context array being freed in mlx4_cmd_use_polling() — confirm
	 * the teardown path takes the same lock.
	 */
	spin_lock(&priv->cmd.context_lock);
	if (priv->cmd.context) {
		for (i = 0; i < priv->cmd.max_cmds; ++i) {
			context = &priv->cmd.context[i];
			context->fw_status = CMD_STAT_INTERNAL_ERR;
			context->result =
				mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
			complete(&context->done);
		}
	}
	spin_unlock(&priv->cmd.context_lock);
}
2768*4882a593Smuzhiyun
mlx4_get_active_ports(struct mlx4_dev * dev,int slave)2769*4882a593Smuzhiyun struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
2770*4882a593Smuzhiyun {
2771*4882a593Smuzhiyun struct mlx4_active_ports actv_ports;
2772*4882a593Smuzhiyun int vf;
2773*4882a593Smuzhiyun
2774*4882a593Smuzhiyun bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);
2775*4882a593Smuzhiyun
2776*4882a593Smuzhiyun if (slave == 0) {
2777*4882a593Smuzhiyun bitmap_fill(actv_ports.ports, dev->caps.num_ports);
2778*4882a593Smuzhiyun return actv_ports;
2779*4882a593Smuzhiyun }
2780*4882a593Smuzhiyun
2781*4882a593Smuzhiyun vf = mlx4_get_vf_indx(dev, slave);
2782*4882a593Smuzhiyun if (vf < 0)
2783*4882a593Smuzhiyun return actv_ports;
2784*4882a593Smuzhiyun
2785*4882a593Smuzhiyun bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
2786*4882a593Smuzhiyun min((int)dev->dev_vfs[mlx4_get_vf_indx(dev, slave)].n_ports,
2787*4882a593Smuzhiyun dev->caps.num_ports));
2788*4882a593Smuzhiyun
2789*4882a593Smuzhiyun return actv_ports;
2790*4882a593Smuzhiyun }
2791*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mlx4_get_active_ports);
2792*4882a593Smuzhiyun
mlx4_slave_convert_port(struct mlx4_dev * dev,int slave,int port)2793*4882a593Smuzhiyun int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
2794*4882a593Smuzhiyun {
2795*4882a593Smuzhiyun unsigned n;
2796*4882a593Smuzhiyun struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2797*4882a593Smuzhiyun unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2798*4882a593Smuzhiyun
2799*4882a593Smuzhiyun if (port <= 0 || port > m)
2800*4882a593Smuzhiyun return -EINVAL;
2801*4882a593Smuzhiyun
2802*4882a593Smuzhiyun n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
2803*4882a593Smuzhiyun if (port <= n)
2804*4882a593Smuzhiyun port = n + 1;
2805*4882a593Smuzhiyun
2806*4882a593Smuzhiyun return port;
2807*4882a593Smuzhiyun }
2808*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);
2809*4882a593Smuzhiyun
mlx4_phys_to_slave_port(struct mlx4_dev * dev,int slave,int port)2810*4882a593Smuzhiyun int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
2811*4882a593Smuzhiyun {
2812*4882a593Smuzhiyun struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2813*4882a593Smuzhiyun if (test_bit(port - 1, actv_ports.ports))
2814*4882a593Smuzhiyun return port -
2815*4882a593Smuzhiyun find_first_bit(actv_ports.ports, dev->caps.num_ports);
2816*4882a593Smuzhiyun
2817*4882a593Smuzhiyun return -1;
2818*4882a593Smuzhiyun }
2819*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);
2820*4882a593Smuzhiyun
mlx4_phys_to_slaves_pport(struct mlx4_dev * dev,int port)2821*4882a593Smuzhiyun struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
2822*4882a593Smuzhiyun int port)
2823*4882a593Smuzhiyun {
2824*4882a593Smuzhiyun unsigned i;
2825*4882a593Smuzhiyun struct mlx4_slaves_pport slaves_pport;
2826*4882a593Smuzhiyun
2827*4882a593Smuzhiyun bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2828*4882a593Smuzhiyun
2829*4882a593Smuzhiyun if (port <= 0 || port > dev->caps.num_ports)
2830*4882a593Smuzhiyun return slaves_pport;
2831*4882a593Smuzhiyun
2832*4882a593Smuzhiyun for (i = 0; i < dev->persist->num_vfs + 1; i++) {
2833*4882a593Smuzhiyun struct mlx4_active_ports actv_ports =
2834*4882a593Smuzhiyun mlx4_get_active_ports(dev, i);
2835*4882a593Smuzhiyun if (test_bit(port - 1, actv_ports.ports))
2836*4882a593Smuzhiyun set_bit(i, slaves_pport.slaves);
2837*4882a593Smuzhiyun }
2838*4882a593Smuzhiyun
2839*4882a593Smuzhiyun return slaves_pport;
2840*4882a593Smuzhiyun }
2841*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);
2842*4882a593Smuzhiyun
mlx4_phys_to_slaves_pport_actv(struct mlx4_dev * dev,const struct mlx4_active_ports * crit_ports)2843*4882a593Smuzhiyun struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
2844*4882a593Smuzhiyun struct mlx4_dev *dev,
2845*4882a593Smuzhiyun const struct mlx4_active_ports *crit_ports)
2846*4882a593Smuzhiyun {
2847*4882a593Smuzhiyun unsigned i;
2848*4882a593Smuzhiyun struct mlx4_slaves_pport slaves_pport;
2849*4882a593Smuzhiyun
2850*4882a593Smuzhiyun bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2851*4882a593Smuzhiyun
2852*4882a593Smuzhiyun for (i = 0; i < dev->persist->num_vfs + 1; i++) {
2853*4882a593Smuzhiyun struct mlx4_active_ports actv_ports =
2854*4882a593Smuzhiyun mlx4_get_active_ports(dev, i);
2855*4882a593Smuzhiyun if (bitmap_equal(crit_ports->ports, actv_ports.ports,
2856*4882a593Smuzhiyun dev->caps.num_ports))
2857*4882a593Smuzhiyun set_bit(i, slaves_pport.slaves);
2858*4882a593Smuzhiyun }
2859*4882a593Smuzhiyun
2860*4882a593Smuzhiyun return slaves_pport;
2861*4882a593Smuzhiyun }
2862*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
2863*4882a593Smuzhiyun
mlx4_slaves_closest_port(struct mlx4_dev * dev,int slave,int port)2864*4882a593Smuzhiyun static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
2865*4882a593Smuzhiyun {
2866*4882a593Smuzhiyun struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2867*4882a593Smuzhiyun int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
2868*4882a593Smuzhiyun + 1;
2869*4882a593Smuzhiyun int max_port = min_port +
2870*4882a593Smuzhiyun bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2871*4882a593Smuzhiyun
2872*4882a593Smuzhiyun if (port < min_port)
2873*4882a593Smuzhiyun port = min_port;
2874*4882a593Smuzhiyun else if (port >= max_port)
2875*4882a593Smuzhiyun port = max_port - 1;
2876*4882a593Smuzhiyun
2877*4882a593Smuzhiyun return port;
2878*4882a593Smuzhiyun }
2879*4882a593Smuzhiyun
/* Program per-priority VPP rate limiting for @slave on @port.
 * Priorities enabled in the port's priority bitmap get @max_tx_rate as
 * their max average bandwidth; a zero rate disables enforcement and the
 * defaults queried from Vport 0 are kept.
 * Returns 0 on success or a negative errno.
 */
static int mlx4_set_vport_qos(struct mlx4_priv *priv, int slave, int port,
			      int max_tx_rate)
{
	int i;
	int err;
	struct mlx4_qos_manager *port_qos;
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_vport_qos_param vpp_qos[MLX4_NUM_UP];

	port_qos = &priv->mfunc.master.qos_ctl[port];
	memset(vpp_qos, 0, sizeof(struct mlx4_vport_qos_param) * MLX4_NUM_UP);

	/* Only the first num_of_qos_vfs slaves have VPP resources. */
	if (slave > port_qos->num_of_qos_vfs) {
		mlx4_info(dev, "No available VPP resources for this VF\n");
		return -EINVAL;
	}

	/* Query for default QoS values from Vport 0 is needed */
	err = mlx4_SET_VPORT_QOS_get(dev, port, 0, vpp_qos);
	if (err) {
		mlx4_info(dev, "Failed to query Vport 0 QoS values\n");
		return err;
	}

	for (i = 0; i < MLX4_NUM_UP; i++) {
		if (test_bit(i, port_qos->priority_bm) && max_tx_rate) {
			vpp_qos[i].max_avg_bw = max_tx_rate;
			vpp_qos[i].enable = 1;
		} else {
			/* if user supplied tx_rate == 0, meaning no rate limit
			 * configuration is required. so we are leaving the
			 * value of max_avg_bw as queried from Vport 0.
			 */
			vpp_qos[i].enable = 0;
		}
	}

	err = mlx4_SET_VPORT_QOS_set(dev, port, slave, vpp_qos);
	if (err) {
		mlx4_info(dev, "Failed to set Vport %d QoS values\n", slave);
		return err;
	}

	return 0;
}
2925*4882a593Smuzhiyun
mlx4_is_vf_vst_and_prio_qos(struct mlx4_dev * dev,int port,struct mlx4_vport_state * vf_admin)2926*4882a593Smuzhiyun static bool mlx4_is_vf_vst_and_prio_qos(struct mlx4_dev *dev, int port,
2927*4882a593Smuzhiyun struct mlx4_vport_state *vf_admin)
2928*4882a593Smuzhiyun {
2929*4882a593Smuzhiyun struct mlx4_qos_manager *info;
2930*4882a593Smuzhiyun struct mlx4_priv *priv = mlx4_priv(dev);
2931*4882a593Smuzhiyun
2932*4882a593Smuzhiyun if (!mlx4_is_master(dev) ||
2933*4882a593Smuzhiyun !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
2934*4882a593Smuzhiyun return false;
2935*4882a593Smuzhiyun
2936*4882a593Smuzhiyun info = &priv->mfunc.master.qos_ctl[port];
2937*4882a593Smuzhiyun
2938*4882a593Smuzhiyun if (vf_admin->default_vlan != MLX4_VGT &&
2939*4882a593Smuzhiyun test_bit(vf_admin->default_qos, info->priority_bm))
2940*4882a593Smuzhiyun return true;
2941*4882a593Smuzhiyun
2942*4882a593Smuzhiyun return false;
2943*4882a593Smuzhiyun }
2944*4882a593Smuzhiyun
/* Decide whether a VGT/VST state change to (@vlan, @qos) is allowed for
 * a VF that currently has a TX rate limit configured.  The change is
 * permitted if no rate is set, the VF is not in enforceable VST QoS
 * mode, or the requested state is itself a valid VST QoS state.
 */
static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port,
				       struct mlx4_vport_state *vf_admin,
				       int vlan, int qos)
{
	struct mlx4_vport_state dummy_admin = {0};

	if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) ||
	    !vf_admin->tx_rate)
		return true;

	/* Evaluate the *requested* state with the same predicate. */
	dummy_admin.default_qos = qos;
	dummy_admin.default_vlan = vlan;

	/* VF wants to move to other VST state which is valid with current
	 * rate limit. Either different default vlan in VST or other
	 * supported QoS priority. Otherwise we don't allow this change when
	 * the TX rate is still configured.
	 */
	if (mlx4_is_vf_vst_and_prio_qos(dev, port, &dummy_admin))
		return true;

	mlx4_info(dev, "Cannot change VF state to %s while rate is set\n",
		  (vlan == MLX4_VGT) ? "VGT" : "VST");

	if (vlan != MLX4_VGT)
		mlx4_info(dev, "VST priority %d not supported for QoS\n", qos);

	mlx4_info(dev, "Please set rate to 0 prior to this VF state change\n");

	return false;
}
2976*4882a593Smuzhiyun
/* Set the administrative MAC of VF @vf on @port from the PF.
 * The new MAC takes effect only after the VF restarts.
 * Returns 0 on success, -EPROTONOSUPPORT if not master, -EINVAL on a
 * bad vf/multicast MAC, -EPERM when spoof checking forbids clearing.
 */
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u8 *mac)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;

	if (!mlx4_is_master(dev))
		return -EPROTONOSUPPORT;

	if (is_multicast_ether_addr(mac))
		return -EINVAL;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];

	/* Spoof checking needs a concrete MAC to check against. */
	if (s_info->spoofchk && is_zero_ether_addr(mac)) {
		mlx4_info(dev, "MAC invalidation is not allowed when spoofchk is on\n");
		return -EPERM;
	}

	s_info->mac = mlx4_mac_to_u64(mac);
	mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
		  vf, port, s_info->mac);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
3007*4882a593Smuzhiyun
3008*4882a593Smuzhiyun
/* Configure VST (default vlan + qos, optionally QinQ via 802.1ad) or
 * VGT mode for VF @vf on @port from the PF.
 * Returns 0 on success, -EPROTONOSUPPORT when the device or the VF
 * lacks the needed capability, -EINVAL on bad vlan/qos/proto, -EPERM
 * when the change conflicts with a configured rate limit.
 */
int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos,
		     __be16 proto)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *vf_admin;
	struct mlx4_slave_state *slave_state;
	struct mlx4_vport_oper_state *vf_oper;
	int slave;

	if ((!mlx4_is_master(dev)) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
		return -EPROTONOSUPPORT;

	/* 12-bit vlan id, 3-bit priority. */
	if ((vlan > 4095) || (qos > 7))
		return -EINVAL;

	if (proto == htons(ETH_P_8021AD) &&
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP))
		return -EPROTONOSUPPORT;

	if (proto != htons(ETH_P_8021Q) &&
	    proto != htons(ETH_P_8021AD))
		return -EINVAL;

	/* QinQ requires a real service vlan; 0 and MLX4_VGT are invalid. */
	if ((proto == htons(ETH_P_8021AD)) &&
	    ((vlan == 0) || (vlan == MLX4_VGT)))
		return -EINVAL;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	slave_state = &priv->mfunc.master.slave_state[slave];
	if ((proto == htons(ETH_P_8021AD)) && (slave_state->active) &&
	    (!slave_state->vst_qinq_supported)) {
		mlx4_err(dev, "vf %d does not support VST QinQ mode\n", vf);
		return -EPROTONOSUPPORT;
	}
	port = mlx4_slaves_closest_port(dev, slave, port);
	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
	vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

	if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos))
		return -EPERM;

	/* vlan == 0 && qos == 0 means "no VST": fall back to VGT. */
	if ((0 == vlan) && (0 == qos))
		vf_admin->default_vlan = MLX4_VGT;
	else
		vf_admin->default_vlan = vlan;
	vf_admin->default_qos = qos;
	vf_admin->vlan_proto = proto;

	/* If rate was configured prior to VST, we saved the configured rate
	 * in vf_admin->rate and now, if priority supported we enforce the QoS
	 */
	if (mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) &&
	    vf_admin->tx_rate)
		vf_admin->qos_vport = slave;

	/* Try to activate new vf state without restart,
	 * this option is not supported while moving to VST QinQ mode.
	 */
	if ((proto == htons(ETH_P_8021AD) &&
	     vf_oper->state.vlan_proto != proto) ||
	    mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
		mlx4_info(dev,
			  "updating vf %d port %d config will take effect on next VF restart\n",
			  vf, port);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
3080*4882a593Smuzhiyun
/* Set the maximum TX rate for VF @vf on @port from the PF.
 * Minimum-rate (BW share) configuration is not supported.  The rate is
 * always saved in the admin state; it is actively enforced only while
 * the VF is in VST mode with a supported QoS priority.
 * Returns 0 on success or a negative errno.
 */
int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
		     int max_tx_rate)
{
	int err;
	int slave;
	struct mlx4_vport_state *vf_admin;
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!mlx4_is_master(dev) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
		return -EPROTONOSUPPORT;

	if (min_tx_rate) {
		mlx4_info(dev, "Minimum BW share not supported\n");
		return -EPROTONOSUPPORT;
	}

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];

	/* Program the VPP hardware limit first; bail out on failure. */
	err = mlx4_set_vport_qos(priv, slave, port, max_tx_rate);
	if (err) {
		mlx4_info(dev, "vf %d failed to set rate %d\n", vf,
			  max_tx_rate);
		return err;
	}

	vf_admin->tx_rate = max_tx_rate;
	/* if VF is not in supported mode (VST with supported prio),
	 * we do not change vport configuration for its QPs, but save
	 * the rate, so it will be enforced when it moves to supported
	 * mode next time.
	 */
	if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin)) {
		mlx4_info(dev,
			  "rate set for VF %d when not in valid state\n", vf);

		if (vf_admin->default_vlan != MLX4_VGT)
			mlx4_info(dev, "VST priority not supported by QoS\n");
		else
			mlx4_info(dev, "VF in VGT mode (needed VST)\n");

		mlx4_info(dev,
			  "rate %d take affect when VF moves to valid state\n",
			  max_tx_rate);
		return 0;
	}

	/* If user sets rate 0 assigning default vport for its QPs */
	vf_admin->qos_vport = max_tx_rate ? slave : MLX4_VPP_DEFAULT_VPORT;

	if (priv->mfunc.master.slave_state[slave].active &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)
		mlx4_master_immediate_activate_vlan_qos(priv, slave, port);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_rate);
3143*4882a593Smuzhiyun
3144*4882a593Smuzhiyun /* mlx4_get_slave_default_vlan -
3145*4882a593Smuzhiyun * return true if VST ( default vlan)
3146*4882a593Smuzhiyun * if VST, will return vlan & qos (if not NULL)
3147*4882a593Smuzhiyun */
mlx4_get_slave_default_vlan(struct mlx4_dev * dev,int port,int slave,u16 * vlan,u8 * qos)3148*4882a593Smuzhiyun bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
3149*4882a593Smuzhiyun u16 *vlan, u8 *qos)
3150*4882a593Smuzhiyun {
3151*4882a593Smuzhiyun struct mlx4_vport_oper_state *vp_oper;
3152*4882a593Smuzhiyun struct mlx4_priv *priv;
3153*4882a593Smuzhiyun
3154*4882a593Smuzhiyun priv = mlx4_priv(dev);
3155*4882a593Smuzhiyun port = mlx4_slaves_closest_port(dev, slave, port);
3156*4882a593Smuzhiyun vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
3157*4882a593Smuzhiyun
3158*4882a593Smuzhiyun if (MLX4_VGT != vp_oper->state.default_vlan) {
3159*4882a593Smuzhiyun if (vlan)
3160*4882a593Smuzhiyun *vlan = vp_oper->state.default_vlan;
3161*4882a593Smuzhiyun if (qos)
3162*4882a593Smuzhiyun *qos = vp_oper->state.default_qos;
3163*4882a593Smuzhiyun return true;
3164*4882a593Smuzhiyun }
3165*4882a593Smuzhiyun return false;
3166*4882a593Smuzhiyun }
3167*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
3168*4882a593Smuzhiyun
/* Enable or disable source-MAC spoof checking for VF @vf on @port.
 * Enabling requires a valid admin MAC to check against.
 * Returns 0 on success, -EPROTONOSUPPORT without master/FSM support,
 * -EINVAL on a bad vf number, -EPERM when the MAC is not valid.
 */
int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;
	u8 mac[ETH_ALEN];

	if ((!mlx4_is_master(dev)) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];

	/* Spoof checking is meaningless without a concrete MAC. */
	mlx4_u64_to_mac(mac, s_info->mac);
	if (setting && !is_valid_ether_addr(mac)) {
		mlx4_info(dev, "Illegal MAC with spoofchk\n");
		return -EPERM;
	}

	s_info->spoofchk = setting;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
3198*4882a593Smuzhiyun
mlx4_get_vf_config(struct mlx4_dev * dev,int port,int vf,struct ifla_vf_info * ivf)3199*4882a593Smuzhiyun int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf)
3200*4882a593Smuzhiyun {
3201*4882a593Smuzhiyun struct mlx4_priv *priv = mlx4_priv(dev);
3202*4882a593Smuzhiyun struct mlx4_vport_state *s_info;
3203*4882a593Smuzhiyun int slave;
3204*4882a593Smuzhiyun
3205*4882a593Smuzhiyun if (!mlx4_is_master(dev))
3206*4882a593Smuzhiyun return -EPROTONOSUPPORT;
3207*4882a593Smuzhiyun
3208*4882a593Smuzhiyun slave = mlx4_get_slave_indx(dev, vf);
3209*4882a593Smuzhiyun if (slave < 0)
3210*4882a593Smuzhiyun return -EINVAL;
3211*4882a593Smuzhiyun
3212*4882a593Smuzhiyun s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
3213*4882a593Smuzhiyun ivf->vf = vf;
3214*4882a593Smuzhiyun
3215*4882a593Smuzhiyun /* need to convert it to a func */
3216*4882a593Smuzhiyun ivf->mac[0] = ((s_info->mac >> (5*8)) & 0xff);
3217*4882a593Smuzhiyun ivf->mac[1] = ((s_info->mac >> (4*8)) & 0xff);
3218*4882a593Smuzhiyun ivf->mac[2] = ((s_info->mac >> (3*8)) & 0xff);
3219*4882a593Smuzhiyun ivf->mac[3] = ((s_info->mac >> (2*8)) & 0xff);
3220*4882a593Smuzhiyun ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
3221*4882a593Smuzhiyun ivf->mac[5] = ((s_info->mac) & 0xff);
3222*4882a593Smuzhiyun
3223*4882a593Smuzhiyun ivf->vlan = s_info->default_vlan;
3224*4882a593Smuzhiyun ivf->qos = s_info->default_qos;
3225*4882a593Smuzhiyun ivf->vlan_proto = s_info->vlan_proto;
3226*4882a593Smuzhiyun
3227*4882a593Smuzhiyun if (mlx4_is_vf_vst_and_prio_qos(dev, port, s_info))
3228*4882a593Smuzhiyun ivf->max_tx_rate = s_info->tx_rate;
3229*4882a593Smuzhiyun else
3230*4882a593Smuzhiyun ivf->max_tx_rate = 0;
3231*4882a593Smuzhiyun
3232*4882a593Smuzhiyun ivf->min_tx_rate = 0;
3233*4882a593Smuzhiyun ivf->spoofchk = s_info->spoofchk;
3234*4882a593Smuzhiyun ivf->linkstate = s_info->link_state;
3235*4882a593Smuzhiyun
3236*4882a593Smuzhiyun return 0;
3237*4882a593Smuzhiyun }
3238*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mlx4_get_vf_config);
3239*4882a593Smuzhiyun
/* Set the administrative link state (auto/enable/disable) of VF @vf on
 * @port and emit a port-state-change event so the VF sees the new state
 * immediately.
 * Returns 0 on success or -EINVAL on a bad vf number or link state.
 */
int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;
	u8 link_stat_event;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		/* get current link state */
		if (!priv->sense.do_sense_port[port])
			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
		else
			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
		break;

	case IFLA_VF_LINK_STATE_ENABLE:
		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
		break;

	case IFLA_VF_LINK_STATE_DISABLE:
		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
		break;

	default:
		mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
			  link_state, slave, port);
		return -EINVAL;
	}
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	s_info->link_state = link_state;

	/* send event */
	mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);

	if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
		mlx4_dbg(dev,
			 "updating vf %d port %d no link state HW enforcement\n",
			 vf, port);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
3287*4882a593Smuzhiyun
/* Read (and optionally reset) a HW counter via the QUERY_IF_STAT command.
 *
 * The fetched values are *added onto* whatever @counter_stats already
 * contains, so callers must start from a zeroed struct to get absolute
 * values.  Counter fields are kept big-endian, as returned by firmware.
 *
 * Returns 0 on success (including the sink-counter no-op case), or a
 * negative errno from mailbox allocation or the firmware command.
 */
int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
			   struct mlx4_counter *counter_stats, int reset)
{
	struct mlx4_cmd_mailbox *mailbox = NULL;
	struct mlx4_counter *tmp_counter;
	int err;
	u32 if_stat_in_mod;

	if (!counter_stats)
		return -EINVAL;

	/* The sink counter carries no real statistics; nothing to read */
	if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, sizeof(struct mlx4_counter));
	/* input modifier = counter index, plus an optional clear-on-read flag */
	if_stat_in_mod = counter_index;
	if (reset)
		if_stat_in_mod |= MLX4_QUERY_IF_STAT_RESET;
	err = mlx4_cmd_box(dev, 0, mailbox->dma,
			   if_stat_in_mod, 0,
			   MLX4_CMD_QUERY_IF_STAT,
			   MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (err) {
		mlx4_dbg(dev, "%s: failed to read statistics for counter index %d\n",
			 __func__, counter_index);
		goto if_stat_out;
	}
	tmp_counter = (struct mlx4_counter *)mailbox->buf;
	counter_stats->counter_mode = tmp_counter->counter_mode;
	if (counter_stats->counter_mode == 0) {
		/* Basic counter mode: accumulate the fresh FW values onto
		 * the running totals, staying in big-endian throughout.
		 */
		counter_stats->rx_frames =
			cpu_to_be64(be64_to_cpu(counter_stats->rx_frames) +
				    be64_to_cpu(tmp_counter->rx_frames));
		counter_stats->tx_frames =
			cpu_to_be64(be64_to_cpu(counter_stats->tx_frames) +
				    be64_to_cpu(tmp_counter->tx_frames));
		counter_stats->rx_bytes =
			cpu_to_be64(be64_to_cpu(counter_stats->rx_bytes) +
				    be64_to_cpu(tmp_counter->rx_bytes));
		counter_stats->tx_bytes =
			cpu_to_be64(be64_to_cpu(counter_stats->tx_bytes) +
				    be64_to_cpu(tmp_counter->tx_bytes));
	}

if_stat_out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_get_counter_stats);
3343*4882a593Smuzhiyun
mlx4_get_vf_stats(struct mlx4_dev * dev,int port,int vf_idx,struct ifla_vf_stats * vf_stats)3344*4882a593Smuzhiyun int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
3345*4882a593Smuzhiyun struct ifla_vf_stats *vf_stats)
3346*4882a593Smuzhiyun {
3347*4882a593Smuzhiyun struct mlx4_counter tmp_vf_stats;
3348*4882a593Smuzhiyun int slave;
3349*4882a593Smuzhiyun int err = 0;
3350*4882a593Smuzhiyun
3351*4882a593Smuzhiyun if (!vf_stats)
3352*4882a593Smuzhiyun return -EINVAL;
3353*4882a593Smuzhiyun
3354*4882a593Smuzhiyun if (!mlx4_is_master(dev))
3355*4882a593Smuzhiyun return -EPROTONOSUPPORT;
3356*4882a593Smuzhiyun
3357*4882a593Smuzhiyun slave = mlx4_get_slave_indx(dev, vf_idx);
3358*4882a593Smuzhiyun if (slave < 0)
3359*4882a593Smuzhiyun return -EINVAL;
3360*4882a593Smuzhiyun
3361*4882a593Smuzhiyun port = mlx4_slaves_closest_port(dev, slave, port);
3362*4882a593Smuzhiyun err = mlx4_calc_vf_counters(dev, slave, port, &tmp_vf_stats);
3363*4882a593Smuzhiyun if (!err && tmp_vf_stats.counter_mode == 0) {
3364*4882a593Smuzhiyun vf_stats->rx_packets = be64_to_cpu(tmp_vf_stats.rx_frames);
3365*4882a593Smuzhiyun vf_stats->tx_packets = be64_to_cpu(tmp_vf_stats.tx_frames);
3366*4882a593Smuzhiyun vf_stats->rx_bytes = be64_to_cpu(tmp_vf_stats.rx_bytes);
3367*4882a593Smuzhiyun vf_stats->tx_bytes = be64_to_cpu(tmp_vf_stats.tx_bytes);
3368*4882a593Smuzhiyun }
3369*4882a593Smuzhiyun
3370*4882a593Smuzhiyun return err;
3371*4882a593Smuzhiyun }
3372*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mlx4_get_vf_stats);
3373*4882a593Smuzhiyun
mlx4_vf_smi_enabled(struct mlx4_dev * dev,int slave,int port)3374*4882a593Smuzhiyun int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
3375*4882a593Smuzhiyun {
3376*4882a593Smuzhiyun struct mlx4_priv *priv = mlx4_priv(dev);
3377*4882a593Smuzhiyun
3378*4882a593Smuzhiyun if (slave < 1 || slave >= dev->num_slaves ||
3379*4882a593Smuzhiyun port < 1 || port > MLX4_MAX_PORTS)
3380*4882a593Smuzhiyun return 0;
3381*4882a593Smuzhiyun
3382*4882a593Smuzhiyun return priv->mfunc.master.vf_oper[slave].smi_enabled[port] ==
3383*4882a593Smuzhiyun MLX4_VF_SMI_ENABLED;
3384*4882a593Smuzhiyun }
3385*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mlx4_vf_smi_enabled);
3386*4882a593Smuzhiyun
mlx4_vf_get_enable_smi_admin(struct mlx4_dev * dev,int slave,int port)3387*4882a593Smuzhiyun int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port)
3388*4882a593Smuzhiyun {
3389*4882a593Smuzhiyun struct mlx4_priv *priv = mlx4_priv(dev);
3390*4882a593Smuzhiyun
3391*4882a593Smuzhiyun if (slave == mlx4_master_func_num(dev))
3392*4882a593Smuzhiyun return 1;
3393*4882a593Smuzhiyun
3394*4882a593Smuzhiyun if (slave < 1 || slave >= dev->num_slaves ||
3395*4882a593Smuzhiyun port < 1 || port > MLX4_MAX_PORTS)
3396*4882a593Smuzhiyun return 0;
3397*4882a593Smuzhiyun
3398*4882a593Smuzhiyun return priv->mfunc.master.vf_admin[slave].enable_smi[port] ==
3399*4882a593Smuzhiyun MLX4_VF_SMI_ENABLED;
3400*4882a593Smuzhiyun }
3401*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mlx4_vf_get_enable_smi_admin);
3402*4882a593Smuzhiyun
mlx4_vf_set_enable_smi_admin(struct mlx4_dev * dev,int slave,int port,int enabled)3403*4882a593Smuzhiyun int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
3404*4882a593Smuzhiyun int enabled)
3405*4882a593Smuzhiyun {
3406*4882a593Smuzhiyun struct mlx4_priv *priv = mlx4_priv(dev);
3407*4882a593Smuzhiyun struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
3408*4882a593Smuzhiyun &priv->dev, slave);
3409*4882a593Smuzhiyun int min_port = find_first_bit(actv_ports.ports,
3410*4882a593Smuzhiyun priv->dev.caps.num_ports) + 1;
3411*4882a593Smuzhiyun int max_port = min_port - 1 +
3412*4882a593Smuzhiyun bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
3413*4882a593Smuzhiyun
3414*4882a593Smuzhiyun if (slave == mlx4_master_func_num(dev))
3415*4882a593Smuzhiyun return 0;
3416*4882a593Smuzhiyun
3417*4882a593Smuzhiyun if (slave < 1 || slave >= dev->num_slaves ||
3418*4882a593Smuzhiyun port < 1 || port > MLX4_MAX_PORTS ||
3419*4882a593Smuzhiyun enabled < 0 || enabled > 1)
3420*4882a593Smuzhiyun return -EINVAL;
3421*4882a593Smuzhiyun
3422*4882a593Smuzhiyun if (min_port == max_port && dev->caps.num_ports > 1) {
3423*4882a593Smuzhiyun mlx4_info(dev, "SMI access disallowed for single ported VFs\n");
3424*4882a593Smuzhiyun return -EPROTONOSUPPORT;
3425*4882a593Smuzhiyun }
3426*4882a593Smuzhiyun
3427*4882a593Smuzhiyun priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
3428*4882a593Smuzhiyun return 0;
3429*4882a593Smuzhiyun }
3430*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mlx4_vf_set_enable_smi_admin);
3431