/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <net/devlink.h>

#include "mlx4.h"

struct mlx4_device_context {
	struct list_head	list;
	struct list_head	bond_list;
	struct mlx4_interface  *intf;
	void		       *context;
};

static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);

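/* Attach one registered interface to one device: call the interface's
 * ->add() hook and, if it returns a context, queue that context on the
 * device's ctx_list and run the optional ->activate() hook.  Allocation
 * or ->add() failures are silently ignored here.
 */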
static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
{
	struct mlx4_device_context *dev_ctx;

	dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
	if (!dev_ctx)
		return;

	dev_ctx->intf    = intf;
	dev_ctx->context = intf->add(&priv->dev);

	if (dev_ctx->context) {
		spin_lock_irq(&priv->ctx_lock);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
		spin_unlock_irq(&priv->ctx_lock);
		if (intf->activate)
			intf->activate(&priv->dev, dev_ctx->context);
	} else
		kfree(dev_ctx);
}

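/* Undo mlx4_add_device(): find the context this interface created for
 * the device, unlink it from ctx_list and hand it back through the
 * interface's ->remove() hook.
 */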
static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
{
	struct mlx4_device_context *dev_ctx;

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf == intf) {
			spin_lock_irq(&priv->ctx_lock);
			list_del(&dev_ctx->list);
			spin_unlock_irq(&priv->ctx_lock);

			intf->remove(&priv->dev, dev_ctx->context);
			kfree(dev_ctx);
			return;
		}
}

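/* Register an upper-layer interface (e.g. mlx4_en or mlx4_ib) and attach
 * it to every device already on the global device list.  Bonding (HA)
 * support is stripped from the interface for multi-function (SR-IOV)
 * devices.
 */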
int mlx4_register_interface(struct mlx4_interface *intf)
{
	struct mlx4_priv *priv;

	if (!intf->add || !intf->remove)
		return -EINVAL;

	mutex_lock(&intf_mutex);

	list_add_tail(&intf->list, &intf_list);
	list_for_each_entry(priv, &dev_list, dev_list) {
		if (mlx4_is_mfunc(&priv->dev) && (intf->flags & MLX4_INTFF_BONDING)) {
			mlx4_dbg(&priv->dev,
				 "SRIOV, disabling HA mode for intf proto %d\n", intf->protocol);
			intf->flags &= ~MLX4_INTFF_BONDING;
		}
		mlx4_add_device(intf, priv);
	}

	mutex_unlock(&intf_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_register_interface);

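/* Unregister an upper-layer interface: detach it from every known
 * device and drop it from the global interface list.
 */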
void mlx4_unregister_interface(struct mlx4_interface *intf)
{
	struct mlx4_priv *priv;

	mutex_lock(&intf_mutex);

	list_for_each_entry(priv, &dev_list, dev_list)
		mlx4_remove_device(intf, priv);

	list_del(&intf->list);

	mutex_unlock(&intf_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_interface);

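/* Move the device in or out of port-bonding (HA) mode.  After the port
 * map is reprogrammed, every attached interface that set
 * MLX4_INTFF_BONDING is removed and re-added so it starts up with the
 * new bonding state.
 */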
int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_device_context *dev_ctx = NULL, *temp_dev_ctx;
	unsigned long flags;
	int ret;
	LIST_HEAD(bond_list);

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
		return -EOPNOTSUPP;

	ret = mlx4_disable_rx_port_check(dev, enable);
	if (ret) {
		mlx4_err(dev, "Fail to %s rx port check\n",
			 enable ? "enable" : "disable");
		return ret;
	}
	if (enable) {
		dev->flags |= MLX4_FLAG_BONDED;
	} else {
		ret = mlx4_virt2phy_port_map(dev, 1, 2);
		if (ret) {
			mlx4_err(dev, "Fail to reset port map\n");
			return ret;
		}
		dev->flags &= ~MLX4_FLAG_BONDED;
	}

	spin_lock_irqsave(&priv->ctx_lock, flags);
	list_for_each_entry_safe(dev_ctx, temp_dev_ctx, &priv->ctx_list, list) {
		if (dev_ctx->intf->flags & MLX4_INTFF_BONDING) {
			list_add_tail(&dev_ctx->bond_list, &bond_list);
			list_del(&dev_ctx->list);
		}
	}
	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &bond_list, bond_list) {
		dev_ctx->intf->remove(dev, dev_ctx->context);
		dev_ctx->context = dev_ctx->intf->add(dev);

		spin_lock_irqsave(&priv->ctx_lock, flags);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
		spin_unlock_irqrestore(&priv->ctx_lock, flags);

		mlx4_dbg(dev, "Interface for protocol %d restarted with bonded mode %s\n",
			 dev_ctx->intf->protocol, enable ?
			 "enabled" : "disabled");
	}
	return 0;
}

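/* Fan an asynchronous device event (port up/down, catastrophic error,
 * etc.) out to every attached interface that registered an ->event()
 * callback.  Runs under ctx_lock with interrupts disabled, so callbacks
 * must not sleep.
 */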
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
			 unsigned long param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_device_context *dev_ctx;
	unsigned long flags;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->event)
			dev_ctx->intf->event(dev, dev_ctx->context, type, param);

	spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

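/* Make a newly probed device visible to the upper layers: mark it UP,
 * add it to the global device list, attach every registered interface
 * and start the catastrophic-error poll.
 */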
int mlx4_register_device(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_interface *intf;

	mutex_lock(&intf_mutex);

	dev->persist->interface_state |= MLX4_INTERFACE_STATE_UP;
	list_add_tail(&priv->dev_list, &dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx4_add_device(intf, priv);

	mutex_unlock(&intf_mutex);
	mlx4_start_catas_poll(dev);

	return 0;
}

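/* Tear-down counterpart of mlx4_register_device(): stop the
 * catastrophic-error poll, and on a VF that is being removed check the
 * comm channel so a dead channel pushes the device into error state
 * before the interfaces are detached and the device leaves the list.
 */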
void mlx4_unregister_device(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_interface *intf;

	if (!(dev->persist->interface_state & MLX4_INTERFACE_STATE_UP))
		return;

	mlx4_stop_catas_poll(dev);
	if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
	    mlx4_is_slave(dev)) {
		/* In mlx4_remove_one on a VF */
		u32 slave_read =
			swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));

		if (mlx4_comm_internal_err(slave_read)) {
			mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
				 __func__);
			mlx4_enter_error_state(dev->persist);
		}
	}
	mutex_lock(&intf_mutex);

	list_for_each_entry(intf, &intf_list, list)
		mlx4_remove_device(intf, priv);

	list_del(&priv->dev_list);
	dev->persist->interface_state &= ~MLX4_INTERFACE_STATE_UP;

	mutex_unlock(&intf_mutex);
}

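/* Return the protocol-specific device (for example the net_device that
 * mlx4_en created) that an attached interface exposes for @proto on
 * @port, via its ->get_dev() hook, or NULL if no such interface is
 * attached.
 */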
void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_device_context *dev_ctx;
	unsigned long flags;
	void *result = NULL;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->protocol == proto && dev_ctx->intf->get_dev) {
			result = dev_ctx->intf->get_dev(dev, dev_ctx->context, port);
			break;
		}

	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	return result;
}
EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev);

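/* Return the devlink_port embedded in the driver's per-port info for
 * @port, so upper drivers can associate their netdevs with it.
 */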
struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];

	return &info->devlink_port;
}
EXPORT_SYMBOL_GPL(mlx4_get_devlink_port);