/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_sa.h>

#include <linux/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/delay.h>

#include "mlx4_ib.h"

#define MAX_VFS			80
#define MAX_PEND_REQS_PER_FUNC	4
#define MAD_TIMEOUT_MS		2000

#define mcg_warn(fmt, arg...)	pr_warn("MCG WARNING: " fmt, ##arg)
#define mcg_error(fmt, arg...)	pr_err(fmt, ##arg)
#define mcg_warn_group(group, format, arg...) \
	pr_warn("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\
	(group)->name, (group)->demux->port, ## arg)

#define mcg_debug_group(group, format, arg...) \
	pr_debug("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\
	(group)->name, (group)->demux->port, ## arg)

#define mcg_error_group(group, format, arg...) \
	pr_err(" %16s: " format, (group)->name, ## arg)

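/* MGID 0 is the all-zero GID: a VF may send a join with a zero MGID to ask
 * the SA to allocate one. Such groups are parked on mcg_mgid0_list until
 * the SM response carries the real MGID (see search_relocate_mgid0_group()).
 */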
static union ib_gid mgid0;

static struct workqueue_struct *clean_wq;

enum mcast_state {
	MCAST_NOT_MEMBER = 0,
	MCAST_MEMBER,
};

enum mcast_group_state {
	MCAST_IDLE,
	MCAST_JOIN_SENT,
	MCAST_LEAVE_SENT,
	MCAST_RESP_READY
};

struct mcast_member {
	enum mcast_state state;
	uint8_t join_state;
	int num_pend_reqs;
	struct list_head pending;
};

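/* Wire layout of an SA MCMemberRecord as carried in the MAD data area.
 * Combined fields pack a selector/value pair: mtusel_mtu and ratesel_rate
 * keep the 2-bit selector in bits 7:6 and the 6-bit value in bits 5:0;
 * scope_join_state keeps scope in bits 7:4 and the join state in bits 3:0
 * (per the IBTA MCMemberRecord layout: bit 0 FullMember, bit 1 NonMember,
 * bit 2 SendOnlyNonMember).
 */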
struct ib_sa_mcmember_data {
	union ib_gid	mgid;
	union ib_gid	port_gid;
	__be32		qkey;
	__be16		mlid;
	u8		mtusel_mtu;
	u8		tclass;
	__be16		pkey;
	u8		ratesel_rate;
	u8		lifetmsel_lifetm;
	__be32		sl_flowlabel_hoplimit;
	u8		scope_join_state;
	u8		proxy_join;
	u8		reserved[2];
} __packed __aligned(4);

struct mcast_group {
	struct ib_sa_mcmember_data rec;
	struct rb_node		node;
	struct list_head	mgid0_list;
	struct mlx4_ib_demux_ctx *demux;
	struct mcast_member	func[MAX_VFS];
	struct mutex		lock;
	struct work_struct	work;
	struct list_head	pending_list;
	int			members[3];
	enum mcast_group_state	state;
	enum mcast_group_state	prev_state;
	struct ib_sa_mad	response_sa_mad;
	__be64			last_req_tid;

	char			name[33]; /* MGID string */
	struct device_attribute	dentry;

	/* refcount is the reference count for the following:
	   1. Each queued request
	   2. Each invocation of the worker thread
	   3. Membership of the port at the SA
	*/
	atomic_t		refcount;

	/* delayed work to clean pending SM request */
	struct delayed_work	timeout_work;
	struct list_head	cleanup_list;
};

struct mcast_req {
	int			func;
	struct ib_sa_mad	sa_mad;
	struct list_head	group_list;
	struct list_head	func_list;
	struct mcast_group	*group;
	int			clean;
};


#define safe_atomic_dec(ref) \
	do {\
		if (atomic_dec_and_test(ref)) \
			mcg_warn_group(group, "did not expect to reach zero\n"); \
	} while (0)

static const char *get_state_string(enum mcast_group_state state)
{
	switch (state) {
	case MCAST_IDLE:
		return "MCAST_IDLE";
	case MCAST_JOIN_SENT:
		return "MCAST_JOIN_SENT";
	case MCAST_LEAVE_SENT:
		return "MCAST_LEAVE_SENT";
	case MCAST_RESP_READY:
		return "MCAST_RESP_READY";
	}
	return "Invalid State";
}

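/* Look up a group by MGID in the per-port rb-tree; the tree is ordered by
 * memcmp() over the raw 16-byte MGID. Callers hold ctx->mcg_table_lock.
 */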
static struct mcast_group *mcast_find(struct mlx4_ib_demux_ctx *ctx,
				      union ib_gid *mgid)
{
	struct rb_node *node = ctx->mcg_table.rb_node;
	struct mcast_group *group;
	int ret;

	while (node) {
		group = rb_entry(node, struct mcast_group, node);
		ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
		if (!ret)
			return group;

		if (ret < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct mcast_group *mcast_insert(struct mlx4_ib_demux_ctx *ctx,
					struct mcast_group *group)
{
	struct rb_node **link = &ctx->mcg_table.rb_node;
	struct rb_node *parent = NULL;
	struct mcast_group *cur_group;
	int ret;

	while (*link) {
		parent = *link;
		cur_group = rb_entry(parent, struct mcast_group, node);

		ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
			     sizeof group->rec.mgid);
		if (ret < 0)
			link = &(*link)->rb_left;
		else if (ret > 0)
			link = &(*link)->rb_right;
		else
			return cur_group;
	}
	rb_link_node(&group->node, parent, link);
	rb_insert_color(&group->node, &ctx->mcg_table);
	return NULL;
}

static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = ctx->dev;
	struct rdma_ah_attr ah_attr;
	unsigned long flags;

	spin_lock_irqsave(&dev->sm_lock, flags);
	if (!dev->sm_ah[ctx->port - 1]) {
		/* port is not yet Active, sm_ah not ready */
		spin_unlock_irqrestore(&dev->sm_lock, flags);
		return -EAGAIN;
	}
	mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
	spin_unlock_irqrestore(&dev->sm_lock, flags);
	return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev),
				    ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY,
				    &ah_attr, NULL, 0xffff, mad);
}

static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx,
			     struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = ctx->dev;
	struct ib_mad_agent *agent = dev->send_agent[ctx->port - 1][1];
	struct ib_wc wc;
	struct rdma_ah_attr ah_attr;

	/* Our agent might not yet be registered when mads start to arrive */
	if (!agent)
		return -EAGAIN;

	rdma_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);

	if (ib_find_cached_pkey(&dev->ib_dev, ctx->port, IB_DEFAULT_PKEY_FULL, &wc.pkey_index))
		return -EINVAL;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = ctx->port;
	wc.slid = rdma_ah_get_dlid(&ah_attr);  /* opensm lid */
	wc.src_qp = 1;
	return mlx4_ib_send_to_slave(dev, slave, ctx->port, IB_QPT_GSI, &wc, NULL, mad);
}

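/* Requests forwarded to the SA are re-tagged with a TID allocated by the
 * master (mlx4_ib_get_new_demux_tid()); judging by how the response path
 * rewrites it, the first (most significant) byte of the big-endian TID
 * encodes the requesting slave. The TID is saved in last_req_tid so the
 * SM response can be matched back to the group.
 */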
static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_mad_data = (struct ib_sa_mcmember_data *)&mad.data;
	int ret;

	/* we rely on a mad request as arrived from a VF */
	memcpy(&mad, sa_mad, sizeof mad);

	/* fix port GID to be the real one (slave 0) */
	sa_mad_data->port_gid.global.interface_id = group->demux->guid_cache[0];

	/* assign our own TID */
	mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
	group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */

	ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
	/* set timeout handler */
	if (!ret) {
		/* calls mlx4_ib_mcg_timeout_handler */
		queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
				   msecs_to_jiffies(MAD_TIMEOUT_MS));
	}

	return ret;
}

static int send_leave_to_wire(struct mcast_group *group, u8 join_state)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;
	int ret;

	memset(&mad, 0, sizeof mad);
	mad.mad_hdr.base_version = 1;
	mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	mad.mad_hdr.class_version = 2;
	mad.mad_hdr.method = IB_SA_METHOD_DELETE;
	mad.mad_hdr.status = cpu_to_be16(0);
	mad.mad_hdr.class_specific = cpu_to_be16(0);
	mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
	group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */
	mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad.mad_hdr.attr_mod = cpu_to_be32(0);
	mad.sa_hdr.sm_key = 0x0;
	mad.sa_hdr.attr_offset = cpu_to_be16(7);
	mad.sa_hdr.comp_mask = IB_SA_MCMEMBER_REC_MGID |
		IB_SA_MCMEMBER_REC_PORT_GID | IB_SA_MCMEMBER_REC_JOIN_STATE;

	*sa_data = group->rec;
	sa_data->scope_join_state = join_state;

	ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
	if (ret)
		group->state = MCAST_IDLE;

	/* set timeout handler */
	if (!ret) {
		/* calls mlx4_ib_mcg_timeout_handler */
		queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
				   msecs_to_jiffies(MAD_TIMEOUT_MS));
	}

	return ret;
}

static int send_reply_to_slave(int slave, struct mcast_group *group,
			       struct ib_sa_mad *req_sa_mad, u16 status)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;
	struct ib_sa_mcmember_data *req_sa_data = (struct ib_sa_mcmember_data *)&req_sa_mad->data;
	int ret;

	memset(&mad, 0, sizeof mad);
	mad.mad_hdr.base_version = 1;
	mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	mad.mad_hdr.class_version = 2;
	mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
	mad.mad_hdr.status = cpu_to_be16(status);
	mad.mad_hdr.class_specific = cpu_to_be16(0);
	mad.mad_hdr.tid = req_sa_mad->mad_hdr.tid;
	*(u8 *)&mad.mad_hdr.tid = 0; /* resetting tid to 0 */
	mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad.mad_hdr.attr_mod = cpu_to_be32(0);
	mad.sa_hdr.sm_key = req_sa_mad->sa_hdr.sm_key;
	mad.sa_hdr.attr_offset = cpu_to_be16(7);
	mad.sa_hdr.comp_mask = 0; /* ignored on responses, see IBTA spec */

	*sa_data = group->rec;

	/* reconstruct VF's requested join_state and port_gid */
	sa_data->scope_join_state &= 0xf0;
	sa_data->scope_join_state |= (group->func[slave].join_state & 0x0f);
	memcpy(&sa_data->port_gid, &req_sa_data->port_gid, sizeof req_sa_data->port_gid);

	ret = send_mad_to_slave(slave, group->demux, (struct ib_mad *)&mad);
	return ret;
}

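/* Compare one selector/value pair from the request against the group's
 * record. The top two bits of dst_value select the comparison (IB_SA_GT,
 * IB_SA_LT or IB_SA_EQ); the low six bits are the value itself. Returns
 * nonzero when the requested constraint is not satisfied, and 0 when the
 * pair is absent from comp_mask.
 */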
static int check_selector(ib_sa_comp_mask comp_mask,
			  ib_sa_comp_mask selector_mask,
			  ib_sa_comp_mask value_mask,
			  u8 src_value, u8 dst_value)
{
	int err;
	u8 selector = dst_value >> 6;
	dst_value &= 0x3f;
	src_value &= 0x3f;

	if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
		return 0;

	switch (selector) {
	case IB_SA_GT:
		err = (src_value <= dst_value);
		break;
	case IB_SA_LT:
		err = (src_value >= dst_value);
		break;
	case IB_SA_EQ:
		err = (src_value != dst_value);
		break;
	default:
		err = 0;
		break;
	}

	return err;
}

static u16 cmp_rec(struct ib_sa_mcmember_data *src,
		   struct ib_sa_mcmember_data *dst, ib_sa_comp_mask comp_mask)
{
	/* src is group record, dst is request record */
	/* MGID must already match */
	/* Port_GID we always replace to our Port_GID, so it is a match */

#define MAD_STATUS_REQ_INVALID 0x0200
	if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
			   IB_SA_MCMEMBER_REC_MTU,
			   src->mtusel_mtu, dst->mtusel_mtu))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
	    src->tclass != dst->tclass)
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
			   IB_SA_MCMEMBER_REC_RATE,
			   src->ratesel_rate, dst->ratesel_rate))
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
			   src->lifetmsel_lifetm, dst->lifetmsel_lifetm))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_SL &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0xf0000000) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0xf0000000))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x0fffff00) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x0fffff00))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x000000ff) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x000000ff))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE &&
	    (src->scope_join_state & 0xf0) !=
	    (dst->scope_join_state & 0xf0))
		return MAD_STATUS_REQ_INVALID;

	/* join_state checked separately, proxy_join ignored */

	return 0;
}


/* release group, return 1 if this was last release and group is destroyed
 * timeout work is cancelled synchronously */
static int release_group(struct mcast_group *group, int from_timeout_handler)
{
	struct mlx4_ib_demux_ctx *ctx = group->demux;
	int nzgroup;

	mutex_lock(&ctx->mcg_table_lock);
	mutex_lock(&group->lock);
	if (atomic_dec_and_test(&group->refcount)) {
		if (!from_timeout_handler) {
			if (group->state != MCAST_IDLE &&
			    !cancel_delayed_work(&group->timeout_work)) {
				atomic_inc(&group->refcount);
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				return 0;
			}
		}

		nzgroup = memcmp(&group->rec.mgid, &mgid0, sizeof mgid0);
		if (nzgroup)
			del_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
		if (!list_empty(&group->pending_list))
			mcg_warn_group(group, "releasing a group with non empty pending list\n");
		if (nzgroup)
			rb_erase(&group->node, &ctx->mcg_table);
		list_del_init(&group->mgid0_list);
		mutex_unlock(&group->lock);
		mutex_unlock(&ctx->mcg_table_lock);
		kfree(group);
		return 1;
	} else {
		mutex_unlock(&group->lock);
		mutex_unlock(&ctx->mcg_table_lock);
	}
	return 0;
}

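/* members[i] counts how many functions currently hold join-state bit i
 * (0 = FullMember, 1 = NonMember, 2 = SendOnlyNonMember). The port only
 * leaves a state at the SA once the corresponding counter drops to zero
 * (see get_leave_state()).
 */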
static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
	int i;

	for (i = 0; i < 3; i++, join_state >>= 1)
		if (join_state & 0x1)
			group->members[i] += inc;
}

static u8 get_leave_state(struct mcast_group *group)
{
	u8 leave_state = 0;
	int i;

	for (i = 0; i < 3; i++)
		if (!group->members[i])
			leave_state |= (1 << i);

	return leave_state & (group->rec.scope_join_state & 0xf);
}

static int join_group(struct mcast_group *group, int slave, u8 join_mask)
{
	int ret = 0;
	u8 join_state;

	/* remove bits that slave is already member of, and adjust */
	join_state = join_mask & (~group->func[slave].join_state);
	adjust_membership(group, join_state, 1);
	group->func[slave].join_state |= join_state;
	if (group->func[slave].state != MCAST_MEMBER && join_state) {
		group->func[slave].state = MCAST_MEMBER;
		ret = 1;
	}
	return ret;
}

static int leave_group(struct mcast_group *group, int slave, u8 leave_state)
{
	int ret = 0;

	adjust_membership(group, leave_state, -1);
	group->func[slave].join_state &= ~leave_state;
	if (!group->func[slave].join_state) {
		group->func[slave].state = MCAST_NOT_MEMBER;
		ret = 1;
	}
	return ret;
}

static int check_leave(struct mcast_group *group, int slave, u8 leave_mask)
{
	if (group->func[slave].state != MCAST_MEMBER)
		return MAD_STATUS_REQ_INVALID;

	/* make sure we're not deleting unset bits */
	if (~group->func[slave].join_state & leave_mask)
		return MAD_STATUS_REQ_INVALID;

	if (!leave_mask)
		return MAD_STATUS_REQ_INVALID;

	return 0;
}

static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mcast_group *group;
	struct mcast_req *req = NULL;

	group = container_of(delay, typeof(*group), timeout_work);

	mutex_lock(&group->lock);
	if (group->state == MCAST_JOIN_SENT) {
		if (!list_empty(&group->pending_list)) {
			req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
			list_del(&req->group_list);
			list_del(&req->func_list);
			--group->func[req->func].num_pend_reqs;
			mutex_unlock(&group->lock);
			kfree(req);
			if (memcmp(&group->rec.mgid, &mgid0, sizeof mgid0)) {
				if (release_group(group, 1))
					return;
			} else {
				kfree(group);
				return;
			}
			mutex_lock(&group->lock);
		} else
			mcg_warn_group(group, "DRIVER BUG\n");
	} else if (group->state == MCAST_LEAVE_SENT) {
		if (group->rec.scope_join_state & 0xf)
			group->rec.scope_join_state &= 0xf0;
		group->state = MCAST_IDLE;
		mutex_unlock(&group->lock);
		if (release_group(group, 1))
			return;
		mutex_lock(&group->lock);
	} else
		mcg_warn_group(group, "invalid state %s\n", get_state_string(group->state));
	group->state = MCAST_IDLE;
	atomic_inc(&group->refcount);
	if (!queue_work(group->demux->mcg_wq, &group->work))
		safe_atomic_dec(&group->refcount);

	mutex_unlock(&group->lock);
}

static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
			    struct mcast_req *req)
{
	u16 status;

	if (req->clean)
		leave_mask = group->func[req->func].join_state;

	status = check_leave(group, req->func, leave_mask);
	if (!status)
		leave_group(group, req->func, leave_mask);

	if (!req->clean)
		send_reply_to_slave(req->func, group, &req->sa_mad, status);
	--group->func[req->func].num_pend_reqs;
	list_del(&req->group_list);
	list_del(&req->func_list);
	kfree(req);
	return 1;
}

static int handle_join_req(struct mcast_group *group, u8 join_mask,
			   struct mcast_req *req)
{
	u8 group_join_state = group->rec.scope_join_state & 0xf;
	int ref = 0;
	u16 status;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;

	if (join_mask == (group_join_state & join_mask)) {
		/* port's membership need not change */
		status = cmp_rec(&group->rec, sa_data, req->sa_mad.sa_hdr.comp_mask);
		if (!status)
			join_group(group, req->func, join_mask);

		--group->func[req->func].num_pend_reqs;
		send_reply_to_slave(req->func, group, &req->sa_mad, status);
		list_del(&req->group_list);
		list_del(&req->func_list);
		kfree(req);
		++ref;
	} else {
		/* port's membership needs to be updated */
		group->prev_state = group->state;
		if (send_join_to_wire(group, &req->sa_mad)) {
			--group->func[req->func].num_pend_reqs;
			list_del(&req->group_list);
			list_del(&req->func_list);
			kfree(req);
			ref = 1;
			group->state = group->prev_state;
		} else
			group->state = MCAST_JOIN_SENT;
	}

	return ref;
}

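/* Per-group worker: consume a pending SM response (if any), then drain
 * queued join/leave requests while the group is idle. rc counts group
 * references to drop on exit: one for this work invocation and one per
 * request retired here; minus one when membership at the SA is newly
 * established (that reference is kept), plus one when it is dropped.
 */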
static void mlx4_ib_mcg_work_handler(struct work_struct *work)
{
	struct mcast_group *group;
	struct mcast_req *req = NULL;
	struct ib_sa_mcmember_data *sa_data;
	u8 req_join_state;
	int rc = 1; /* release_count - this is for the scheduled work */
	u16 status;
	u8 method;

	group = container_of(work, typeof(*group), work);

	mutex_lock(&group->lock);

	/* First, let's see if a response from SM is waiting regarding this group.
	 * If so, we need to update the group's REC. If this is a bad response, we
	 * may need to send a bad response to a VF waiting for it. If VF is waiting
	 * and this is a good response, the VF will be answered later in this func. */
	if (group->state == MCAST_RESP_READY) {
		/* cancels mlx4_ib_mcg_timeout_handler */
		cancel_delayed_work(&group->timeout_work);
		status = be16_to_cpu(group->response_sa_mad.mad_hdr.status);
		method = group->response_sa_mad.mad_hdr.method;
		if (group->last_req_tid != group->response_sa_mad.mad_hdr.tid) {
			mcg_warn_group(group, "Got MAD response to existing MGID but wrong TID, dropping. Resp TID=%llx, group TID=%llx\n",
				be64_to_cpu(group->response_sa_mad.mad_hdr.tid),
				be64_to_cpu(group->last_req_tid));
			group->state = group->prev_state;
			goto process_requests;
		}
		if (status) {
			if (!list_empty(&group->pending_list))
				req = list_first_entry(&group->pending_list,
						struct mcast_req, group_list);
			if (method == IB_MGMT_METHOD_GET_RESP) {
				if (req) {
					send_reply_to_slave(req->func, group, &req->sa_mad, status);
					--group->func[req->func].num_pend_reqs;
					list_del(&req->group_list);
					list_del(&req->func_list);
					kfree(req);
					++rc;
				} else
					mcg_warn_group(group, "no request for failed join\n");
			} else if (method == IB_SA_METHOD_DELETE_RESP && group->demux->flushing)
				++rc;
		} else {
			u8 resp_join_state;
			u8 cur_join_state;

			resp_join_state = ((struct ib_sa_mcmember_data *)
						group->response_sa_mad.data)->scope_join_state & 0xf;
			cur_join_state = group->rec.scope_join_state & 0xf;

			if (method == IB_MGMT_METHOD_GET_RESP) {
				/* successful join */
				if (!cur_join_state && resp_join_state)
					--rc;
			} else if (!resp_join_state)
				++rc;
			memcpy(&group->rec, group->response_sa_mad.data, sizeof group->rec);
		}
		group->state = MCAST_IDLE;
	}

process_requests:
	/* We should now go over pending join/leave requests, as long as we are idle. */
	while (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) {
		req = list_first_entry(&group->pending_list, struct mcast_req,
				       group_list);
		sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
		req_join_state = sa_data->scope_join_state & 0xf;

		/* For a leave request, we will immediately answer the VF, and
		 * update our internal counters. The actual leave will be sent
		 * to SM later, if at all needed. We dequeue the request now. */
		if (req->sa_mad.mad_hdr.method == IB_SA_METHOD_DELETE)
			rc += handle_leave_req(group, req_join_state, req);
		else
			rc += handle_join_req(group, req_join_state, req);
	}

	/* Handle leaves */
	if (group->state == MCAST_IDLE) {
		req_join_state = get_leave_state(group);
		if (req_join_state) {
			group->rec.scope_join_state &= ~req_join_state;
			group->prev_state = group->state;
			if (send_leave_to_wire(group, req_join_state)) {
				group->state = group->prev_state;
				++rc;
			} else
				group->state = MCAST_LEAVE_SENT;
		}
	}

	if (!list_empty(&group->pending_list) && group->state == MCAST_IDLE)
		goto process_requests;
	mutex_unlock(&group->lock);

	while (rc--)
		release_group(group, 0);
}

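/* A join that asked the SA to assign an MGID (MGID 0) is parked on
 * mcg_mgid0_list. When the SM response arrives, find that group by the
 * TID we stored, adopt the MGID the SM chose, and move the group into
 * the rb-tree; on a failed or raced response, tear the group down instead.
 */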
static struct mcast_group *search_relocate_mgid0_group(struct mlx4_ib_demux_ctx *ctx,
						       __be64 tid,
						       union ib_gid *new_mgid)
{
	struct mcast_group *group = NULL, *cur_group, *n;
	struct mcast_req *req;

	mutex_lock(&ctx->mcg_table_lock);
	list_for_each_entry_safe(group, n, &ctx->mcg_mgid0_list, mgid0_list) {
		mutex_lock(&group->lock);
		if (group->last_req_tid == tid) {
			if (memcmp(new_mgid, &mgid0, sizeof mgid0)) {
				group->rec.mgid = *new_mgid;
				sprintf(group->name, "%016llx%016llx",
						be64_to_cpu(group->rec.mgid.global.subnet_prefix),
						be64_to_cpu(group->rec.mgid.global.interface_id));
				list_del_init(&group->mgid0_list);
				cur_group = mcast_insert(ctx, group);
				if (cur_group) {
					/* A race between our code and SM. Silently cleaning the new one */
					req = list_first_entry(&group->pending_list,
							       struct mcast_req, group_list);
					--group->func[req->func].num_pend_reqs;
					list_del(&req->group_list);
					list_del(&req->func_list);
					kfree(req);
					mutex_unlock(&group->lock);
					mutex_unlock(&ctx->mcg_table_lock);
					release_group(group, 0);
					return NULL;
				}

				atomic_inc(&group->refcount);
				add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				return group;
			} else {
				struct mcast_req *tmp1, *tmp2;

				list_del(&group->mgid0_list);
				if (!list_empty(&group->pending_list) && group->state != MCAST_IDLE)
					cancel_delayed_work_sync(&group->timeout_work);

				list_for_each_entry_safe(tmp1, tmp2, &group->pending_list, group_list) {
					list_del(&tmp1->group_list);
					kfree(tmp1);
				}
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				kfree(group);
				return NULL;
			}
		}
		mutex_unlock(&group->lock);
	}
	mutex_unlock(&ctx->mcg_table_lock);

	return NULL;
}

static ssize_t sysfs_show_group(struct device *dev,
				struct device_attribute *attr, char *buf);

static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
					 union ib_gid *mgid, int create)
{
	struct mcast_group *group, *cur_group;
	int is_mgid0;
	int i;

	is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
	if (!is_mgid0) {
		group = mcast_find(ctx, mgid);
		if (group)
			goto found;
	}

	if (!create)
		return ERR_PTR(-ENOENT);

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->demux = ctx;
	group->rec.mgid = *mgid;
	INIT_LIST_HEAD(&group->pending_list);
	INIT_LIST_HEAD(&group->mgid0_list);
	for (i = 0; i < MAX_VFS; ++i)
		INIT_LIST_HEAD(&group->func[i].pending);
	INIT_WORK(&group->work, mlx4_ib_mcg_work_handler);
	INIT_DELAYED_WORK(&group->timeout_work, mlx4_ib_mcg_timeout_handler);
	mutex_init(&group->lock);
	sprintf(group->name, "%016llx%016llx",
		be64_to_cpu(group->rec.mgid.global.subnet_prefix),
		be64_to_cpu(group->rec.mgid.global.interface_id));
	sysfs_attr_init(&group->dentry.attr);
	group->dentry.show = sysfs_show_group;
	group->dentry.store = NULL;
	group->dentry.attr.name = group->name;
	group->dentry.attr.mode = 0400;
	group->state = MCAST_IDLE;

	if (is_mgid0) {
		list_add(&group->mgid0_list, &ctx->mcg_mgid0_list);
		goto found;
	}

	cur_group = mcast_insert(ctx, group);
	if (cur_group) {
		mcg_warn("group just showed up %s - confused\n", cur_group->name);
		kfree(group);
		return ERR_PTR(-EINVAL);
	}

	add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);

found:
	atomic_inc(&group->refcount);
	return group;
}

static void queue_req(struct mcast_req *req)
{
	struct mcast_group *group = req->group;

	atomic_inc(&group->refcount); /* for the request */
	atomic_inc(&group->refcount); /* for scheduling the work */
	list_add_tail(&req->group_list, &group->pending_list);
	list_add_tail(&req->func_list, &group->func[req->func].pending);
	/* calls mlx4_ib_mcg_work_handler */
	if (!queue_work(group->demux->mcg_wq, &group->work))
		safe_atomic_dec(&group->refcount);
}

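/* Demux path: SA responses arriving from the wire are matched to a group
 * (by MGID, or by stored TID for MGID0 joins) and handed to the group
 * worker. Returns 1 when the MAD was consumed here, 0 to pass it through.
 */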
int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
			      struct ib_sa_mad *mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)mad->data;
	struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
	struct mcast_group *group;

	switch (mad->mad_hdr.method) {
	case IB_MGMT_METHOD_GET_RESP:
	case IB_SA_METHOD_DELETE_RESP:
		mutex_lock(&ctx->mcg_table_lock);
		group = acquire_group(ctx, &rec->mgid, 0);
		mutex_unlock(&ctx->mcg_table_lock);
		if (IS_ERR(group)) {
			if (mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP) {
				__be64 tid = mad->mad_hdr.tid;
				*(u8 *)(&tid) = (u8)slave; /* in group we kept the modified TID */
				group = search_relocate_mgid0_group(ctx, tid, &rec->mgid);
			} else
				group = NULL;
		}

		if (!group)
			return 1;

		mutex_lock(&group->lock);
		group->response_sa_mad = *mad;
		group->prev_state = group->state;
		group->state = MCAST_RESP_READY;
		/* calls mlx4_ib_mcg_work_handler */
		atomic_inc(&group->refcount);
		if (!queue_work(ctx->mcg_wq, &group->work))
			safe_atomic_dec(&group->refcount);
		mutex_unlock(&group->lock);
		release_group(group, 0);
		return 1; /* consumed */
	case IB_MGMT_METHOD_SET:
	case IB_SA_METHOD_GET_TABLE:
	case IB_SA_METHOD_GET_TABLE_RESP:
	case IB_SA_METHOD_DELETE:
		return 0; /* not consumed, pass-through to guest over tunnel */
	default:
		mcg_warn("In demux, port %d: unexpected MCMember method: 0x%x, dropping\n",
			port, mad->mad_hdr.method);
		return 1; /* consumed */
	}
}

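/* Multiplex path: join/leave requests coming from a slave are queued on
 * the matching group (created on a SET if needed) and served by the group
 * worker, subject to the per-function pending-request quota.
 */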
int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
				  int slave, struct ib_sa_mad *sa_mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)sa_mad->data;
	struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
	struct mcast_group *group;
	struct mcast_req *req;
	int may_create = 0;

	if (ctx->flushing)
		return -EAGAIN;

	switch (sa_mad->mad_hdr.method) {
	case IB_MGMT_METHOD_SET:
		may_create = 1;
		fallthrough;
	case IB_SA_METHOD_DELETE:
		req = kzalloc(sizeof *req, GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		req->func = slave;
		req->sa_mad = *sa_mad;

		mutex_lock(&ctx->mcg_table_lock);
		group = acquire_group(ctx, &rec->mgid, may_create);
		mutex_unlock(&ctx->mcg_table_lock);
		if (IS_ERR(group)) {
			kfree(req);
			return PTR_ERR(group);
		}
		mutex_lock(&group->lock);
		if (group->func[slave].num_pend_reqs > MAX_PEND_REQS_PER_FUNC) {
			mutex_unlock(&group->lock);
			mcg_debug_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n",
					port, slave, MAX_PEND_REQS_PER_FUNC);
			release_group(group, 0);
			kfree(req);
			return -ENOMEM;
		}
		++group->func[slave].num_pend_reqs;
		req->group = group;
		queue_req(req);
		mutex_unlock(&group->lock);
		release_group(group, 0);
		return 1; /* consumed */
	case IB_SA_METHOD_GET_TABLE:
	case IB_MGMT_METHOD_GET_RESP:
	case IB_SA_METHOD_GET_TABLE_RESP:
	case IB_SA_METHOD_DELETE_RESP:
		return 0; /* not consumed, pass-through */
	default:
		mcg_warn("In multiplex, port %d, func %d: unexpected MCMember method: 0x%x, dropping\n",
			port, slave, sa_mad->mad_hdr.method);
		return 1; /* consumed */
	}
}

static ssize_t sysfs_show_group(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct mcast_group *group =
		container_of(attr, struct mcast_group, dentry);
	struct mcast_req *req = NULL;
	char pending_str[40];
	char state_str[40];
	ssize_t len = 0;
	int f;

	if (group->state == MCAST_IDLE)
		sprintf(state_str, "%s", get_state_string(group->state));
	else
		sprintf(state_str, "%s(TID=0x%llx)",
				get_state_string(group->state),
				be64_to_cpu(group->last_req_tid));
	if (list_empty(&group->pending_list)) {
		sprintf(pending_str, "No");
	} else {
		req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
		sprintf(pending_str, "Yes(TID=0x%llx)",
				be64_to_cpu(req->sa_mad.mad_hdr.tid));
	}
	len += sprintf(buf + len, "%1d [%02d,%02d,%02d] %4d %4s %5s ",
			group->rec.scope_join_state & 0xf,
			group->members[2], group->members[1], group->members[0],
			atomic_read(&group->refcount),
			pending_str,
			state_str);
	for (f = 0; f < MAX_VFS; ++f)
		if (group->func[f].state == MCAST_MEMBER)
			len += sprintf(buf + len, "%d[%1x] ",
					f, group->func[f].join_state);

	len += sprintf(buf + len, "\t\t(%4hx %4x %2x %2x %2x %2x %2x "
			"%4x %4x %2x %2x)\n",
			be16_to_cpu(group->rec.pkey),
			be32_to_cpu(group->rec.qkey),
			(group->rec.mtusel_mtu & 0xc0) >> 6,
			group->rec.mtusel_mtu & 0x3f,
			group->rec.tclass,
			(group->rec.ratesel_rate & 0xc0) >> 6,
			group->rec.ratesel_rate & 0x3f,
			(be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0xf0000000) >> 28,
			(be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x0fffff00) >> 8,
			be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x000000ff,
			group->rec.proxy_join);

	return len;
}

int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
{
	char name[20];

	atomic_set(&ctx->tid, 0);
	sprintf(name, "mlx4_ib_mcg%d", ctx->port);
	ctx->mcg_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
	if (!ctx->mcg_wq)
		return -ENOMEM;

	mutex_init(&ctx->mcg_table_lock);
	ctx->mcg_table = RB_ROOT;
	INIT_LIST_HEAD(&ctx->mcg_mgid0_list);
	ctx->flushing = 0;

	return 0;
}

static void force_clean_group(struct mcast_group *group)
{
	struct mcast_req *req, *tmp;

	list_for_each_entry_safe(req, tmp, &group->pending_list, group_list) {
		list_del(&req->group_list);
		kfree(req);
	}
	del_sysfs_port_mcg_attr(group->demux->dev, group->demux->port, &group->dentry.attr);
	rb_erase(&group->node, &group->demux->mcg_table);
	kfree(group);
}
1072*4882a593Smuzhiyun
static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
{
	int i;
	struct rb_node *p;
	struct mcast_group *group;
	unsigned long end;
	int count;

	for (i = 0; i < MAX_VFS; ++i)
		clean_vf_mcast(ctx, i);

	end = jiffies + msecs_to_jiffies(MAD_TIMEOUT_MS + 3000);
	do {
		count = 0;
		mutex_lock(&ctx->mcg_table_lock);
		for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p))
			++count;
		mutex_unlock(&ctx->mcg_table_lock);
		if (!count)
			break;

		usleep_range(1000, 2000);
	} while (time_after(end, jiffies));

	flush_workqueue(ctx->mcg_wq);
	if (destroy_wq)
		destroy_workqueue(ctx->mcg_wq);

	mutex_lock(&ctx->mcg_table_lock);
	while ((p = rb_first(&ctx->mcg_table)) != NULL) {
		group = rb_entry(p, struct mcast_group, node);
		if (atomic_read(&group->refcount))
			mcg_debug_group(group, "group refcount %d!!! (pointer %p)\n",
					atomic_read(&group->refcount), group);

		force_clean_group(group);
	}
	mutex_unlock(&ctx->mcg_table_lock);
}

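/*
 * When cleanup does not need to run synchronously it is deferred to
 * clean_wq via a clean_work item carrying the demux context and the
 * destroy_wq flag.
 */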
struct clean_work {
	struct work_struct work;
	struct mlx4_ib_demux_ctx *ctx;
	int destroy_wq;
};

static void mcg_clean_task(struct work_struct *work)
{
	struct clean_work *cw = container_of(work, struct clean_work, work);

	_mlx4_ib_mcg_port_cleanup(cw->ctx, cw->destroy_wq);
	cw->ctx->flushing = 0;
	kfree(cw);
}

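/*
 * Entry point for port cleanup. If the caller also wants the port work
 * queue destroyed (full teardown), the cleanup runs synchronously;
 * otherwise it is deferred to clean_wq. The flushing flag keeps a second
 * cleanup from being started while one is already in progress.
 */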
void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
{
	struct clean_work *work;

	if (ctx->flushing)
		return;

	ctx->flushing = 1;

	if (destroy_wq) {
		_mlx4_ib_mcg_port_cleanup(ctx, destroy_wq);
		ctx->flushing = 0;
		return;
	}

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		ctx->flushing = 0;
		return;
	}

	work->ctx = ctx;
	work->destroy_wq = destroy_wq;
	INIT_WORK(&work->work, mcg_clean_task);
	queue_work(clean_wq, &work->work);
}

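/*
 * Turn the request's stored SA MAD into a leave request by setting its
 * method to DELETE; the rest of the MAD is reused as-is.
 */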
static void build_leave_mad(struct mcast_req *req)
{
	struct ib_sa_mad *mad = &req->sa_mad;

	mad->mad_hdr.method = IB_SA_METHOD_DELETE;
}

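/*
 * Drop every pending request a given VF has against this group. If the
 * VF owns the request at the head of the group's pending list while a
 * join/leave MAD is in flight, try to cancel the timeout work; if the
 * cancel fails the timeout handler is already running and will release
 * that request itself, which the 'pend' flag accounts for in the final
 * consistency check. Caller holds the group lock.
 */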
static void clear_pending_reqs(struct mcast_group *group, int vf)
{
	struct mcast_req *req, *tmp, *group_first = NULL;
	int clear;
	int pend = 0;

	if (!list_empty(&group->pending_list))
		group_first = list_first_entry(&group->pending_list, struct mcast_req, group_list);

	list_for_each_entry_safe(req, tmp, &group->func[vf].pending, func_list) {
		clear = 1;
		if (group_first == req &&
		    (group->state == MCAST_JOIN_SENT ||
		     group->state == MCAST_LEAVE_SENT)) {
			clear = cancel_delayed_work(&group->timeout_work);
			pend = !clear;
			group->state = MCAST_IDLE;
		}
		if (clear) {
			--group->func[vf].num_pend_reqs;
			list_del(&req->group_list);
			list_del(&req->func_list);
			kfree(req);
			atomic_dec(&group->refcount);
		}
	}

	if (!pend && (!list_empty(&group->func[vf].pending) || group->func[vf].num_pend_reqs)) {
		mcg_warn_group(group, "DRIVER BUG: list_empty %d, num_pend_reqs %d\n",
			       list_empty(&group->func[vf].pending), group->func[vf].num_pend_reqs);
	}
}

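/*
 * If the VF is still joined to this group, queue a cleanup (leave)
 * request on its behalf, unless the request most recently queued for
 * that VF is already a cleanup request, in which case nothing is needed.
 * Caller holds the group lock.
 */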
static int push_deleting_req(struct mcast_group *group, int slave)
{
	struct mcast_req *req;
	struct mcast_req *pend_req;

	if (!group->func[slave].join_state)
		return 0;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	if (!list_empty(&group->func[slave].pending)) {
		/* the VF's pending list is linked through func_list */
		pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, func_list);
		if (pend_req->clean) {
			kfree(req);
			return 0;
		}
	}

	req->clean = 1;
	req->func = slave;
	req->group = group;
	++group->func[slave].num_pend_reqs;
	build_leave_mad(req);
	queue_req(req);
	return 0;
}

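/*
 * Flush one VF out of every group on the port: under each group's lock,
 * discard the VF's pending requests and, if it is still joined, queue a
 * leave on its behalf.
 */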
void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave)
{
	struct mcast_group *group;
	struct rb_node *p;

	mutex_lock(&ctx->mcg_table_lock);
	for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p)) {
		group = rb_entry(p, struct mcast_group, node);
		mutex_lock(&group->lock);
		if (atomic_read(&group->refcount)) {
			/* clear pending requests of this VF */
			clear_pending_reqs(group, slave);
			push_deleting_req(group, slave);
		}
		mutex_unlock(&group->lock);
	}
	mutex_unlock(&ctx->mcg_table_lock);
}

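/*
 * Module init: create the global ordered work queue on which deferred
 * port cleanups (mcg_clean_task) run.
 */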
int mlx4_ib_mcg_init(void)
{
	clean_wq = alloc_ordered_workqueue("mlx4_ib_mcg", WQ_MEM_RECLAIM);
	if (!clean_wq)
		return -ENOMEM;

	return 0;
}

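/* Module teardown: drain and destroy the cleanup work queue. */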
void mlx4_ib_mcg_destroy(void)
{
	destroy_workqueue(clean_wq);
}