xref: /OK3568_Linux_fs/kernel/drivers/infiniband/sw/rdmavt/mcast.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Copyright(c) 2016 Intel Corporation.
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * This file is provided under a dual BSD/GPLv2 license.  When using or
5*4882a593Smuzhiyun  * redistributing this file, you may do so under either license.
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * GPL LICENSE SUMMARY
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  * This program is free software; you can redistribute it and/or modify
10*4882a593Smuzhiyun  * it under the terms of version 2 of the GNU General Public License as
11*4882a593Smuzhiyun  * published by the Free Software Foundation.
12*4882a593Smuzhiyun  *
13*4882a593Smuzhiyun  * This program is distributed in the hope that it will be useful, but
14*4882a593Smuzhiyun  * WITHOUT ANY WARRANTY; without even the implied warranty of
15*4882a593Smuzhiyun  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16*4882a593Smuzhiyun  * General Public License for more details.
17*4882a593Smuzhiyun  *
18*4882a593Smuzhiyun  * BSD LICENSE
19*4882a593Smuzhiyun  *
20*4882a593Smuzhiyun  * Redistribution and use in source and binary forms, with or without
21*4882a593Smuzhiyun  * modification, are permitted provided that the following conditions
22*4882a593Smuzhiyun  * are met:
23*4882a593Smuzhiyun  *
24*4882a593Smuzhiyun  *  - Redistributions of source code must retain the above copyright
25*4882a593Smuzhiyun  *    notice, this list of conditions and the following disclaimer.
26*4882a593Smuzhiyun  *  - Redistributions in binary form must reproduce the above copyright
27*4882a593Smuzhiyun  *    notice, this list of conditions and the following disclaimer in
28*4882a593Smuzhiyun  *    the documentation and/or other materials provided with the
29*4882a593Smuzhiyun  *    distribution.
30*4882a593Smuzhiyun  *  - Neither the name of Intel Corporation nor the names of its
31*4882a593Smuzhiyun  *    contributors may be used to endorse or promote products derived
32*4882a593Smuzhiyun  *    from this software without specific prior written permission.
33*4882a593Smuzhiyun  *
34*4882a593Smuzhiyun  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35*4882a593Smuzhiyun  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36*4882a593Smuzhiyun  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37*4882a593Smuzhiyun  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38*4882a593Smuzhiyun  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39*4882a593Smuzhiyun  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40*4882a593Smuzhiyun  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41*4882a593Smuzhiyun  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42*4882a593Smuzhiyun  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43*4882a593Smuzhiyun  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44*4882a593Smuzhiyun  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45*4882a593Smuzhiyun  *
46*4882a593Smuzhiyun  */
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun #include <linux/slab.h>
49*4882a593Smuzhiyun #include <linux/sched.h>
50*4882a593Smuzhiyun #include <linux/rculist.h>
51*4882a593Smuzhiyun #include <rdma/rdma_vt.h>
52*4882a593Smuzhiyun #include <rdma/rdmavt_qp.h>
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun #include "mcast.h"
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun /**
57*4882a593Smuzhiyun  * rvt_driver_mcast - init resources for multicast
58*4882a593Smuzhiyun  * @rdi: rvt dev struct
59*4882a593Smuzhiyun  *
60*4882a593Smuzhiyun  * This is per device that registers with rdmavt
61*4882a593Smuzhiyun  */
rvt_driver_mcast_init(struct rvt_dev_info * rdi)62*4882a593Smuzhiyun void rvt_driver_mcast_init(struct rvt_dev_info *rdi)
63*4882a593Smuzhiyun {
64*4882a593Smuzhiyun 	/*
65*4882a593Smuzhiyun 	 * Anything that needs setup for multicast on a per driver or per rdi
66*4882a593Smuzhiyun 	 * basis should be done in here.
67*4882a593Smuzhiyun 	 */
68*4882a593Smuzhiyun 	spin_lock_init(&rdi->n_mcast_grps_lock);
69*4882a593Smuzhiyun }
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun /**
72*4882a593Smuzhiyun  * rvt_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
73*4882a593Smuzhiyun  * @qp: the QP to link
74*4882a593Smuzhiyun  */
rvt_mcast_qp_alloc(struct rvt_qp * qp)75*4882a593Smuzhiyun static struct rvt_mcast_qp *rvt_mcast_qp_alloc(struct rvt_qp *qp)
76*4882a593Smuzhiyun {
77*4882a593Smuzhiyun 	struct rvt_mcast_qp *mqp;
78*4882a593Smuzhiyun 
79*4882a593Smuzhiyun 	mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
80*4882a593Smuzhiyun 	if (!mqp)
81*4882a593Smuzhiyun 		goto bail;
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun 	mqp->qp = qp;
84*4882a593Smuzhiyun 	rvt_get_qp(qp);
85*4882a593Smuzhiyun 
86*4882a593Smuzhiyun bail:
87*4882a593Smuzhiyun 	return mqp;
88*4882a593Smuzhiyun }
89*4882a593Smuzhiyun 
rvt_mcast_qp_free(struct rvt_mcast_qp * mqp)90*4882a593Smuzhiyun static void rvt_mcast_qp_free(struct rvt_mcast_qp *mqp)
91*4882a593Smuzhiyun {
92*4882a593Smuzhiyun 	struct rvt_qp *qp = mqp->qp;
93*4882a593Smuzhiyun 
94*4882a593Smuzhiyun 	/* Notify hfi1_destroy_qp() if it is waiting. */
95*4882a593Smuzhiyun 	rvt_put_qp(qp);
96*4882a593Smuzhiyun 
97*4882a593Smuzhiyun 	kfree(mqp);
98*4882a593Smuzhiyun }
99*4882a593Smuzhiyun 
100*4882a593Smuzhiyun /**
101*4882a593Smuzhiyun  * rvt_mcast_alloc - allocate the multicast GID structure
102*4882a593Smuzhiyun  * @mgid: the multicast GID
103*4882a593Smuzhiyun  * @lid: the multicast LID (host order)
104*4882a593Smuzhiyun  *
105*4882a593Smuzhiyun  * A list of QPs will be attached to this structure.
106*4882a593Smuzhiyun  */
static struct rvt_mcast *rvt_mcast_alloc(union ib_gid *mgid, u16 lid)
{
	struct rvt_mcast *group = kzalloc(sizeof(*group), GFP_KERNEL);

	if (!group)
		return NULL;

	group->mcast_addr.mgid = *mgid;
	group->mcast_addr.lid = lid;

	INIT_LIST_HEAD(&group->qp_list);
	init_waitqueue_head(&group->wait);
	/* No holders yet; rvt_mcast_add() takes the tree's own reference. */
	atomic_set(&group->refcount, 0);

	return group;
}
125*4882a593Smuzhiyun 
rvt_mcast_free(struct rvt_mcast * mcast)126*4882a593Smuzhiyun static void rvt_mcast_free(struct rvt_mcast *mcast)
127*4882a593Smuzhiyun {
128*4882a593Smuzhiyun 	struct rvt_mcast_qp *p, *tmp;
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun 	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
131*4882a593Smuzhiyun 		rvt_mcast_qp_free(p);
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun 	kfree(mcast);
134*4882a593Smuzhiyun }
135*4882a593Smuzhiyun 
136*4882a593Smuzhiyun /**
137*4882a593Smuzhiyun  * rvt_mcast_find - search the global table for the given multicast GID/LID
138*4882a593Smuzhiyun  * NOTE: It is valid to have 1 MLID with multiple MGIDs.  It is not valid
139*4882a593Smuzhiyun  * to have 1 MGID with multiple MLIDs.
140*4882a593Smuzhiyun  * @ibp: the IB port structure
141*4882a593Smuzhiyun  * @mgid: the multicast GID to search for
142*4882a593Smuzhiyun  * @lid: the multicast LID portion of the multicast address (host order)
143*4882a593Smuzhiyun  *
144*4882a593Smuzhiyun  * The caller is responsible for decrementing the reference count if found.
145*4882a593Smuzhiyun  *
146*4882a593Smuzhiyun  * Return: NULL if not found.
147*4882a593Smuzhiyun  */
struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid,
				 u16 lid)
{
	struct rvt_mcast *found = NULL;
	struct rb_node *node;
	unsigned long flags;

	/* ibp->lock guards the rb-tree against concurrent add/detach. */
	spin_lock_irqsave(&ibp->lock, flags);
	for (node = ibp->mcast_tree.rb_node; node;) {
		struct rvt_mcast *cur =
			rb_entry(node, struct rvt_mcast, rb_node);
		int cmp = memcmp(mgid->raw, cur->mcast_addr.mgid.raw,
				 sizeof(*mgid));

		if (cmp < 0) {
			node = node->rb_left;
		} else if (cmp > 0) {
			node = node->rb_right;
		} else {
			/*
			 * MGID matches; the MLID must match too, or the
			 * lookup fails.  Caller drops the reference we take.
			 */
			if (cur->mcast_addr.lid == lid) {
				atomic_inc(&cur->refcount);
				found = cur;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&ibp->lock, flags);
	return found;
}
EXPORT_SYMBOL(rvt_mcast_find);
182*4882a593Smuzhiyun 
183*4882a593Smuzhiyun /**
184*4882a593Smuzhiyun  * rvt_mcast_add - insert mcast GID into table and attach QP struct
185*4882a593Smuzhiyun  * @mcast: the mcast GID table
186*4882a593Smuzhiyun  * @mqp: the QP to attach
187*4882a593Smuzhiyun  *
188*4882a593Smuzhiyun  * Return: zero if both were added.  Return EEXIST if the GID was already in
189*4882a593Smuzhiyun  * the table but the QP was added.  Return ESRCH if the QP was already
190*4882a593Smuzhiyun  * attached and neither structure was added. Return EINVAL if the MGID was
191*4882a593Smuzhiyun  * found, but the MLID did NOT match.
192*4882a593Smuzhiyun  */
rvt_mcast_add(struct rvt_dev_info * rdi,struct rvt_ibport * ibp,struct rvt_mcast * mcast,struct rvt_mcast_qp * mqp)193*4882a593Smuzhiyun static int rvt_mcast_add(struct rvt_dev_info *rdi, struct rvt_ibport *ibp,
194*4882a593Smuzhiyun 			 struct rvt_mcast *mcast, struct rvt_mcast_qp *mqp)
195*4882a593Smuzhiyun {
196*4882a593Smuzhiyun 	struct rb_node **n = &ibp->mcast_tree.rb_node;
197*4882a593Smuzhiyun 	struct rb_node *pn = NULL;
198*4882a593Smuzhiyun 	int ret;
199*4882a593Smuzhiyun 
	/*
	 * Note: error codes are returned as POSITIVE values; the caller
	 * (rvt_attach_mcast) translates them and decides what to free.
	 * ibp->lock serializes all writers of the tree and the QP lists.
	 */
200*4882a593Smuzhiyun 	spin_lock_irq(&ibp->lock);
201*4882a593Smuzhiyun 
	/* Descend the rb-tree looking for an existing group with this MGID. */
202*4882a593Smuzhiyun 	while (*n) {
203*4882a593Smuzhiyun 		struct rvt_mcast *tmcast;
204*4882a593Smuzhiyun 		struct rvt_mcast_qp *p;
205*4882a593Smuzhiyun 
206*4882a593Smuzhiyun 		pn = *n;
207*4882a593Smuzhiyun 		tmcast = rb_entry(pn, struct rvt_mcast, rb_node);
208*4882a593Smuzhiyun 
209*4882a593Smuzhiyun 		ret = memcmp(mcast->mcast_addr.mgid.raw,
210*4882a593Smuzhiyun 			     tmcast->mcast_addr.mgid.raw,
211*4882a593Smuzhiyun 			     sizeof(mcast->mcast_addr.mgid));
212*4882a593Smuzhiyun 		if (ret < 0) {
213*4882a593Smuzhiyun 			n = &pn->rb_left;
214*4882a593Smuzhiyun 			continue;
215*4882a593Smuzhiyun 		}
216*4882a593Smuzhiyun 		if (ret > 0) {
217*4882a593Smuzhiyun 			n = &pn->rb_right;
218*4882a593Smuzhiyun 			continue;
219*4882a593Smuzhiyun 		}
220*4882a593Smuzhiyun 
		/* MGID already present: the MLID must agree with it. */
221*4882a593Smuzhiyun 		if (tmcast->mcast_addr.lid != mcast->mcast_addr.lid) {
222*4882a593Smuzhiyun 			ret = EINVAL;
223*4882a593Smuzhiyun 			goto bail;
224*4882a593Smuzhiyun 		}
225*4882a593Smuzhiyun 
226*4882a593Smuzhiyun 		/* Search the QP list to see if this is already there. */
227*4882a593Smuzhiyun 		list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
228*4882a593Smuzhiyun 			if (p->qp == mqp->qp) {
229*4882a593Smuzhiyun 				ret = ESRCH;
230*4882a593Smuzhiyun 				goto bail;
231*4882a593Smuzhiyun 			}
232*4882a593Smuzhiyun 		}
		/* Enforce the per-group attach limit from device props. */
233*4882a593Smuzhiyun 		if (tmcast->n_attached ==
234*4882a593Smuzhiyun 		    rdi->dparms.props.max_mcast_qp_attach) {
235*4882a593Smuzhiyun 			ret = ENOMEM;
236*4882a593Smuzhiyun 			goto bail;
237*4882a593Smuzhiyun 		}
238*4882a593Smuzhiyun 
239*4882a593Smuzhiyun 		tmcast->n_attached++;
240*4882a593Smuzhiyun 
		/* RCU insert: readers may walk qp_list without ibp->lock. */
241*4882a593Smuzhiyun 		list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
242*4882a593Smuzhiyun 		ret = EEXIST;
243*4882a593Smuzhiyun 		goto bail;
244*4882a593Smuzhiyun 	}
245*4882a593Smuzhiyun 
	/* New group: charge it against the per-device group limit. */
246*4882a593Smuzhiyun 	spin_lock(&rdi->n_mcast_grps_lock);
247*4882a593Smuzhiyun 	if (rdi->n_mcast_grps_allocated == rdi->dparms.props.max_mcast_grp) {
248*4882a593Smuzhiyun 		spin_unlock(&rdi->n_mcast_grps_lock);
249*4882a593Smuzhiyun 		ret = ENOMEM;
250*4882a593Smuzhiyun 		goto bail;
251*4882a593Smuzhiyun 	}
252*4882a593Smuzhiyun 
253*4882a593Smuzhiyun 	rdi->n_mcast_grps_allocated++;
254*4882a593Smuzhiyun 	spin_unlock(&rdi->n_mcast_grps_lock);
255*4882a593Smuzhiyun 
256*4882a593Smuzhiyun 	mcast->n_attached++;
257*4882a593Smuzhiyun 
258*4882a593Smuzhiyun 	list_add_tail_rcu(&mqp->list, &mcast->qp_list);
259*4882a593Smuzhiyun 
	/* The rb-tree itself holds one reference on the group. */
260*4882a593Smuzhiyun 	atomic_inc(&mcast->refcount);
261*4882a593Smuzhiyun 	rb_link_node(&mcast->rb_node, pn, n);
262*4882a593Smuzhiyun 	rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);
263*4882a593Smuzhiyun 
264*4882a593Smuzhiyun 	ret = 0;
265*4882a593Smuzhiyun 
266*4882a593Smuzhiyun bail:
267*4882a593Smuzhiyun 	spin_unlock_irq(&ibp->lock);
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun 	return ret;
270*4882a593Smuzhiyun }
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun /**
273*4882a593Smuzhiyun  * rvt_attach_mcast - attach a qp to a multicast group
274*4882a593Smuzhiyun  * @ibqp: Infiniband qp
275*4882a593Smuzhiyun  * @gid: multicast guid
276*4882a593Smuzhiyun  * @lid: multicast lid
277*4882a593Smuzhiyun  *
278*4882a593Smuzhiyun  * Return: 0 on success
279*4882a593Smuzhiyun  */
int rvt_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
	struct rvt_mcast *mcast;
	struct rvt_mcast_qp *mqp;
	int ret;

	/* Special QPs (0/1) and QPs still in RESET cannot be attached. */
	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
		return -EINVAL;

	/*
	 * Allocate both structures up front, outside any spinlock; one or
	 * both will most likely be needed.
	 */
	mcast = rvt_mcast_alloc(gid, lid);
	if (!mcast)
		return -ENOMEM;

	mqp = rvt_mcast_qp_alloc(qp);
	if (!mqp) {
		rvt_mcast_free(mcast);
		return -ENOMEM;
	}

	/* rvt_mcast_add() reports via positive errno codes; map them. */
	switch (rvt_mcast_add(rdi, ibp, mcast, mqp)) {
	case ESRCH:
		/* Same QP attached twice: succeed, discard both spares. */
		ret = 0;
		break;
	case EEXIST:
		/* Group already existed; only @mqp was linked in. */
		rvt_mcast_free(mcast);
		return 0;
	case ENOMEM:
		/* Group or per-group attach limit exceeded. */
		ret = -ENOMEM;
		break;
	case EINVAL:
		/* MGID found but paired with a different MLID. */
		ret = -EINVAL;
		break;
	default:
		/* 0: both structures were consumed by the table. */
		return 0;
	}

	rvt_mcast_qp_free(mqp);
	rvt_mcast_free(mcast);
	return ret;
}
334*4882a593Smuzhiyun 
335*4882a593Smuzhiyun /**
336*4882a593Smuzhiyun  * rvt_detach_mcast - remove a qp from a multicast group
337*4882a593Smuzhiyun  * @ibqp: Infiniband qp
338*4882a593Smuzhiyun  * @gid: multicast guid
339*4882a593Smuzhiyun  * @lid: multicast lid
340*4882a593Smuzhiyun  *
341*4882a593Smuzhiyun  * Return: 0 on success
342*4882a593Smuzhiyun  */
rvt_detach_mcast(struct ib_qp * ibqp,union ib_gid * gid,u16 lid)343*4882a593Smuzhiyun int rvt_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
344*4882a593Smuzhiyun {
345*4882a593Smuzhiyun 	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
346*4882a593Smuzhiyun 	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
347*4882a593Smuzhiyun 	struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
348*4882a593Smuzhiyun 	struct rvt_mcast *mcast = NULL;
349*4882a593Smuzhiyun 	struct rvt_mcast_qp *p, *tmp, *delp = NULL;
350*4882a593Smuzhiyun 	struct rb_node *n;
351*4882a593Smuzhiyun 	int last = 0;
352*4882a593Smuzhiyun 	int ret = 0;
353*4882a593Smuzhiyun 
	/* Special QPs (0/1) are never attached to multicast groups. */
354*4882a593Smuzhiyun 	if (ibqp->qp_num <= 1)
355*4882a593Smuzhiyun 		return -EINVAL;
356*4882a593Smuzhiyun 
357*4882a593Smuzhiyun 	spin_lock_irq(&ibp->lock);
358*4882a593Smuzhiyun 
359*4882a593Smuzhiyun 	/* Find the GID in the mcast table. */
360*4882a593Smuzhiyun 	n = ibp->mcast_tree.rb_node;
361*4882a593Smuzhiyun 	while (1) {
362*4882a593Smuzhiyun 		if (!n) {
363*4882a593Smuzhiyun 			spin_unlock_irq(&ibp->lock);
364*4882a593Smuzhiyun 			return -EINVAL;
365*4882a593Smuzhiyun 		}
366*4882a593Smuzhiyun 
367*4882a593Smuzhiyun 		mcast = rb_entry(n, struct rvt_mcast, rb_node);
368*4882a593Smuzhiyun 		ret = memcmp(gid->raw, mcast->mcast_addr.mgid.raw,
369*4882a593Smuzhiyun 			     sizeof(*gid));
370*4882a593Smuzhiyun 		if (ret < 0) {
371*4882a593Smuzhiyun 			n = n->rb_left;
372*4882a593Smuzhiyun 		} else if (ret > 0) {
373*4882a593Smuzhiyun 			n = n->rb_right;
374*4882a593Smuzhiyun 		} else {
375*4882a593Smuzhiyun 			/* MGID/MLID must match */
376*4882a593Smuzhiyun 			if (mcast->mcast_addr.lid != lid) {
377*4882a593Smuzhiyun 				spin_unlock_irq(&ibp->lock);
378*4882a593Smuzhiyun 				return -EINVAL;
379*4882a593Smuzhiyun 			}
380*4882a593Smuzhiyun 			break;
381*4882a593Smuzhiyun 		}
382*4882a593Smuzhiyun 	}
383*4882a593Smuzhiyun 
384*4882a593Smuzhiyun 	/* Search the QP list. */
385*4882a593Smuzhiyun 	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
386*4882a593Smuzhiyun 		if (p->qp != qp)
387*4882a593Smuzhiyun 			continue;
388*4882a593Smuzhiyun 		/*
389*4882a593Smuzhiyun 		 * We found it, so remove it, but don't poison the forward
390*4882a593Smuzhiyun 		 * link until we are sure there are no list walkers.
391*4882a593Smuzhiyun 		 */
392*4882a593Smuzhiyun 		list_del_rcu(&p->list);
393*4882a593Smuzhiyun 		mcast->n_attached--;
394*4882a593Smuzhiyun 		delp = p;
395*4882a593Smuzhiyun 
396*4882a593Smuzhiyun 		/* If this was the last attached QP, remove the GID too. */
397*4882a593Smuzhiyun 		if (list_empty(&mcast->qp_list)) {
398*4882a593Smuzhiyun 			rb_erase(&mcast->rb_node, &ibp->mcast_tree);
399*4882a593Smuzhiyun 			last = 1;
400*4882a593Smuzhiyun 		}
401*4882a593Smuzhiyun 		break;
402*4882a593Smuzhiyun 	}
403*4882a593Smuzhiyun 
404*4882a593Smuzhiyun 	spin_unlock_irq(&ibp->lock);
405*4882a593Smuzhiyun 	/* QP not attached */
406*4882a593Smuzhiyun 	if (!delp)
407*4882a593Smuzhiyun 		return -EINVAL;
408*4882a593Smuzhiyun 
409*4882a593Smuzhiyun 	/*
410*4882a593Smuzhiyun 	 * Wait for any list walkers to finish before freeing the
411*4882a593Smuzhiyun 	 * list element.
412*4882a593Smuzhiyun 	 */
413*4882a593Smuzhiyun 	wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
414*4882a593Smuzhiyun 	rvt_mcast_qp_free(delp);
415*4882a593Smuzhiyun 
416*4882a593Smuzhiyun 	if (last) {
		/*
		 * Drop the tree's own reference (taken in rvt_mcast_add),
		 * wait for the last transient holder to finish, then free
		 * the group and release its slot in the per-device count.
		 */
417*4882a593Smuzhiyun 		atomic_dec(&mcast->refcount);
418*4882a593Smuzhiyun 		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
419*4882a593Smuzhiyun 		rvt_mcast_free(mcast);
420*4882a593Smuzhiyun 		spin_lock_irq(&rdi->n_mcast_grps_lock);
421*4882a593Smuzhiyun 		rdi->n_mcast_grps_allocated--;
422*4882a593Smuzhiyun 		spin_unlock_irq(&rdi->n_mcast_grps_lock);
423*4882a593Smuzhiyun 	}
424*4882a593Smuzhiyun 
425*4882a593Smuzhiyun 	return 0;
426*4882a593Smuzhiyun }
427*4882a593Smuzhiyun 
428*4882a593Smuzhiyun /**
429*4882a593Smuzhiyun  * rvt_mcast_tree_empty - determine if any qps are attached to any mcast group
430*4882a593Smuzhiyun  * @rdi: rvt dev struct
431*4882a593Smuzhiyun  *
432*4882a593Smuzhiyun  * Return: in use count
433*4882a593Smuzhiyun  */
rvt_mcast_tree_empty(struct rvt_dev_info * rdi)434*4882a593Smuzhiyun int rvt_mcast_tree_empty(struct rvt_dev_info *rdi)
435*4882a593Smuzhiyun {
436*4882a593Smuzhiyun 	int i;
437*4882a593Smuzhiyun 	int in_use = 0;
438*4882a593Smuzhiyun 
439*4882a593Smuzhiyun 	for (i = 0; i < rdi->dparms.nports; i++)
440*4882a593Smuzhiyun 		if (rdi->ports[i]->mcast_tree.rb_node)
441*4882a593Smuzhiyun 			in_use++;
442*4882a593Smuzhiyun 	return in_use;
443*4882a593Smuzhiyun }
444