Lines Matching refs:mcast

74 struct ipoib_mcast *mcast, in __ipoib_mcast_schedule_join_thread() argument
85 if (mcast && delay) { in __ipoib_mcast_schedule_join_thread()
89 mcast->backoff *= 2; in __ipoib_mcast_schedule_join_thread()
90 if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS) in __ipoib_mcast_schedule_join_thread()
91 mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS; in __ipoib_mcast_schedule_join_thread()
92 mcast->delay_until = jiffies + (mcast->backoff * HZ); in __ipoib_mcast_schedule_join_thread()
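
The fragment at source lines 85-92 doubles mcast->backoff after each failed join, caps it at IPOIB_MAX_BACKOFF_SECONDS, and turns it into an absolute deadline in jiffies. Below is a minimal userspace sketch of that capped exponential backoff; backoff_state, MAX_BACKOFF_SECONDS and schedule_retry are illustrative names rather than the driver's own, and plain seconds stand in for jiffies and HZ.

#include <stdio.h>

#define MAX_BACKOFF_SECONDS 16   /* stand-in for IPOIB_MAX_BACKOFF_SECONDS */

struct backoff_state {
	unsigned int backoff;       /* seconds to wait before the next retry */
	unsigned long delay_until;  /* absolute deadline, in seconds here */
};

static void schedule_retry(struct backoff_state *s, unsigned long now)
{
	/* Double the delay, clamp it, then record an absolute deadline. */
	s->backoff *= 2;
	if (s->backoff > MAX_BACKOFF_SECONDS)
		s->backoff = MAX_BACKOFF_SECONDS;
	s->delay_until = now + s->backoff;
}

int main(void)
{
	struct backoff_state s = { .backoff = 1, .delay_until = 0 };

	for (int i = 0; i < 6; i++) {
		schedule_retry(&s, 0);
		printf("retry in %us (until t=%lu)\n", s.backoff, s.delay_until);
	}
	return 0;
}
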
112 static void ipoib_mcast_free(struct ipoib_mcast *mcast) in ipoib_mcast_free() argument
114 struct net_device *dev = mcast->dev; in ipoib_mcast_free()
118 mcast->mcmember.mgid.raw); in ipoib_mcast_free()
121 ipoib_del_neighs_by_gid(dev, mcast->mcmember.mgid.raw); in ipoib_mcast_free()
123 if (mcast->ah) in ipoib_mcast_free()
124 ipoib_put_ah(mcast->ah); in ipoib_mcast_free()
126 while (!skb_queue_empty(&mcast->pkt_queue)) { in ipoib_mcast_free()
128 dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); in ipoib_mcast_free()
135 kfree(mcast); in ipoib_mcast_free()
140 struct ipoib_mcast *mcast; in ipoib_mcast_alloc() local
142 mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC); in ipoib_mcast_alloc()
143 if (!mcast) in ipoib_mcast_alloc()
146 mcast->dev = dev; in ipoib_mcast_alloc()
147 mcast->created = jiffies; in ipoib_mcast_alloc()
148 mcast->delay_until = jiffies; in ipoib_mcast_alloc()
149 mcast->backoff = 1; in ipoib_mcast_alloc()
151 INIT_LIST_HEAD(&mcast->list); in ipoib_mcast_alloc()
152 INIT_LIST_HEAD(&mcast->neigh_list); in ipoib_mcast_alloc()
153 skb_queue_head_init(&mcast->pkt_queue); in ipoib_mcast_alloc()
155 return mcast; in ipoib_mcast_alloc()
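
ipoib_mcast_alloc() zeroes the entry with kzalloc(GFP_ATOMIC), stamps the creation time, seeds backoff at 1 and initializes the list heads and the pending-packet queue; ipoib_mcast_free() (source lines 112-135 above) drops the address handle if one exists, drains anything still queued and frees the entry. A hedged userspace sketch of that alloc/drain/free lifecycle follows, with calloc standing in for kzalloc and a trivial singly linked packet queue standing in for struct sk_buff_head; all names are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct pkt {                      /* stand-in for struct sk_buff */
	struct pkt *next;
};

struct mcast_entry {              /* loosely modeled on struct ipoib_mcast */
	time_t created;
	unsigned int backoff;
	struct pkt *pkt_queue;        /* packets waiting for the join to finish */
};

static struct mcast_entry *mcast_entry_alloc(void)
{
	struct mcast_entry *m = calloc(1, sizeof(*m));  /* like kzalloc() */

	if (!m)
		return NULL;
	m->created = time(NULL);
	m->backoff = 1;
	return m;
}

static void mcast_entry_free(struct mcast_entry *m)
{
	/* Drop anything still queued, mirroring the dev_kfree_skb_any() loop. */
	while (m->pkt_queue) {
		struct pkt *p = m->pkt_queue;

		m->pkt_queue = p->next;
		free(p);
	}
	free(m);
}

int main(void)
{
	struct mcast_entry *m = mcast_entry_alloc();

	if (!m)
		return 1;
	printf("entry created at %ld, backoff=%u\n", (long)m->created, m->backoff);
	mcast_entry_free(m);
	return 0;
}
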
164 struct ipoib_mcast *mcast; in __ipoib_mcast_find() local
167 mcast = rb_entry(n, struct ipoib_mcast, rb_node); in __ipoib_mcast_find()
169 ret = memcmp(mgid, mcast->mcmember.mgid.raw, in __ipoib_mcast_find()
176 return mcast; in __ipoib_mcast_find()
182 static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast) in __ipoib_mcast_add() argument
194 ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw, in __ipoib_mcast_add()
204 rb_link_node(&mcast->rb_node, pn, n); in __ipoib_mcast_add()
205 rb_insert_color(&mcast->rb_node, &priv->multicast_tree); in __ipoib_mcast_add()
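
__ipoib_mcast_find() and __ipoib_mcast_add() keep groups in priv->multicast_tree, a red-black tree ordered by memcmp() over the 16-byte MGID. The sketch below shows the same ordered find/insert logic with a plain (unbalanced) binary search tree, since the kernel's rb_node/rb_link_node/rb_insert_color helpers are not available in userspace; node, gid_find and gid_insert are illustrative names, not driver symbols.

#include <stdio.h>
#include <string.h>

#define GID_LEN 16   /* same width as the raw MGID bytes */

struct node {
	unsigned char gid[GID_LEN];
	struct node *left, *right;
};

/* Walk the tree as __ipoib_mcast_find() does: memcmp() decides the branch. */
static struct node *gid_find(struct node *root, const unsigned char *gid)
{
	while (root) {
		int ret = memcmp(gid, root->gid, GID_LEN);

		if (ret < 0)
			root = root->left;
		else if (ret > 0)
			root = root->right;
		else
			return root;
	}
	return NULL;
}

/* Insert keyed on the same comparison; a duplicate fails, as in __ipoib_mcast_add(). */
static int gid_insert(struct node **rootp, struct node *n)
{
	while (*rootp) {
		int ret = memcmp(n->gid, (*rootp)->gid, GID_LEN);

		if (ret < 0)
			rootp = &(*rootp)->left;
		else if (ret > 0)
			rootp = &(*rootp)->right;
		else
			return -1;
	}
	n->left = n->right = NULL;
	*rootp = n;
	return 0;
}

int main(void)
{
	struct node a = { .gid = { 0xff, 0x12 } }, b = { .gid = { 0xff, 0x13 } };
	struct node *root = NULL;

	gid_insert(&root, &a);
	gid_insert(&root, &b);
	printf("found: %s\n", gid_find(root, b.gid) ? "yes" : "no");
	return 0;
}
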
210 static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, in ipoib_mcast_join_finish() argument
213 struct net_device *dev = mcast->dev; in ipoib_mcast_join_finish()
222 mcast->mcmember = *mcmember; in ipoib_mcast_join_finish()
227 if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4, in ipoib_mcast_join_finish()
256 if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { in ipoib_mcast_join_finish()
257 if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { in ipoib_mcast_join_finish()
259 mcast->mcmember.mgid.raw); in ipoib_mcast_join_finish()
264 ret = rn->attach_mcast(dev, priv->ca, &mcast->mcmember.mgid, in ipoib_mcast_join_finish()
265 be16_to_cpu(mcast->mcmember.mlid), in ipoib_mcast_join_finish()
269 mcast->mcmember.mgid.raw); in ipoib_mcast_join_finish()
271 clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags); in ipoib_mcast_join_finish()
278 rdma_ah_set_dlid(&av, be16_to_cpu(mcast->mcmember.mlid)), in ipoib_mcast_join_finish()
280 rdma_ah_set_sl(&av, mcast->mcmember.sl); in ipoib_mcast_join_finish()
281 rdma_ah_set_static_rate(&av, mcast->mcmember.rate); in ipoib_mcast_join_finish()
283 rdma_ah_set_grh(&av, &mcast->mcmember.mgid, in ipoib_mcast_join_finish()
284 be32_to_cpu(mcast->mcmember.flow_label), in ipoib_mcast_join_finish()
285 0, mcast->mcmember.hop_limit, in ipoib_mcast_join_finish()
286 mcast->mcmember.traffic_class); in ipoib_mcast_join_finish()
296 mcast->ah = ah; in ipoib_mcast_join_finish()
300 mcast->mcmember.mgid.raw, in ipoib_mcast_join_finish()
301 mcast->ah->ah, in ipoib_mcast_join_finish()
302 be16_to_cpu(mcast->mcmember.mlid), in ipoib_mcast_join_finish()
303 mcast->mcmember.sl); in ipoib_mcast_join_finish()
307 while (!skb_queue_empty(&mcast->pkt_queue)) { in ipoib_mcast_join_finish()
308 struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue); in ipoib_mcast_join_finish()
369 struct ipoib_mcast *mcast = multicast->context; in ipoib_mcast_join_complete() local
370 struct net_device *dev = mcast->dev; in ipoib_mcast_join_complete()
374 test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? in ipoib_mcast_join_complete()
376 mcast->mcmember.mgid.raw, status); in ipoib_mcast_join_complete()
385 status = ipoib_mcast_join_finish(mcast, &multicast->rec); in ipoib_mcast_join_complete()
388 mcast->backoff = 1; in ipoib_mcast_join_complete()
389 mcast->delay_until = jiffies; in ipoib_mcast_join_complete()
398 if (mcast == priv->broadcast) { in ipoib_mcast_join_complete()
406 test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) && in ipoib_mcast_join_complete()
409 if (mcast->logcount < 20) { in ipoib_mcast_join_complete()
413 test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "", in ipoib_mcast_join_complete()
414 mcast->mcmember.mgid.raw, status); in ipoib_mcast_join_complete()
417 test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "", in ipoib_mcast_join_complete()
418 mcast->mcmember.mgid.raw, status); in ipoib_mcast_join_complete()
422 mcast->logcount++; in ipoib_mcast_join_complete()
425 if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) && in ipoib_mcast_join_complete()
426 mcast->backoff >= 2) { in ipoib_mcast_join_complete()
436 mcast->backoff = 1; in ipoib_mcast_join_complete()
438 while (!skb_queue_empty(&mcast->pkt_queue)) { in ipoib_mcast_join_complete()
440 dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); in ipoib_mcast_join_complete()
446 __ipoib_mcast_schedule_join_thread(priv, mcast, 1); in ipoib_mcast_join_complete()
458 mcast->mc = NULL; in ipoib_mcast_join_complete()
460 mcast->mc = multicast; in ipoib_mcast_join_complete()
461 clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); in ipoib_mcast_join_complete()
463 complete(&mcast->done); in ipoib_mcast_join_complete()
471 static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) in ipoib_mcast_join() argument
485 init_completion(&mcast->done); in ipoib_mcast_join()
486 set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); in ipoib_mcast_join()
488 ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw); in ipoib_mcast_join()
490 rec.mgid = mcast->mcmember.mgid; in ipoib_mcast_join()
500 if (mcast != priv->broadcast) { in ipoib_mcast_join()
540 if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) && in ipoib_mcast_join()
549 ipoib_mcast_join_complete, mcast); in ipoib_mcast_join()
555 __ipoib_mcast_schedule_join_thread(priv, mcast, 1); in ipoib_mcast_join()
556 clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); in ipoib_mcast_join()
558 complete(&mcast->done); in ipoib_mcast_join()
571 struct ipoib_mcast *mcast = NULL; in ipoib_mcast_join_task() local
624 mcast = priv->broadcast; in ipoib_mcast_join_task()
625 if (mcast->backoff > 1 && in ipoib_mcast_join_task()
626 time_before(jiffies, mcast->delay_until)) { in ipoib_mcast_join_task()
627 delay_until = mcast->delay_until; in ipoib_mcast_join_task()
628 mcast = NULL; in ipoib_mcast_join_task()
638 list_for_each_entry(mcast, &priv->multicast_list, list) { in ipoib_mcast_join_task()
639 if (IS_ERR_OR_NULL(mcast->mc) && in ipoib_mcast_join_task()
640 !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) && in ipoib_mcast_join_task()
641 (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) || in ipoib_mcast_join_task()
642 !skb_queue_empty(&mcast->pkt_queue))) { in ipoib_mcast_join_task()
643 if (mcast->backoff == 1 || in ipoib_mcast_join_task()
644 time_after_eq(jiffies, mcast->delay_until)) { in ipoib_mcast_join_task()
646 if (ipoib_mcast_join(dev, mcast)) { in ipoib_mcast_join_task()
651 time_before(mcast->delay_until, delay_until)) in ipoib_mcast_join_task()
652 delay_until = mcast->delay_until; in ipoib_mcast_join_task()
656 mcast = NULL; in ipoib_mcast_join_task()
665 if (mcast) in ipoib_mcast_join_task()
666 ipoib_mcast_join(dev, mcast); in ipoib_mcast_join_task()
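
ipoib_mcast_join_task() walks priv->multicast_list, skips entries whose backoff deadline has not yet passed, issues joins for the ones that are due, and remembers the earliest delay_until so the work can be rearmed at the right time. The following is a simplified, hedged rendering of that scheduling loop, with an array and plain seconds in place of the kernel list, jiffies and the workqueue; try_join is a stub, and the broadcast-group special casing is omitted.

#include <stdio.h>
#include <stdbool.h>

struct group {
	const char *name;
	bool joined;
	unsigned int backoff;       /* seconds */
	unsigned long delay_until;  /* absolute time, seconds */
};

static int try_join(struct group *g)
{
	g->joined = true;           /* stub: pretend the join request was issued */
	return 0;
}

/* Returns the earliest time the pass should run again, or 0 if nothing is waiting. */
static unsigned long join_pass(struct group *groups, int n, unsigned long now)
{
	unsigned long rearm = 0;

	for (int i = 0; i < n; i++) {
		struct group *g = &groups[i];

		if (g->joined)
			continue;
		if (g->backoff == 1 || now >= g->delay_until) {
			if (try_join(g))
				break;                  /* a non-zero return ends the pass, roughly as the driver returns early */
		} else if (!rearm || g->delay_until < rearm) {
			rearm = g->delay_until;         /* still backing off: remember the earliest deadline */
		}
	}
	return rearm;
}

int main(void)
{
	struct group groups[] = {
		{ "ff12::1", false, 1, 0 },
		{ "ff12::2", false, 4, 100 },   /* still in backoff until t=100 */
	};
	unsigned long rearm = join_pass(groups, 2, 10);

	printf("rearm at t=%lu\n", rearm);
	return 0;
}
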
692 static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) in ipoib_mcast_leave() argument
698 if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) in ipoib_mcast_leave()
701 if (!IS_ERR_OR_NULL(mcast->mc)) in ipoib_mcast_leave()
702 ib_sa_free_multicast(mcast->mc); in ipoib_mcast_leave()
704 if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { in ipoib_mcast_leave()
706 mcast->mcmember.mgid.raw); in ipoib_mcast_leave()
709 ret = rn->detach_mcast(dev, priv->ca, &mcast->mcmember.mgid, in ipoib_mcast_leave()
710 be16_to_cpu(mcast->mcmember.mlid)); in ipoib_mcast_leave()
713 } else if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) in ipoib_mcast_leave()
729 struct ipoib_mcast *mcast = __ipoib_mcast_find(priv->dev, mgid); in ipoib_check_and_add_mcast_sendonly() local
731 if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { in ipoib_check_and_add_mcast_sendonly()
732 list_del(&mcast->list); in ipoib_check_and_add_mcast_sendonly()
733 rb_erase(&mcast->rb_node, &priv->multicast_tree); in ipoib_check_and_add_mcast_sendonly()
734 list_add_tail(&mcast->list, remove_list); in ipoib_check_and_add_mcast_sendonly()
741 struct ipoib_mcast *mcast, *tmcast; in ipoib_mcast_remove_list() local
747 list_for_each_entry_safe(mcast, tmcast, remove_list, list) in ipoib_mcast_remove_list()
748 if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) in ipoib_mcast_remove_list()
749 wait_for_completion(&mcast->done); in ipoib_mcast_remove_list()
751 list_for_each_entry_safe(mcast, tmcast, remove_list, list) { in ipoib_mcast_remove_list()
752 ipoib_mcast_leave(mcast->dev, mcast); in ipoib_mcast_remove_list()
753 ipoib_mcast_free(mcast); in ipoib_mcast_remove_list()
761 struct ipoib_mcast *mcast; in ipoib_mcast_send() local
775 mcast = __ipoib_mcast_find(dev, mgid); in ipoib_mcast_send()
776 if (!mcast || !mcast->ah) { in ipoib_mcast_send()
777 if (!mcast) { in ipoib_mcast_send()
782 mcast = ipoib_mcast_alloc(dev); in ipoib_mcast_send()
783 if (!mcast) { in ipoib_mcast_send()
791 set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags); in ipoib_mcast_send()
792 memcpy(mcast->mcmember.mgid.raw, mgid, in ipoib_mcast_send()
794 __ipoib_mcast_add(dev, mcast); in ipoib_mcast_send()
795 list_add_tail(&mcast->list, &priv->multicast_list); in ipoib_mcast_send()
797 if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) { in ipoib_mcast_send()
800 skb_queue_tail(&mcast->pkt_queue, skb); in ipoib_mcast_send()
805 if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) { in ipoib_mcast_send()
820 kref_get(&mcast->ah->ref); in ipoib_mcast_send()
821 neigh->ah = mcast->ah; in ipoib_mcast_send()
823 list_add_tail(&neigh->list, &mcast->neigh_list); in ipoib_mcast_send()
827 mcast->ah->last_send = rn->send(dev, skb, mcast->ah->ah, in ipoib_mcast_send()
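
ipoib_mcast_send() looks the group up by MGID, allocates a send-only entry if none exists yet, and while the group has no address handle it queues the packet, but only up to IPOIB_MAX_MCAST_QUEUE before dropping; once the handle exists the packet is transmitted directly. A hedged sketch of that queue-or-drop decision follows; MAX_PENDING, txgroup and the counters are illustrative stand-ins.

#include <stdio.h>
#include <stdbool.h>

#define MAX_PENDING 3   /* stand-in for IPOIB_MAX_MCAST_QUEUE */

struct txgroup {
	bool resolved;        /* true once an address handle exists (mcast->ah) */
	unsigned int queued;  /* packets parked until the join finishes */
	unsigned int sent;
	unsigned int dropped; /* accounted as tx_dropped in the driver */
};

static void mcast_tx(struct txgroup *g)
{
	if (!g->resolved) {
		if (g->queued < MAX_PENDING)
			g->queued++;          /* park the packet until the join completes */
		else
			g->dropped++;         /* queue full: drop, as the driver does */
		return;
	}
	g->sent++;                        /* handle ready: transmit immediately */
}

int main(void)
{
	struct txgroup g = { 0 };

	for (int i = 0; i < 5; i++)
		mcast_tx(&g);                 /* unresolved: 3 queued, 2 dropped */
	g.resolved = true;
	mcast_tx(&g);                     /* resolved: sent directly */
	printf("queued=%u dropped=%u sent=%u\n", g.queued, g.dropped, g.sent);
	return 0;
}
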
842 struct ipoib_mcast *mcast, *tmcast; in ipoib_mcast_dev_flush() local
850 list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) { in ipoib_mcast_dev_flush()
851 list_del(&mcast->list); in ipoib_mcast_dev_flush()
852 rb_erase(&mcast->rb_node, &priv->multicast_tree); in ipoib_mcast_dev_flush()
853 list_add_tail(&mcast->list, &remove_list); in ipoib_mcast_dev_flush()
885 struct ipoib_mcast *mcast, *tmcast; in ipoib_mcast_restart_task() local
908 list_for_each_entry(mcast, &priv->multicast_list, list) in ipoib_mcast_restart_task()
909 clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags); in ipoib_mcast_restart_task()
920 mcast = __ipoib_mcast_find(dev, &mgid); in ipoib_mcast_restart_task()
921 if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { in ipoib_mcast_restart_task()
946 if (mcast) { in ipoib_mcast_restart_task()
948 list_move_tail(&mcast->list, &remove_list); in ipoib_mcast_restart_task()
950 rb_replace_node(&mcast->rb_node, in ipoib_mcast_restart_task()
959 if (mcast) in ipoib_mcast_restart_task()
960 set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags); in ipoib_mcast_restart_task()
964 list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) { in ipoib_mcast_restart_task()
965 if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) && in ipoib_mcast_restart_task()
966 !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { in ipoib_mcast_restart_task()
968 mcast->mcmember.mgid.raw); in ipoib_mcast_restart_task()
970 rb_erase(&mcast->rb_node, &priv->multicast_tree); in ipoib_mcast_restart_task()
973 list_move_tail(&mcast->list, &remove_list); in ipoib_mcast_restart_task()
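
ipoib_mcast_restart_task() first clears IPOIB_MCAST_FLAG_FOUND on every tracked group, then walks the device's current multicast address list, marking each group it still finds (and creating entries for new addresses), and finally moves anything left unmarked, and not send-only, onto a remove list for leave/free. A small sketch of that mark-and-sweep follows, using a found flag over an array instead of the kernel list and rb-tree; the creation of new entries is omitted and all names are illustrative.

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct tracked {
	char addr[8];
	bool found;
	bool send_only;
	bool removed;
};

static void restart_pass(struct tracked *t, int n,
			 const char (*current)[8], int ncur)
{
	/* Phase 1: clear the FOUND mark on everything we track. */
	for (int i = 0; i < n; i++)
		t[i].found = false;

	/* Phase 2: mark every tracked group still on the device list. */
	for (int c = 0; c < ncur; c++)
		for (int i = 0; i < n; i++)
			if (!strcmp(t[i].addr, current[c]))
				t[i].found = true;

	/* Phase 3: sweep unmarked, non-send-only groups onto the remove list. */
	for (int i = 0; i < n; i++)
		if (!t[i].found && !t[i].send_only)
			t[i].removed = true;
}

int main(void)
{
	struct tracked t[] = {
		{ "grp-a", false, false, false },
		{ "grp-b", false, false, false },   /* gone from the device list */
		{ "grp-c", false, true,  false },   /* send-only: kept even if unlisted */
	};
	const char current[][8] = { "grp-a" };

	restart_pass(t, 3, current, 1);
	for (int i = 0; i < 3; i++)
		printf("%s: %s\n", t[i].addr, t[i].removed ? "remove" : "keep");
	return 0;
}
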
1017 struct ipoib_mcast *mcast; in ipoib_mcast_iter_next() local
1025 mcast = rb_entry(n, struct ipoib_mcast, rb_node); in ipoib_mcast_iter_next()
1027 if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw, in ipoib_mcast_iter_next()
1029 iter->mgid = mcast->mcmember.mgid; in ipoib_mcast_iter_next()
1030 iter->created = mcast->created; in ipoib_mcast_iter_next()
1031 iter->queuelen = skb_queue_len(&mcast->pkt_queue); in ipoib_mcast_iter_next()
1032 iter->complete = !!mcast->ah; in ipoib_mcast_iter_next()
1033 iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY)); in ipoib_mcast_iter_next()