/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, 2014-2017, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun #include <linux/tipc_config.h>
39*4882a593Smuzhiyun #include "socket.h"
40*4882a593Smuzhiyun #include "msg.h"
41*4882a593Smuzhiyun #include "bcast.h"
42*4882a593Smuzhiyun #include "link.h"
43*4882a593Smuzhiyun #include "name_table.h"
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun #define BCLINK_WIN_DEFAULT 50 /* bcast link window size (default) */
46*4882a593Smuzhiyun #define BCLINK_WIN_MIN 32 /* bcast minimum link window size */
47*4882a593Smuzhiyun
48*4882a593Smuzhiyun const char tipc_bclink_name[] = "broadcast-link";
49*4882a593Smuzhiyun unsigned long sysctl_tipc_bc_retruni __read_mostly;
50*4882a593Smuzhiyun
/**
 * struct tipc_bc_base - base structure for keeping broadcast send state
 * @link: broadcast send link structure
 * @inputq: data input queue; will only carry SOCK_WAKEUP messages
 * @dests: array keeping number of reachable destinations per bearer
 * @primary_bearer: a bearer having links to all broadcast destinations, if any
 * @bcast_support: indicates if primary bearer, if any, supports broadcast
 * @force_bcast: forces broadcast for multicast traffic
 * @rcast_support: indicates if all peer nodes support replicast
 * @force_rcast: forces replicast for multicast traffic
 * @rc_ratio: dest count as percentage of cluster size where send method changes
 * @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast
 */
struct tipc_bc_base {
	struct tipc_link *link;
	struct sk_buff_head inputq;
	int dests[MAX_BEARERS];
	int primary_bearer;
	bool bcast_support;
	bool force_bcast;
	bool rcast_support;
	bool force_rcast;
	int rc_ratio;
	int bc_threshold;
};
76*4882a593Smuzhiyun
tipc_bc_base(struct net * net)77*4882a593Smuzhiyun static struct tipc_bc_base *tipc_bc_base(struct net *net)
78*4882a593Smuzhiyun {
79*4882a593Smuzhiyun return tipc_net(net)->bcbase;
80*4882a593Smuzhiyun }
81*4882a593Smuzhiyun
/* tipc_bcast_get_mtu(): get the MTU currently used by the broadcast link.
 * The returned value leaves room for a tunnel header, in case the message
 * must go out as replicast instead.
 */
int tipc_bcast_get_mtu(struct net *net)
{
	struct tipc_link *bcl = tipc_bc_sndlink(net);

	return tipc_link_mss(bcl);
}
90*4882a593Smuzhiyun
/* tipc_bcast_toggle_rcast - record whether all peers support replicast */
void tipc_bcast_toggle_rcast(struct net *net, bool supp)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);

	bb->rcast_support = supp;
}
95*4882a593Smuzhiyun
tipc_bcbase_calc_bc_threshold(struct net * net)96*4882a593Smuzhiyun static void tipc_bcbase_calc_bc_threshold(struct net *net)
97*4882a593Smuzhiyun {
98*4882a593Smuzhiyun struct tipc_bc_base *bb = tipc_bc_base(net);
99*4882a593Smuzhiyun int cluster_size = tipc_link_bc_peers(tipc_bc_sndlink(net));
100*4882a593Smuzhiyun
101*4882a593Smuzhiyun bb->bc_threshold = 1 + (cluster_size * bb->rc_ratio / 100);
102*4882a593Smuzhiyun }
103*4882a593Smuzhiyun
/* tipc_bcbase_select_primary(): find a bearer with links to all destinations,
 * if any, and make it primary bearer
 */
static void tipc_bcbase_select_primary(struct net *net)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);
	int all_dests = tipc_link_bc_peers(bb->link);
	int max_win = tipc_link_max_win(bb->link);
	int min_win = tipc_link_min_win(bb->link);
	int i, mtu, prim;

	/* Start pessimistic: no primary, broadcast assumed supported until
	 * a bearer in use proves otherwise below
	 */
	bb->primary_bearer = INVALID_BEARER_ID;
	bb->bcast_support = true;

	/* No broadcast peers at all: nothing to select */
	if (!all_dests)
		return;

	for (i = 0; i < MAX_BEARERS; i++) {
		if (!bb->dests[i])
			continue;

		/* Shrink broadcast link MTU to the smallest bearer MTU in use,
		 * and refresh queue limits to match
		 */
		mtu = tipc_bearer_mtu(net, i);
		if (mtu < tipc_link_mtu(bb->link)) {
			tipc_link_set_mtu(bb->link, mtu);
			tipc_link_set_queue_limits(bb->link,
						   min_win,
						   max_win);
		}
		bb->bcast_support &= tipc_bearer_bcast_support(net, i);
		/* Only a bearer reaching every destination qualifies */
		if (bb->dests[i] < all_dests)
			continue;

		bb->primary_bearer = i;

		/* Reduce risk that all nodes select same primary */
		if ((i ^ tipc_own_addr(net)) & 1)
			break;
	}
	/* Final bcast_support verdict comes from the chosen primary, if any */
	prim = bb->primary_bearer;
	if (prim != INVALID_BEARER_ID)
		bb->bcast_support = tipc_bearer_bcast_support(net, prim);
}
146*4882a593Smuzhiyun
/* tipc_bcast_inc_bearer_dst_cnt - register one more broadcast destination
 * reachable via the given bearer, then re-evaluate the primary bearer choice
 */
void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);

	tipc_bcast_lock(net);
	bb->dests[bearer_id]++;
	tipc_bcbase_select_primary(net);
	tipc_bcast_unlock(net);
}
156*4882a593Smuzhiyun
/* tipc_bcast_dec_bearer_dst_cnt - drop one broadcast destination reachable
 * via the given bearer, then re-evaluate the primary bearer choice
 */
void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);

	tipc_bcast_lock(net);
	bb->dests[bearer_id]--;
	tipc_bcbase_select_primary(net);
	tipc_bcast_unlock(net);
}
166*4882a593Smuzhiyun
/* tipc_bcbase_xmit - broadcast a packet queue across one or more bearers
 *
 * Note that number of reachable destinations, as indicated in the dests[]
 * array, may transitionally differ from the number of destinations indicated
 * in each sent buffer. We can sustain this. Excess destination nodes will
 * drop and never acknowledge the unexpected packets, and missing destinations
 * will either require retransmission (if they are just about to be added to
 * the bearer), or be removed from the buffer's 'ackers' counter (if they
 * just went down)
 */
static void tipc_bcbase_xmit(struct net *net, struct sk_buff_head *xmitq)
{
	int bearer_id;
	struct tipc_bc_base *bb = tipc_bc_base(net);
	struct sk_buff *skb, *_skb;
	struct sk_buff_head _xmitq;

	if (skb_queue_empty(xmitq))
		return;

	/* The typical case: at least one bearer has links to all nodes */
	bearer_id = bb->primary_bearer;
	if (bearer_id >= 0) {
		tipc_bearer_bc_xmit(net, bearer_id, xmitq);
		return;
	}

	/* We have to transmit across all bearers */
	__skb_queue_head_init(&_xmitq);
	for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
		if (!bb->dests[bearer_id])
			continue;

		/* Copy the whole queue for this bearer; a failed copy stops
		 * the walk and a partial queue is sent
		 */
		skb_queue_walk(xmitq, skb) {
			_skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
			if (!_skb)
				break;
			__skb_queue_tail(&_xmitq, _skb);
		}
		tipc_bearer_bc_xmit(net, bearer_id, &_xmitq);
	}
	/* Drop the originals and any copies still queued after a failure */
	__skb_queue_purge(xmitq);
	__skb_queue_purge(&_xmitq);
}
211*4882a593Smuzhiyun
/* tipc_bcast_select_xmit_method - choose broadcast or replicast for the next
 * multicast transmission, based on bearer capability, peer support, forced
 * configuration, and destination count versus the calculated threshold.
 * Note: the order of the checks below is significant.
 */
static void tipc_bcast_select_xmit_method(struct net *net, int dests,
					  struct tipc_mc_method *method)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);
	unsigned long exp = method->expires;

	/* Broadcast supported by used bearer/bearers? */
	if (!bb->bcast_support) {
		method->rcast = true;
		return;
	}
	/* Any destinations which don't support replicast ? */
	if (!bb->rcast_support) {
		method->rcast = false;
		return;
	}
	/* Can current method be changed ? */
	method->expires = jiffies + TIPC_METHOD_EXPIRE;
	if (method->mandatory)
		return;

	/* Without TIPC_MCAST_RBCTL capability, keep the current method
	 * until the previous choice has expired
	 */
	if (!(tipc_net(net)->capabilities & TIPC_MCAST_RBCTL) &&
	    time_before(jiffies, exp))
		return;

	/* Configuration as force 'broadcast' method */
	if (bb->force_bcast) {
		method->rcast = false;
		return;
	}
	/* Configuration as force 'replicast' method */
	if (bb->force_rcast) {
		method->rcast = true;
		return;
	}
	/* Configuration as 'autoselect' or default method */
	/* Determine method to use now */
	method->rcast = dests <= bb->bc_threshold;
}
251*4882a593Smuzhiyun
/* tipc_bcast_xmit - broadcast the buffer chain to all external nodes
 * @net: the applicable net namespace
 * @pkts: chain of buffers containing message
 * @cong_link_cnt: set to 1 if broadcast link is congested, left untouched
 *                 otherwise
 * Consumes the buffer chain.
 * Returns 0 if success, otherwise errno: -EHOSTUNREACH,-EMSGSIZE
 */
int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
		    u16 *cong_link_cnt)
{
	struct tipc_link *l = tipc_bc_sndlink(net);
	struct sk_buff_head xmitq;
	int rc = 0;

	__skb_queue_head_init(&xmitq);
	tipc_bcast_lock(net);
	/* Only transmit if there is at least one broadcast peer */
	if (tipc_link_bc_peers(l))
		rc = tipc_link_xmit(l, pkts, &xmitq);
	tipc_bcast_unlock(net);
	tipc_bcbase_xmit(net, &xmitq);
	__skb_queue_purge(pkts);
	/* Link congestion is reported via @cong_link_cnt, not as an error */
	if (rc == -ELINKCONG) {
		*cong_link_cnt = 1;
		rc = 0;
	}
	return rc;
}
279*4882a593Smuzhiyun
/* tipc_rcast_xmit - replicate and send a message to given destination nodes
 * @net: the applicable net namespace
 * @pkts: chain of buffers containing message
 * @dests: list of destination nodes
 * @cong_link_cnt: returns number of congested links
 * @cong_links: returns identities of congested links
 * Returns 0 if success, otherwise errno
 */
static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts,
			   struct tipc_nlist *dests, u16 *cong_link_cnt)
{
	struct tipc_dest *dst, *tmp;
	struct sk_buff_head _pkts;
	u32 dnode, selector;

	selector = msg_link_selector(buf_msg(skb_peek(pkts)));
	__skb_queue_head_init(&_pkts);

	list_for_each_entry_safe(dst, tmp, &dests->list, list) {
		dnode = dst->node;
		/* Each destination gets its own copy of the whole chain;
		 * on copy failure, remaining packets stay in @pkts for the
		 * caller to purge
		 */
		if (!tipc_msg_pskb_copy(dnode, pkts, &_pkts))
			return -ENOMEM;

		/* Any other return value than -ELINKCONG is ignored */
		if (tipc_node_xmit(net, &_pkts, dnode, selector) == -ELINKCONG)
			(*cong_link_cnt)++;
	}
	return 0;
}
309*4882a593Smuzhiyun
/* tipc_mcast_send_sync - deliver a dummy message with SYN bit
 * @net: the applicable net namespace
 * @skb: socket buffer to copy
 * @method: send method to be used
 * @dests: destination nodes for message.
 *
 * Sent over the opposite transport method of the real message, so that
 * receivers can order the method switch. Returns 0 if success, otherwise
 * errno.
 */
static int tipc_mcast_send_sync(struct net *net, struct sk_buff *skb,
				struct tipc_mc_method *method,
				struct tipc_nlist *dests)
{
	struct tipc_msg *hdr, *_hdr;
	struct sk_buff_head tmpq;
	struct sk_buff *_skb;
	u16 cong_link_cnt;
	int rc = 0;

	/* Is a cluster supporting with new capabilities ? */
	if (!(tipc_net(net)->capabilities & TIPC_MCAST_RBCTL))
		return 0;

	hdr = buf_msg(skb);
	/* For fragmented messages, the relevant header is the inner one */
	if (msg_user(hdr) == MSG_FRAGMENTER)
		hdr = msg_inner_hdr(hdr);
	if (msg_type(hdr) != TIPC_MCAST_MSG)
		return 0;

	/* Allocate dummy message */
	_skb = tipc_buf_acquire(MCAST_H_SIZE, GFP_KERNEL);
	if (!_skb)
		return -ENOMEM;

	/* Preparing for 'synching' header; note this sets the SYN bit on the
	 * real message's header before it is copied below
	 */
	msg_set_syn(hdr, 1);

	/* Copy skb's header into a dummy header */
	skb_copy_to_linear_data(_skb, hdr, MCAST_H_SIZE);
	skb_orphan(_skb);

	/* Reverse method for dummy message */
	_hdr = buf_msg(_skb);
	msg_set_size(_hdr, MCAST_H_SIZE);
	msg_set_is_rcast(_hdr, !msg_is_rcast(hdr));
	msg_set_errcode(_hdr, TIPC_ERR_NO_PORT);

	__skb_queue_head_init(&tmpq);
	__skb_queue_tail(&tmpq, _skb);
	/* Send the dummy over the method opposite to the real message's */
	if (method->rcast)
		rc = tipc_bcast_xmit(net, &tmpq, &cong_link_cnt);
	else
		rc = tipc_rcast_xmit(net, &tmpq, dests, &cong_link_cnt);

	/* This queue should normally be empty by now */
	__skb_queue_purge(&tmpq);

	return rc;
}
367*4882a593Smuzhiyun
/* tipc_mcast_xmit - deliver message to indicated destination nodes
 * and to identified node local sockets
 * @net: the applicable net namespace
 * @pkts: chain of buffers containing message
 * @method: send method to be used
 * @dests: destination nodes for message.
 * @cong_link_cnt: returns number of encountered congested destination links
 * Consumes buffer chain.
 * Returns 0 if success, otherwise errno
 */
int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,
		    struct tipc_mc_method *method, struct tipc_nlist *dests,
		    u16 *cong_link_cnt)
{
	struct sk_buff_head inputq, localq;
	bool rcast = method->rcast;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	int rc = 0;

	skb_queue_head_init(&inputq);
	__skb_queue_head_init(&localq);

	/* Clone packets before they are consumed by next call */
	if (dests->local && !tipc_msg_reassemble(pkts, &localq)) {
		rc = -ENOMEM;
		goto exit;
	}
	/* Send according to determined transmit method */
	if (dests->remote) {
		tipc_bcast_select_xmit_method(net, dests->remote, method);

		/* Tag the outgoing message with the method in use; for
		 * fragmented messages the tag goes in the inner header
		 */
		skb = skb_peek(pkts);
		hdr = buf_msg(skb);
		if (msg_user(hdr) == MSG_FRAGMENTER)
			hdr = msg_inner_hdr(hdr);
		msg_set_is_rcast(hdr, method->rcast);

		/* Switch method ? */
		if (rcast != method->rcast) {
			rc = tipc_mcast_send_sync(net, skb, method, dests);
			if (unlikely(rc)) {
				pr_err("Unable to send SYN: method %d, rc %d\n",
				       rcast, rc);
				goto exit;
			}
		}

		if (method->rcast)
			rc = tipc_rcast_xmit(net, pkts, dests, cong_link_cnt);
		else
			rc = tipc_bcast_xmit(net, pkts, cong_link_cnt);
	}

	/* Deliver to node-local sockets via the reassembled copy */
	if (dests->local) {
		tipc_loopback_trace(net, &localq);
		tipc_sk_mcast_rcv(net, &localq, &inputq);
	}
exit:
	/* This queue should normally be empty by now */
	__skb_queue_purge(pkts);
	return rc;
}
431*4882a593Smuzhiyun
/* tipc_bcast_rcv - receive a broadcast packet, and deliver to rcv link
 *
 * RCU is locked, no other locks set
 */
int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct sk_buff_head xmitq;
	int rc;

	__skb_queue_head_init(&xmitq);

	/* Drop packets from foreign clusters or while the link is down */
	if (msg_mc_netid(hdr) != tipc_netid(net) || !tipc_link_is_up(l)) {
		kfree_skb(skb);
		return 0;
	}

	tipc_bcast_lock(net);
	if (msg_user(hdr) == BCAST_PROTOCOL)
		rc = tipc_link_bc_nack_rcv(l, skb, &xmitq);
	else
		rc = tipc_link_rcv(l, skb, NULL);
	tipc_bcast_unlock(net);

	/* Transmit any protocol/retransmission traffic generated above */
	tipc_bcbase_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);

	return rc;
}
465*4882a593Smuzhiyun
/* tipc_bcast_ack_rcv - receive and handle a broadcast acknowledge
 *
 * RCU is locked, no other locks set
 */
void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
			struct tipc_msg *hdr)
{
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	u16 acked = msg_bcast_ack(hdr);
	struct sk_buff_head xmitq;

	/* Ignore bc acks sent by peer before bcast synch point was received */
	if (msg_bc_ack_invalid(hdr))
		return;

	__skb_queue_head_init(&xmitq);

	tipc_bcast_lock(net);
	tipc_link_bc_ack_rcv(l, acked, 0, NULL, &xmitq, NULL);
	tipc_bcast_unlock(net);

	/* Transmit any retransmissions triggered by the ack */
	tipc_bcbase_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);
}
493*4882a593Smuzhiyun
/* tipc_bcast_sync_rcv - check and update rcv link with peer's send state
 *
 * RCU is locked, no other locks set
 */
int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
			struct tipc_msg *hdr,
			struct sk_buff_head *retrq)
{
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct tipc_gap_ack_blks *ga;
	struct sk_buff_head xmitq;
	int rc = 0;

	__skb_queue_head_init(&xmitq);

	tipc_bcast_lock(net);
	if (msg_type(hdr) != STATE_MSG) {
		/* Non-state message: (re)initialize peer's send state */
		tipc_link_bc_init_rcv(l, hdr);
	} else if (!msg_bc_ack_invalid(hdr)) {
		tipc_get_gap_ack_blks(&ga, l, hdr, false);
		/* Unless configured otherwise, retransmit via broadcast */
		if (!sysctl_tipc_bc_retruni)
			retrq = &xmitq;
		rc = tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr),
					  msg_bc_gap(hdr), ga, &xmitq,
					  retrq);
		rc |= tipc_link_bc_sync_rcv(l, hdr, &xmitq);
	}
	tipc_bcast_unlock(net);

	tipc_bcbase_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);
	return rc;
}
530*4882a593Smuzhiyun
/* tipc_bcast_add_peer - add a peer node to broadcast link and bearer
 *
 * RCU is locked, node lock is set
 */
void tipc_bcast_add_peer(struct net *net, struct tipc_link *uc_l,
			 struct sk_buff_head *xmitq)
{
	struct tipc_link *snd_l = tipc_bc_sndlink(net);

	tipc_bcast_lock(net);
	tipc_link_add_bc_peer(snd_l, uc_l, xmitq);
	/* Peer set changed: re-evaluate primary bearer and bc threshold */
	tipc_bcbase_select_primary(net);
	tipc_bcbase_calc_bc_threshold(net);
	tipc_bcast_unlock(net);
}
546*4882a593Smuzhiyun
/* tipc_bcast_remove_peer - remove a peer node from broadcast link and bearer
 *
 * RCU is locked, node lock is set
 */
void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_l)
{
	struct tipc_link *snd_l = tipc_bc_sndlink(net);
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);

	tipc_bcast_lock(net);
	tipc_link_remove_bc_peer(snd_l, rcv_l, &xmitq);
	/* Peer set changed: re-evaluate primary bearer and bc threshold */
	tipc_bcbase_select_primary(net);
	tipc_bcbase_calc_bc_threshold(net);
	tipc_bcast_unlock(net);

	tipc_bcbase_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);
}
571*4882a593Smuzhiyun
tipc_bclink_reset_stats(struct net * net,struct tipc_link * l)572*4882a593Smuzhiyun int tipc_bclink_reset_stats(struct net *net, struct tipc_link *l)
573*4882a593Smuzhiyun {
574*4882a593Smuzhiyun if (!l)
575*4882a593Smuzhiyun return -ENOPROTOOPT;
576*4882a593Smuzhiyun
577*4882a593Smuzhiyun tipc_bcast_lock(net);
578*4882a593Smuzhiyun tipc_link_reset_stats(l);
579*4882a593Smuzhiyun tipc_bcast_unlock(net);
580*4882a593Smuzhiyun return 0;
581*4882a593Smuzhiyun }
582*4882a593Smuzhiyun
/* tipc_bc_link_set_queue_limits - set the broadcast link's max send window.
 * Values below BCLINK_WIN_MIN are raised to that floor; values above
 * TIPC_MAX_LINK_WIN are rejected with -EINVAL.
 */
static int tipc_bc_link_set_queue_limits(struct net *net, u32 max_win)
{
	struct tipc_link *l = tipc_bc_sndlink(net);
	u32 win = max_win;

	if (!l)
		return -ENOPROTOOPT;
	if (win > TIPC_MAX_LINK_WIN)
		return -EINVAL;
	if (win < BCLINK_WIN_MIN)
		win = BCLINK_WIN_MIN;
	tipc_bcast_lock(net);
	tipc_link_set_queue_limits(l, tipc_link_min_win(l), win);
	tipc_bcast_unlock(net);
	return 0;
}
598*4882a593Smuzhiyun
/* tipc_bc_link_set_broadcast_mode - force broadcast, force replicast, or
 * enable autoselect for multicast traffic. A mode is only accepted when
 * the required method(s) are supported in the cluster.
 */
static int tipc_bc_link_set_broadcast_mode(struct net *net, u32 bc_mode)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);

	if (bc_mode == BCLINK_MODE_BCAST) {
		if (!bb->bcast_support)
			return -ENOPROTOOPT;

		bb->force_bcast = true;
		bb->force_rcast = false;
		return 0;
	}

	if (bc_mode == BCLINK_MODE_RCAST) {
		if (!bb->rcast_support)
			return -ENOPROTOOPT;

		bb->force_bcast = false;
		bb->force_rcast = true;
		return 0;
	}

	if (bc_mode == BCLINK_MODE_SEL) {
		if (!bb->bcast_support || !bb->rcast_support)
			return -ENOPROTOOPT;

		bb->force_bcast = false;
		bb->force_rcast = false;
		return 0;
	}

	return -EINVAL;
}
631*4882a593Smuzhiyun
/* tipc_bc_link_set_broadcast_ratio - set the bcast/rcast selection ratio
 * @net: the applicable net namespace
 * @bc_ratio: new ratio in percent, must be in 1..100
 *
 * Only meaningful when both broadcast and replicast are supported, since
 * the ratio drives the automatic selection between them.
 *
 * Return: 0 on success, -ENOPROTOOPT if either method is unsupported,
 * -EINVAL if @bc_ratio is out of range.
 */
static int tipc_bc_link_set_broadcast_ratio(struct net *net, u32 bc_ratio)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);

	if (!bb->bcast_support || !bb->rcast_support)
		return -ENOPROTOOPT;

	/* bc_ratio is unsigned, so the former "bc_ratio <= 0" could only
	 * ever mean "== 0"; spell that out to avoid a tautological
	 * comparison (-Wtype-limits).
	 */
	if (!bc_ratio || bc_ratio > 100)
		return -EINVAL;

	bb->rc_ratio = bc_ratio;
	/* Recompute the destination-count threshold under the bcast lock */
	tipc_bcast_lock(net);
	tipc_bcbase_calc_bc_threshold(net);
	tipc_bcast_unlock(net);

	return 0;
}
649*4882a593Smuzhiyun
tipc_nl_bc_link_set(struct net * net,struct nlattr * attrs[])650*4882a593Smuzhiyun int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
651*4882a593Smuzhiyun {
652*4882a593Smuzhiyun int err;
653*4882a593Smuzhiyun u32 win;
654*4882a593Smuzhiyun u32 bc_mode;
655*4882a593Smuzhiyun u32 bc_ratio;
656*4882a593Smuzhiyun struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
657*4882a593Smuzhiyun
658*4882a593Smuzhiyun if (!attrs[TIPC_NLA_LINK_PROP])
659*4882a593Smuzhiyun return -EINVAL;
660*4882a593Smuzhiyun
661*4882a593Smuzhiyun err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
662*4882a593Smuzhiyun if (err)
663*4882a593Smuzhiyun return err;
664*4882a593Smuzhiyun
665*4882a593Smuzhiyun if (!props[TIPC_NLA_PROP_WIN] &&
666*4882a593Smuzhiyun !props[TIPC_NLA_PROP_BROADCAST] &&
667*4882a593Smuzhiyun !props[TIPC_NLA_PROP_BROADCAST_RATIO]) {
668*4882a593Smuzhiyun return -EOPNOTSUPP;
669*4882a593Smuzhiyun }
670*4882a593Smuzhiyun
671*4882a593Smuzhiyun if (props[TIPC_NLA_PROP_BROADCAST]) {
672*4882a593Smuzhiyun bc_mode = nla_get_u32(props[TIPC_NLA_PROP_BROADCAST]);
673*4882a593Smuzhiyun err = tipc_bc_link_set_broadcast_mode(net, bc_mode);
674*4882a593Smuzhiyun }
675*4882a593Smuzhiyun
676*4882a593Smuzhiyun if (!err && props[TIPC_NLA_PROP_BROADCAST_RATIO]) {
677*4882a593Smuzhiyun bc_ratio = nla_get_u32(props[TIPC_NLA_PROP_BROADCAST_RATIO]);
678*4882a593Smuzhiyun err = tipc_bc_link_set_broadcast_ratio(net, bc_ratio);
679*4882a593Smuzhiyun }
680*4882a593Smuzhiyun
681*4882a593Smuzhiyun if (!err && props[TIPC_NLA_PROP_WIN]) {
682*4882a593Smuzhiyun win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
683*4882a593Smuzhiyun err = tipc_bc_link_set_queue_limits(net, win);
684*4882a593Smuzhiyun }
685*4882a593Smuzhiyun
686*4882a593Smuzhiyun return err;
687*4882a593Smuzhiyun }
688*4882a593Smuzhiyun
tipc_bcast_init(struct net * net)689*4882a593Smuzhiyun int tipc_bcast_init(struct net *net)
690*4882a593Smuzhiyun {
691*4882a593Smuzhiyun struct tipc_net *tn = tipc_net(net);
692*4882a593Smuzhiyun struct tipc_bc_base *bb = NULL;
693*4882a593Smuzhiyun struct tipc_link *l = NULL;
694*4882a593Smuzhiyun
695*4882a593Smuzhiyun bb = kzalloc(sizeof(*bb), GFP_KERNEL);
696*4882a593Smuzhiyun if (!bb)
697*4882a593Smuzhiyun goto enomem;
698*4882a593Smuzhiyun tn->bcbase = bb;
699*4882a593Smuzhiyun spin_lock_init(&tipc_net(net)->bclock);
700*4882a593Smuzhiyun
701*4882a593Smuzhiyun if (!tipc_link_bc_create(net, 0, 0, NULL,
702*4882a593Smuzhiyun one_page_mtu,
703*4882a593Smuzhiyun BCLINK_WIN_DEFAULT,
704*4882a593Smuzhiyun BCLINK_WIN_DEFAULT,
705*4882a593Smuzhiyun 0,
706*4882a593Smuzhiyun &bb->inputq,
707*4882a593Smuzhiyun NULL,
708*4882a593Smuzhiyun NULL,
709*4882a593Smuzhiyun &l))
710*4882a593Smuzhiyun goto enomem;
711*4882a593Smuzhiyun bb->link = l;
712*4882a593Smuzhiyun tn->bcl = l;
713*4882a593Smuzhiyun bb->rc_ratio = 10;
714*4882a593Smuzhiyun bb->rcast_support = true;
715*4882a593Smuzhiyun return 0;
716*4882a593Smuzhiyun enomem:
717*4882a593Smuzhiyun kfree(bb);
718*4882a593Smuzhiyun kfree(l);
719*4882a593Smuzhiyun return -ENOMEM;
720*4882a593Smuzhiyun }
721*4882a593Smuzhiyun
tipc_bcast_stop(struct net * net)722*4882a593Smuzhiyun void tipc_bcast_stop(struct net *net)
723*4882a593Smuzhiyun {
724*4882a593Smuzhiyun struct tipc_net *tn = net_generic(net, tipc_net_id);
725*4882a593Smuzhiyun
726*4882a593Smuzhiyun synchronize_net();
727*4882a593Smuzhiyun kfree(tn->bcbase);
728*4882a593Smuzhiyun kfree(tn->bcl);
729*4882a593Smuzhiyun }
730*4882a593Smuzhiyun
/* tipc_nlist_init - reset a destination node list
 * @nl: list to initialize
 * @self: this node's own address, used to detect local destinations
 */
void tipc_nlist_init(struct tipc_nlist *nl, u32 self)
{
	/* Zero everything first, then set up the fields that need values */
	memset(nl, 0, sizeof(*nl));
	nl->self = self;
	INIT_LIST_HEAD(&nl->list);
}
737*4882a593Smuzhiyun
/* tipc_nlist_add - add a destination node to the list
 * @nl: destination list
 * @node: node address; our own address only sets the 'local' flag
 */
void tipc_nlist_add(struct tipc_nlist *nl, u32 node)
{
	if (node == nl->self) {
		nl->local = true;
		return;
	}
	/* Count only if the destination was not already present */
	if (tipc_dest_push(&nl->list, node, 0))
		nl->remote++;
}
745*4882a593Smuzhiyun
/* tipc_nlist_del - remove a destination node from the list
 * @nl: destination list
 * @node: node address; our own address only clears the 'local' flag
 */
void tipc_nlist_del(struct tipc_nlist *nl, u32 node)
{
	if (node == nl->self) {
		nl->local = false;
		return;
	}
	/* Decrement only if the destination was actually present */
	if (tipc_dest_del(&nl->list, node, 0))
		nl->remote--;
}
753*4882a593Smuzhiyun
tipc_nlist_purge(struct tipc_nlist * nl)754*4882a593Smuzhiyun void tipc_nlist_purge(struct tipc_nlist *nl)
755*4882a593Smuzhiyun {
756*4882a593Smuzhiyun tipc_dest_list_purge(&nl->list);
757*4882a593Smuzhiyun nl->remote = 0;
758*4882a593Smuzhiyun nl->local = false;
759*4882a593Smuzhiyun }
760*4882a593Smuzhiyun
tipc_bcast_get_mode(struct net * net)761*4882a593Smuzhiyun u32 tipc_bcast_get_mode(struct net *net)
762*4882a593Smuzhiyun {
763*4882a593Smuzhiyun struct tipc_bc_base *bb = tipc_bc_base(net);
764*4882a593Smuzhiyun
765*4882a593Smuzhiyun if (bb->force_bcast)
766*4882a593Smuzhiyun return BCLINK_MODE_BCAST;
767*4882a593Smuzhiyun
768*4882a593Smuzhiyun if (bb->force_rcast)
769*4882a593Smuzhiyun return BCLINK_MODE_RCAST;
770*4882a593Smuzhiyun
771*4882a593Smuzhiyun if (bb->bcast_support && bb->rcast_support)
772*4882a593Smuzhiyun return BCLINK_MODE_SEL;
773*4882a593Smuzhiyun
774*4882a593Smuzhiyun return 0;
775*4882a593Smuzhiyun }
776*4882a593Smuzhiyun
tipc_bcast_get_broadcast_ratio(struct net * net)777*4882a593Smuzhiyun u32 tipc_bcast_get_broadcast_ratio(struct net *net)
778*4882a593Smuzhiyun {
779*4882a593Smuzhiyun struct tipc_bc_base *bb = tipc_bc_base(net);
780*4882a593Smuzhiyun
781*4882a593Smuzhiyun return bb->rc_ratio;
782*4882a593Smuzhiyun }
783*4882a593Smuzhiyun
/* tipc_mcast_filter_msg - filter/reorder mcast messages around a method switch
 * @net: the applicable net namespace
 * @defq: deferral queue holding messages from the "new" method per peer
 * @inputq: arrival queue; only its head is inspected here
 *
 * A multicast stream may arrive via two methods (replicast vs. true
 * broadcast); SYN-marked messages delimit a switch-over.  To preserve
 * ordering, messages arriving on the new method are parked in @defq
 * until the peer's twin SYN on the old method shows up, after which
 * the parked messages are released back to @inputq.
 *
 * NOTE(review): the exact SYN/twin protocol semantics are inferred from
 * this function's structure — confirm against the sender-side switch
 * logic before relying on this description.
 */
void tipc_mcast_filter_msg(struct net *net, struct sk_buff_head *defq,
			   struct sk_buff_head *inputq)
{
	struct sk_buff *skb, *_skb, *tmp;
	struct tipc_msg *hdr, *_hdr;
	bool match = false;
	u32 node, port;

	skb = skb_peek(inputq);
	if (!skb)
		return;

	hdr = buf_msg(skb);

	/* Fast path: no SYN at the head and nothing deferred */
	if (likely(!msg_is_syn(hdr) && skb_queue_empty(defq)))
		return;

	/* Messages originating from this node are never filtered */
	node = msg_orignode(hdr);
	if (node == tipc_own_addr(net))
		return;

	port = msg_origport(hdr);

	/* Has the twin SYN message already arrived ? */
	skb_queue_walk(defq, _skb) {
		_hdr = buf_msg(_skb);
		if (msg_orignode(_hdr) != node)
			continue;
		if (msg_origport(_hdr) != port)
			continue;
		match = true;
		/* _skb/_hdr now point at this peer's deferred twin */
		break;
	}

	if (!match) {
		/* No deferred twin: a non-SYN head is simply delivered */
		if (!msg_is_syn(hdr))
			return;
		/* First SYN from this peer: start deferring */
		__skb_dequeue(inputq);
		__skb_queue_tail(defq, skb);
		return;
	}

	/* Deliver non-SYN message from other link, otherwise queue it */
	if (!msg_is_syn(hdr)) {
		if (msg_is_rcast(hdr) != msg_is_rcast(_hdr))
			return;
		__skb_dequeue(inputq);
		__skb_queue_tail(defq, skb);
		return;
	}

	/* Queue non-SYN/SYN message from same link */
	if (msg_is_syn(hdr)) {
		if (msg_is_rcast(hdr) == msg_is_rcast(_hdr)) {
			__skb_dequeue(inputq);
			__skb_queue_tail(defq, skb);
			return;
		}
	}

	/* Matching SYN messages => return the one with data, if any */
	__skb_unlink(_skb, defq);
	if (msg_data_sz(hdr)) {
		/* Head carries data: keep it, drop the empty deferred twin */
		kfree_skb(_skb);
	} else {
		/* Head is empty: deliver the deferred twin in its place */
		__skb_dequeue(inputq);
		kfree_skb(skb);
		__skb_queue_tail(inputq, _skb);
	}

	/* Deliver subsequent non-SYN messages from same peer */
	skb_queue_walk_safe(defq, _skb, tmp) {
		_hdr = buf_msg(_skb);
		if (msg_orignode(_hdr) != node)
			continue;
		if (msg_origport(_hdr) != port)
			continue;
		/* Stop at the next switch-over marker */
		if (msg_is_syn(_hdr))
			break;
		__skb_unlink(_skb, defq);
		__skb_queue_tail(inputq, _skb);
	}
}
865