/*
 * net/tipc/name_distr.c: TIPC name distribution code
 *
 * Copyright (c) 2000-2006, 2014, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "name_distr.h"

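/* Name distribution timeout in ms, settable through the tipc "named_timeout"
 * sysctl.
 */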
int sysctl_tipc_named_timeout __read_mostly = 2000;

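/* distr_queue_item - a deferred name table update (publication or
 * withdrawal), together with its originating node and expiry time.
 */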
struct distr_queue_item {
	struct distr_item i;
	u32 dtype;
	u32 node;
	unsigned long expires;
	struct list_head next;
};

/**
 * publ_to_item - add publication info to a publication message
 * @i: location of item in the message
 * @p: publication info
 */
static void publ_to_item(struct distr_item *i, struct publication *p)
{
	i->type = htonl(p->type);
	i->lower = htonl(p->lower);
	i->upper = htonl(p->upper);
	i->port = htonl(p->port);
	i->key = htonl(p->key);
}

/**
 * named_prepare_buf - allocate & initialize a publication message
 * @net: the associated network namespace
 * @type: message type
 * @size: payload size
 * @dest: destination node
 *
 * The buffer returned is of size INT_H_SIZE + payload size
 */
static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
					 u32 dest)
{
	struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
	u32 self = tipc_own_addr(net);
	struct tipc_msg *msg;

	if (buf != NULL) {
		msg = buf_msg(buf);
		tipc_msg_init(self, msg, NAME_DISTRIBUTOR,
			      type, INT_H_SIZE, dest);
		msg_set_size(msg, INT_H_SIZE + size);
	}
	return buf;
}

/**
 * tipc_named_publish - tell other nodes about a new publication by this node
 * @net: the associated network namespace
 * @publ: the new publication
 */
struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ)
{
	struct name_table *nt = tipc_name_table(net);
	struct distr_item *item;
	struct sk_buff *skb;

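	/* Node-scope publications are kept in the local name table only and
	 * are never distributed to other nodes.
	 */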
	if (publ->scope == TIPC_NODE_SCOPE) {
		list_add_tail_rcu(&publ->binding_node, &nt->node_scope);
		return NULL;
	}
	write_lock_bh(&nt->cluster_scope_lock);
	list_add_tail(&publ->binding_node, &nt->cluster_scope);
	write_unlock_bh(&nt->cluster_scope_lock);
	skb = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
	if (!skb) {
		pr_warn("Publication distribution failure\n");
		return NULL;
	}
	msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
	msg_set_non_legacy(buf_msg(skb));
	item = (struct distr_item *)msg_data(buf_msg(skb));
	publ_to_item(item, publ);
	return skb;
}

/**
 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
 * @net: the associated network namespace
 * @publ: the withdrawn publication
 */
struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
{
	struct name_table *nt = tipc_name_table(net);
	struct distr_item *item;
	struct sk_buff *skb;

	write_lock_bh(&nt->cluster_scope_lock);
	list_del(&publ->binding_node);
	write_unlock_bh(&nt->cluster_scope_lock);
	if (publ->scope == TIPC_NODE_SCOPE)
		return NULL;

	skb = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0);
	if (!skb) {
		pr_warn("Withdrawal distribution failure\n");
		return NULL;
	}
	msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
	msg_set_non_legacy(buf_msg(skb));
	item = (struct distr_item *)msg_data(buf_msg(skb));
	publ_to_item(item, publ);
	return skb;
}

/**
 * named_distribute - prepare name info for bulk distribution to another node
 * @net: the associated network namespace
 * @list: list of messages (buffers) to be returned from this function
 * @dnode: node to be updated
 * @pls: linked list of publication items to be packed into buffer chain
 * @seqno: sequence number for these messages
 */
static void named_distribute(struct net *net, struct sk_buff_head *list,
			     u32 dnode, struct list_head *pls, u16 seqno)
{
	struct publication *publ;
	struct sk_buff *skb = NULL;
	struct distr_item *item = NULL;
	u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) /
			ITEM_SIZE) * ITEM_SIZE;
	u32 msg_rem = msg_dsz;
	struct tipc_msg *hdr;

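	/* Pack the publications into a chain of buffers. msg_dsz is the link
	 * MTU payload rounded down to a multiple of ITEM_SIZE, so every
	 * buffer carries only complete distribution items.
	 */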
	list_for_each_entry(publ, pls, binding_node) {
		/* Prepare next buffer: */
		if (!skb) {
			skb = named_prepare_buf(net, PUBLICATION, msg_rem,
						dnode);
			if (!skb) {
				pr_warn("Bulk publication failure\n");
				return;
			}
			hdr = buf_msg(skb);
			msg_set_bc_ack_invalid(hdr, true);
			msg_set_bulk(hdr);
			msg_set_non_legacy(hdr);
			item = (struct distr_item *)msg_data(hdr);
		}

		/* Pack publication into message: */
		publ_to_item(item, publ);
		item++;
		msg_rem -= ITEM_SIZE;

		/* Append full buffer to list: */
		if (!msg_rem) {
			__skb_queue_tail(list, skb);
			skb = NULL;
			msg_rem = msg_dsz;
		}
	}
	if (skb) {
		hdr = buf_msg(skb);
		msg_set_size(hdr, INT_H_SIZE + (msg_dsz - msg_rem));
		skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem));
		__skb_queue_tail(list, skb);
	}
	hdr = buf_msg(skb_peek_tail(list));
	msg_set_last_bulk(hdr);
	msg_set_named_seqno(hdr, seqno);
}

/**
 * tipc_named_node_up - tell specified node about all publications by this node
 * @net: the associated network namespace
 * @dnode: destination node
 * @capabilities: peer node's capabilities
 */
void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);
	struct sk_buff_head head;
	u16 seqno;

	__skb_queue_head_init(&head);
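	/* Peers without the TIPC_NAMED_BCAST capability cannot receive name
	 * table updates via broadcast, so count them as replicast
	 * destinations. The bulk sequence number is sampled under the same
	 * lock that protects snd_nxt.
	 */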
	spin_lock_bh(&tn->nametbl_lock);
	if (!(capabilities & TIPC_NAMED_BCAST))
		nt->rc_dests++;
	seqno = nt->snd_nxt;
	spin_unlock_bh(&tn->nametbl_lock);

	read_lock_bh(&nt->cluster_scope_lock);
	named_distribute(net, &head, dnode, &nt->cluster_scope, seqno);
	tipc_node_xmit(net, &head, dnode, 0);
	read_unlock_bh(&nt->cluster_scope_lock);
}

/**
 * tipc_publ_purge - remove publication associated with a failed node
 * @net: the associated network namespace
 * @publ: the publication to remove
 * @addr: failed node's address
 *
 * Invoked for each publication issued by a newly failed node.
 * Removes the publication structure from the name table and deletes it.
 */
static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct publication *p;

	spin_lock_bh(&tn->nametbl_lock);
	p = tipc_nametbl_remove_publ(net, publ->type, publ->lower, publ->upper,
				     publ->node, publ->key);
	if (p)
		tipc_node_unsubscribe(net, &p->binding_node, addr);
	spin_unlock_bh(&tn->nametbl_lock);

	if (p != publ) {
		pr_err("Unable to remove publication from failed node\n"
		       " (type=%u, lower=%u, node=0x%x, port=%u, key=%u)\n",
		       publ->type, publ->lower, publ->node, publ->port,
		       publ->key);
	}

	if (p)
		kfree_rcu(p, rcu);
}

/**
 * tipc_dist_queue_purge - remove deferred updates from a node that went down
 * @net: the associated network namespace
 * @addr: the address of the node that went down
 */
static void tipc_dist_queue_purge(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct distr_queue_item *e, *tmp;

	spin_lock_bh(&tn->nametbl_lock);
	list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
		if (e->node != addr)
			continue;
		list_del(&e->next);
		kfree(e);
	}
	spin_unlock_bh(&tn->nametbl_lock);
}

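/**
 * tipc_publ_notify - purge publications issued by a failed node
 * @net: the associated network namespace
 * @nsub_list: list of publications issued by the failed node
 * @addr: the failed node's address
 * @capabilities: the failed node's capabilities
 */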
void tipc_publ_notify(struct net *net, struct list_head *nsub_list,
		      u32 addr, u16 capabilities)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);
	struct publication *publ, *tmp;

	list_for_each_entry_safe(publ, tmp, nsub_list, binding_node)
		tipc_publ_purge(net, publ, addr);
	tipc_dist_queue_purge(net, addr);
	spin_lock_bh(&tn->nametbl_lock);
	if (!(capabilities & TIPC_NAMED_BCAST))
		nt->rc_dests--;
	spin_unlock_bh(&tn->nametbl_lock);
}

/**
 * tipc_update_nametbl - try to process a nametable update and notify
 *			 subscribers
 * @net: the associated network namespace
 * @i: location of item in the message
 * @node: node address
 * @dtype: name distributor message type
 *
 * tipc_nametbl_lock must be held.
 * Returns true if the update was applied, otherwise false.
 */
static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
				u32 node, u32 dtype)
{
	struct publication *p = NULL;
	u32 lower = ntohl(i->lower);
	u32 upper = ntohl(i->upper);
	u32 type = ntohl(i->type);
	u32 port = ntohl(i->port);
	u32 key = ntohl(i->key);

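	/* A PUBLICATION adds the binding to the name table and subscribes to
	 * the issuing node; a WITHDRAWAL removes the binding and drops the
	 * subscription again.
	 */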
	if (dtype == PUBLICATION) {
		p = tipc_nametbl_insert_publ(net, type, lower, upper,
					     TIPC_CLUSTER_SCOPE, node,
					     port, key);
		if (p) {
			tipc_node_subscribe(net, &p->binding_node, node);
			return true;
		}
	} else if (dtype == WITHDRAWAL) {
		p = tipc_nametbl_remove_publ(net, type, lower,
					     upper, node, key);
		if (p) {
			tipc_node_unsubscribe(net, &p->binding_node, node);
			kfree_rcu(p, rcu);
			return true;
		}
		pr_warn_ratelimited("Failed to remove binding %u,%u from %x\n",
				    type, lower, node);
	} else {
		pr_warn_ratelimited("Unknown name table message received\n");
	}
	return false;
}

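/* tipc_named_dequeue - pick the next processable update from the queue.
 * Bulk and legacy messages are delivered unconditionally; unicast updates
 * are released only in sequence once the last bulk message has opened the
 * window, and obsolete ones (seqno behind *rcv_nxt) are dropped.
 */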
static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
					  u16 *rcv_nxt, bool *open)
{
	struct sk_buff *skb, *tmp;
	struct tipc_msg *hdr;
	u16 seqno;

	spin_lock_bh(&namedq->lock);
	skb_queue_walk_safe(namedq, skb, tmp) {
		if (unlikely(skb_linearize(skb))) {
			__skb_unlink(skb, namedq);
			kfree_skb(skb);
			continue;
		}
		hdr = buf_msg(skb);
		seqno = msg_named_seqno(hdr);
		if (msg_is_last_bulk(hdr)) {
			*rcv_nxt = seqno;
			*open = true;
		}

		if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) {
			__skb_unlink(skb, namedq);
			spin_unlock_bh(&namedq->lock);
			return skb;
		}

		if (*open && (*rcv_nxt == seqno)) {
			(*rcv_nxt)++;
			__skb_unlink(skb, namedq);
			spin_unlock_bh(&namedq->lock);
			return skb;
		}

		if (less(seqno, *rcv_nxt)) {
			__skb_unlink(skb, namedq);
			kfree_skb(skb);
			continue;
		}
	}
	spin_unlock_bh(&namedq->lock);
	return NULL;
}

/**
 * tipc_named_rcv - process name table update messages sent by another node
 * @net: the associated network namespace
 * @namedq: queue of name table update messages from the peer
 * @rcv_nxt: sequence number of the next expected update
 * @open: true once in-sequence unicast updates may be processed
 */
void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq,
		    u16 *rcv_nxt, bool *open)
{
	struct tipc_net *tn = tipc_net(net);
	struct distr_item *item;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	u32 count, node;

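	/* Each accepted message may carry several distribution items; apply
	 * them all to the name table under the name table lock.
	 */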
	spin_lock_bh(&tn->nametbl_lock);
	while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) {
		hdr = buf_msg(skb);
		node = msg_orignode(hdr);
		item = (struct distr_item *)msg_data(hdr);
		count = msg_data_sz(hdr) / ITEM_SIZE;
		while (count--) {
			tipc_update_nametbl(net, item, node, msg_type(hdr));
			item++;
		}
		kfree_skb(skb);
	}
	spin_unlock_bh(&tn->nametbl_lock);
}

/**
 * tipc_named_reinit - re-initialize local publications
 * @net: the associated network namespace
 *
 * This routine is called whenever TIPC networking is enabled.
 * All name table entries published by this node are updated to reflect
 * the node's new network address.
 */
void tipc_named_reinit(struct net *net)
{
	struct name_table *nt = tipc_name_table(net);
	struct tipc_net *tn = tipc_net(net);
	struct publication *publ;
	u32 self = tipc_own_addr(net);

	spin_lock_bh(&tn->nametbl_lock);

	list_for_each_entry_rcu(publ, &nt->node_scope, binding_node)
		publ->node = self;
	list_for_each_entry_rcu(publ, &nt->cluster_scope, binding_node)
		publ->node = self;
	nt->rc_dests = 0;
	spin_unlock_bh(&tn->nametbl_lock);
}