// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * Routines for handling Netlink messages for HSR and PRP.
 */

#include "hsr_netlink.h"
#include <linux/kernel.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include "hsr_main.h"
#include "hsr_device.h"
#include "hsr_framereg.h"

static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
	[IFLA_HSR_SLAVE1]		= { .type = NLA_U32 },
	[IFLA_HSR_SLAVE2]		= { .type = NLA_U32 },
	[IFLA_HSR_MULTICAST_SPEC]	= { .type = NLA_U8 },
	[IFLA_HSR_VERSION]		= { .type = NLA_U8 },
	[IFLA_HSR_SUPERVISION_ADDR]	= { .len = ETH_ALEN },
	[IFLA_HSR_SEQ_NR]		= { .type = NLA_U16 },
	[IFLA_HSR_PROTOCOL]		= { .type = NLA_U8 },
};
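
/* These attributes are what iproute2 passes down for "ip link ... type hsr".
 * Illustrative example only; exact option spellings depend on the iproute2
 * version in use:
 *
 *	ip link add name hsr0 type hsr slave1 eth0 slave2 eth1 \
 *		supervision 45 version 1 proto 0
 *
 * which maps to IFLA_HSR_SLAVE1/SLAVE2 (slave ifindexes), IFLA_HSR_MULTICAST_SPEC
 * (last byte of the supervision multicast address), IFLA_HSR_VERSION, and
 * IFLA_HSR_PROTOCOL (0 = HSR, 1 = PRP).
 */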

/* Here, it seems a netdevice has already been allocated for us, and the
 * hsr_dev_setup routine has been executed. Nice!
 */
static int hsr_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	enum hsr_version proto_version;
	unsigned char multicast_spec;
	u8 proto = HSR_PROTOCOL_HSR;
	struct net_device *link[2];

	if (!data) {
		NL_SET_ERR_MSG_MOD(extack, "No slave devices specified");
		return -EINVAL;
	}
	if (!data[IFLA_HSR_SLAVE1]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave1 device not specified");
		return -EINVAL;
	}
	link[0] = __dev_get_by_index(src_net,
				     nla_get_u32(data[IFLA_HSR_SLAVE1]));
	if (!link[0]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave1 does not exist");
		return -EINVAL;
	}
	if (!data[IFLA_HSR_SLAVE2]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave2 device not specified");
		return -EINVAL;
	}
	link[1] = __dev_get_by_index(src_net,
				     nla_get_u32(data[IFLA_HSR_SLAVE2]));
	if (!link[1]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave2 does not exist");
		return -EINVAL;
	}

	if (link[0] == link[1]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave1 and Slave2 are same");
		return -EINVAL;
	}

	if (!data[IFLA_HSR_MULTICAST_SPEC])
		multicast_spec = 0;
	else
		multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]);

	if (data[IFLA_HSR_PROTOCOL])
		proto = nla_get_u8(data[IFLA_HSR_PROTOCOL]);

	if (proto >= HSR_PROTOCOL_MAX) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol");
		return -EINVAL;
	}

	if (!data[IFLA_HSR_VERSION]) {
		proto_version = HSR_V0;
	} else {
		if (proto == HSR_PROTOCOL_PRP) {
			NL_SET_ERR_MSG_MOD(extack, "PRP version unsupported");
			return -EINVAL;
		}

		proto_version = nla_get_u8(data[IFLA_HSR_VERSION]);
		if (proto_version > HSR_V1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only HSR version 0/1 supported");
			return -EINVAL;
		}
	}

	if (proto == HSR_PROTOCOL_PRP)
		proto_version = PRP_V1;

	return hsr_dev_finalize(dev, link, multicast_spec, proto_version, extack);
}

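/* Teardown note: the prune and announce timers are stopped before the ports
 * and node table are torn down, so their handlers cannot run against state
 * that is being freed. The netdev itself is only queued for unregistration at
 * the end, on the list supplied by the rtnetlink core.
 */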
static void hsr_dellink(struct net_device *dev, struct list_head *head)
{
	struct hsr_priv *hsr = netdev_priv(dev);

	del_timer_sync(&hsr->prune_timer);
	del_timer_sync(&hsr->announce_timer);

	hsr_debugfs_term(hsr);
	hsr_del_ports(hsr);

	hsr_del_self_node(hsr);
	hsr_del_nodes(&hsr->node_db);

	unregister_netdevice_queue(dev, head);
}

static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct hsr_priv *hsr = netdev_priv(dev);
	u8 proto = HSR_PROTOCOL_HSR;
	struct hsr_port *port;

	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port) {
		if (nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex))
			goto nla_put_failure;
	}

	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port) {
		if (nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex))
			goto nla_put_failure;
	}

	if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
		    hsr->sup_multicast_addr) ||
	    nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
		goto nla_put_failure;
	if (hsr->prot_version == PRP_V1)
		proto = HSR_PROTOCOL_PRP;
	if (nla_put_u8(skb, IFLA_HSR_PROTOCOL, proto))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops hsr_link_ops __read_mostly = {
	.kind		= "hsr",
	.maxtype	= IFLA_HSR_MAX,
	.policy		= hsr_policy,
	.priv_size	= sizeof(struct hsr_priv),
	.setup		= hsr_dev_setup,
	.newlink	= hsr_newlink,
	.dellink	= hsr_dellink,
	.fill_info	= hsr_fill_info,
};
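
/* The "hsr" kind above is what "ip link add ... type hsr" resolves to; together
 * with MODULE_ALIAS_RTNL_LINK("hsr") at the bottom of this file it also lets
 * the rtnetlink core request this module by kind name on first use.
 */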

/* attribute policy */
static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
	[HSR_A_NODE_ADDR] = { .len = ETH_ALEN },
	[HSR_A_NODE_ADDR_B] = { .len = ETH_ALEN },
	[HSR_A_IFINDEX] = { .type = NLA_U32 },
	[HSR_A_IF1_AGE] = { .type = NLA_U32 },
	[HSR_A_IF2_AGE] = { .type = NLA_U32 },
	[HSR_A_IF1_SEQ] = { .type = NLA_U16 },
	[HSR_A_IF2_SEQ] = { .type = NLA_U16 },
};

static struct genl_family hsr_genl_family;

static const struct genl_multicast_group hsr_mcgrps[] = {
	{ .name = "hsr-network", },
};
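
/* Userspace receives the HSR_C_RING_ERROR / HSR_C_NODE_DOWN notifications
 * below by joining this multicast group. A minimal libnl-3 sketch, for
 * reference only (assumes libnl-genl; error handling omitted):
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	genl_connect(sk);
 *	int grp = genl_ctrl_resolve_grp(sk, "HSR", "hsr-network");
 *	nl_socket_add_membership(sk, grp);
 *	// then receive with nl_recvmsgs_default(sk) and a callback that
 *	// parses the HSR_A_* attributes
 */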

/* This is called if for some node with MAC address addr, we only get frames
 * over one of the slave interfaces. This would indicate an open network ring
 * (i.e. a link has failed somewhere).
 */
void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
		      struct hsr_port *port)
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0,
			       HSR_C_RING_ERROR);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR ring error message\n");
	rcu_read_unlock();
}

/* This is called when we haven't heard from the node with MAC address addr for
 * some time (just before the node is removed from the node table/list).
 */
void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_NODE_DOWN);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR node down\n");
	rcu_read_unlock();
}

/* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table
 * about the status of a specific node in the network, defined by its MAC
 * address.
 *
 * Input: hsr ifindex, node mac address
 * Output: hsr ifindex, node mac address (copied from request),
 *	   age of latest frame from node over slave 1, slave 2 [ms]
 */
static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
{
	/* For receiving */
	struct nlattr *na;
	struct net_device *hsr_dev;

	/* For sending */
	struct sk_buff *skb_out;
	void *msg_head;
	struct hsr_priv *hsr;
	struct hsr_port *port;
	unsigned char hsr_node_addr_b[ETH_ALEN];
	int hsr_node_if1_age;
	u16 hsr_node_if1_seq;
	int hsr_node_if2_age;
	u16 hsr_node_if2_seq;
	int addr_b_ifindex;
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;
	na = info->attrs[HSR_A_NODE_ADDR];
	if (!na)
		goto invalid;

	rcu_read_lock();
	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto rcu_unlock;
	if (!is_hsr_master(hsr_dev))
		goto rcu_unlock;

	/* Send reply */
	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_STATUS);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	hsr = netdev_priv(hsr_dev);
	res = hsr_get_node_data(hsr,
				(unsigned char *)
				nla_data(info->attrs[HSR_A_NODE_ADDR]),
				hsr_node_addr_b,
				&addr_b_ifindex,
				&hsr_node_if1_age,
				&hsr_node_if1_seq,
				&hsr_node_if2_age,
				&hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
		      nla_data(info->attrs[HSR_A_NODE_ADDR]));
	if (res < 0)
		goto nla_put_failure;

	if (addr_b_ifindex > -1) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
			      hsr_node_addr_b);
		if (res < 0)
			goto nla_put_failure;

		res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX,
				  addr_b_ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
	if (res < 0)
		goto nla_put_failure;
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
				  port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
				  port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

rcu_unlock:
	rcu_read_unlock();
invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	kfree_skb(skb_out);
	/* Fall through */

fail:
	rcu_read_unlock();
	return res;
}
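
/* Request/reply shape, for reference: userspace sends HSR_C_GET_NODE_STATUS
 * with HSR_A_IFINDEX (the hsr master) and HSR_A_NODE_ADDR (MacAddressA of the
 * node of interest), and gets back a single HSR_C_SET_NODE_STATUS message
 * carrying the attributes filled in above. Illustrative exchange:
 *
 *	HSR_C_GET_NODE_STATUS { HSR_A_IFINDEX, HSR_A_NODE_ADDR }
 *		-> HSR_C_SET_NODE_STATUS { HSR_A_IFINDEX, HSR_A_NODE_ADDR,
 *		   [HSR_A_NODE_ADDR_B, HSR_A_ADDR_B_IFINDEX,]
 *		   HSR_A_IF1_AGE, HSR_A_IF1_SEQ, HSR_A_IF1_IFINDEX,
 *		   HSR_A_IF2_AGE, HSR_A_IF2_SEQ, HSR_A_IF2_IFINDEX }
 */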

/* Get a list of MacAddressA of all nodes known to this node (including self).
 */
static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
{
	unsigned char addr[ETH_ALEN];
	struct net_device *hsr_dev;
	struct sk_buff *skb_out;
	struct hsr_priv *hsr;
	bool restart = false;
	struct nlattr *na;
	void *pos = NULL;
	void *msg_head;
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;

	rcu_read_lock();
	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto rcu_unlock;
	if (!is_hsr_master(hsr_dev))
		goto rcu_unlock;

restart:
	/* Send reply */
	skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_LIST);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	if (!restart) {
		res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	hsr = netdev_priv(hsr_dev);

	if (!pos)
		pos = hsr_get_next_node(hsr, NULL, addr);
	while (pos) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
		if (res < 0) {
			if (res == -EMSGSIZE) {
				genlmsg_end(skb_out, msg_head);
				genlmsg_unicast(genl_info_net(info), skb_out,
						info->snd_portid);
				restart = true;
				goto restart;
			}
			goto nla_put_failure;
		}
		pos = hsr_get_next_node(hsr, pos, addr);
	}
	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

rcu_unlock:
	rcu_read_unlock();
invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	nlmsg_free(skb_out);
	/* Fall through */

fail:
	rcu_read_unlock();
	return res;
}
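
/* Note for consumers: when the node table does not fit in one message, the
 * loop above ends the current skb on -EMSGSIZE and restarts, so the reply may
 * arrive as several HSR_C_SET_NODE_LIST messages; userspace should simply
 * merge the HSR_A_NODE_ADDR attributes from all of them.
 */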

static const struct genl_small_ops hsr_ops[] = {
	{
		.cmd = HSR_C_GET_NODE_STATUS,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,
		.doit = hsr_get_node_status,
		.dumpit = NULL,
	},
	{
		.cmd = HSR_C_GET_NODE_LIST,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,
		.doit = hsr_get_node_list,
		.dumpit = NULL,
	},
};

static struct genl_family hsr_genl_family __ro_after_init = {
	.hdrsize = 0,
	.name = "HSR",
	.version = 1,
	.maxattr = HSR_A_MAX,
	.policy = hsr_genl_policy,
	.netnsok = true,
	.module = THIS_MODULE,
	.small_ops = hsr_ops,
	.n_small_ops = ARRAY_SIZE(hsr_ops),
	.mcgrps = hsr_mcgrps,
	.n_mcgrps = ARRAY_SIZE(hsr_mcgrps),
};

int __init hsr_netlink_init(void)
{
	int rc;

	rc = rtnl_link_register(&hsr_link_ops);
	if (rc)
		goto fail_rtnl_link_register;

	rc = genl_register_family(&hsr_genl_family);
	if (rc)
		goto fail_genl_register_family;

	hsr_debugfs_create_root();
	return 0;

fail_genl_register_family:
	rtnl_link_unregister(&hsr_link_ops);
fail_rtnl_link_register:

	return rc;
}

void __exit hsr_netlink_exit(void)
{
	genl_unregister_family(&hsr_genl_family);
	rtnl_link_unregister(&hsr_link_ops);
}

MODULE_ALIAS_RTNL_LINK("hsr");
556