xref: /OK3568_Linux_fs/kernel/net/batman-adv/soft-interface.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /* Copyright (C) 2007-2020  B.A.T.M.A.N. contributors:
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Marek Lindner, Simon Wunderlich
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #include "soft-interface.h"
8*4882a593Smuzhiyun #include "main.h"
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <linux/atomic.h>
11*4882a593Smuzhiyun #include <linux/byteorder/generic.h>
12*4882a593Smuzhiyun #include <linux/cache.h>
13*4882a593Smuzhiyun #include <linux/compiler.h>
14*4882a593Smuzhiyun #include <linux/cpumask.h>
15*4882a593Smuzhiyun #include <linux/errno.h>
16*4882a593Smuzhiyun #include <linux/etherdevice.h>
17*4882a593Smuzhiyun #include <linux/ethtool.h>
18*4882a593Smuzhiyun #include <linux/gfp.h>
19*4882a593Smuzhiyun #include <linux/if_ether.h>
20*4882a593Smuzhiyun #include <linux/if_vlan.h>
21*4882a593Smuzhiyun #include <linux/jiffies.h>
22*4882a593Smuzhiyun #include <linux/kernel.h>
23*4882a593Smuzhiyun #include <linux/kref.h>
24*4882a593Smuzhiyun #include <linux/list.h>
25*4882a593Smuzhiyun #include <linux/lockdep.h>
26*4882a593Smuzhiyun #include <linux/netdevice.h>
27*4882a593Smuzhiyun #include <linux/netlink.h>
28*4882a593Smuzhiyun #include <linux/percpu.h>
29*4882a593Smuzhiyun #include <linux/printk.h>
30*4882a593Smuzhiyun #include <linux/random.h>
31*4882a593Smuzhiyun #include <linux/rculist.h>
32*4882a593Smuzhiyun #include <linux/rcupdate.h>
33*4882a593Smuzhiyun #include <linux/rtnetlink.h>
34*4882a593Smuzhiyun #include <linux/skbuff.h>
35*4882a593Smuzhiyun #include <linux/slab.h>
36*4882a593Smuzhiyun #include <linux/socket.h>
37*4882a593Smuzhiyun #include <linux/spinlock.h>
38*4882a593Smuzhiyun #include <linux/stddef.h>
39*4882a593Smuzhiyun #include <linux/string.h>
40*4882a593Smuzhiyun #include <linux/types.h>
41*4882a593Smuzhiyun #include <uapi/linux/batadv_packet.h>
42*4882a593Smuzhiyun #include <uapi/linux/batman_adv.h>
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun #include "bat_algo.h"
45*4882a593Smuzhiyun #include "bridge_loop_avoidance.h"
46*4882a593Smuzhiyun #include "debugfs.h"
47*4882a593Smuzhiyun #include "distributed-arp-table.h"
48*4882a593Smuzhiyun #include "gateway_client.h"
49*4882a593Smuzhiyun #include "hard-interface.h"
50*4882a593Smuzhiyun #include "multicast.h"
51*4882a593Smuzhiyun #include "network-coding.h"
52*4882a593Smuzhiyun #include "originator.h"
53*4882a593Smuzhiyun #include "send.h"
54*4882a593Smuzhiyun #include "sysfs.h"
55*4882a593Smuzhiyun #include "translation-table.h"
56*4882a593Smuzhiyun 
/**
 * batadv_skb_head_push() - Increase header size and move (push) head pointer
 * @skb: packet buffer which should be modified
 * @len: number of bytes to add
 *
 * Return: 0 on success or negative error number in case of failure
 */
int batadv_skb_head_push(struct sk_buff *skb, unsigned int len)
{
	int ret;

	/* TODO: We must check if we can release all references to non-payload
	 * data using __skb_header_release in our skbs to allow skb_cow_header
	 * to work optimally. This means that those skbs are not allowed to read
	 * or write any data which is before the current position of skb->data
	 * after that call and thus allow other skbs with the same data buffer
	 * to write freely in that area.
	 */
	ret = skb_cow_head(skb, len);
	if (ret < 0)
		return ret;

	skb_push(skb, len);

	return 0;
}
82*4882a593Smuzhiyun 
/**
 * batadv_interface_open() - ndo_open callback for the soft interface
 * @dev: registered network device being brought up
 *
 * Only enables the transmit queue; no mesh state is touched here.
 *
 * Return: always 0
 */
static int batadv_interface_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}
88*4882a593Smuzhiyun 
/**
 * batadv_interface_release() - ndo_stop callback for the soft interface
 * @dev: registered network device being brought down
 *
 * Only disables the transmit queue; no mesh state is touched here.
 *
 * Return: always 0
 */
static int batadv_interface_release(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}
94*4882a593Smuzhiyun 
95*4882a593Smuzhiyun /**
96*4882a593Smuzhiyun  * batadv_sum_counter() - Sum the cpu-local counters for index 'idx'
97*4882a593Smuzhiyun  * @bat_priv: the bat priv with all the soft interface information
98*4882a593Smuzhiyun  * @idx: index of counter to sum up
99*4882a593Smuzhiyun  *
100*4882a593Smuzhiyun  * Return: sum of all cpu-local counters
101*4882a593Smuzhiyun  */
batadv_sum_counter(struct batadv_priv * bat_priv,size_t idx)102*4882a593Smuzhiyun static u64 batadv_sum_counter(struct batadv_priv *bat_priv,  size_t idx)
103*4882a593Smuzhiyun {
104*4882a593Smuzhiyun 	u64 *counters, sum = 0;
105*4882a593Smuzhiyun 	int cpu;
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun 	for_each_possible_cpu(cpu) {
108*4882a593Smuzhiyun 		counters = per_cpu_ptr(bat_priv->bat_counters, cpu);
109*4882a593Smuzhiyun 		sum += counters[idx];
110*4882a593Smuzhiyun 	}
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun 	return sum;
113*4882a593Smuzhiyun }
114*4882a593Smuzhiyun 
batadv_interface_stats(struct net_device * dev)115*4882a593Smuzhiyun static struct net_device_stats *batadv_interface_stats(struct net_device *dev)
116*4882a593Smuzhiyun {
117*4882a593Smuzhiyun 	struct batadv_priv *bat_priv = netdev_priv(dev);
118*4882a593Smuzhiyun 	struct net_device_stats *stats = &dev->stats;
119*4882a593Smuzhiyun 
120*4882a593Smuzhiyun 	stats->tx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_TX);
121*4882a593Smuzhiyun 	stats->tx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_TX_BYTES);
122*4882a593Smuzhiyun 	stats->tx_dropped = batadv_sum_counter(bat_priv, BATADV_CNT_TX_DROPPED);
123*4882a593Smuzhiyun 	stats->rx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_RX);
124*4882a593Smuzhiyun 	stats->rx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_RX_BYTES);
125*4882a593Smuzhiyun 	return stats;
126*4882a593Smuzhiyun }
127*4882a593Smuzhiyun 
/**
 * batadv_interface_set_mac_addr() - ndo_set_mac_address callback
 * @dev: registered network device to modify
 * @p: pointer to a struct sockaddr holding the new MAC address
 *
 * Replaces the soft interface MAC and, if the mesh is already active,
 * migrates the corresponding translation-table entries on every configured
 * VLAN from the old address to the new one.
 *
 * Return: 0 on success, -EADDRNOTAVAIL if the new address is not a valid
 * unicast MAC
 */
static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
{
	struct batadv_priv *bat_priv = netdev_priv(dev);
	struct batadv_softif_vlan *vlan;
	struct sockaddr *addr = p;
	u8 old_addr[ETH_ALEN];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* save the old address first: it is still needed below to remove the
	 * stale TT entries after dev_addr has been overwritten
	 */
	ether_addr_copy(old_addr, dev->dev_addr);
	ether_addr_copy(dev->dev_addr, addr->sa_data);

	/* only modify transtable if it has been initialized before */
	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		return 0;

	/* walk the VLAN list under RCU and swap the local TT entry per VLAN */
	rcu_read_lock();
	hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
		batadv_tt_local_remove(bat_priv, old_addr, vlan->vid,
				       "mac address changed", false);
		batadv_tt_local_add(dev, addr->sa_data, vlan->vid,
				    BATADV_NULL_IFINDEX, BATADV_NO_MARK);
	}
	rcu_read_unlock();

	return 0;
}
156*4882a593Smuzhiyun 
batadv_interface_change_mtu(struct net_device * dev,int new_mtu)157*4882a593Smuzhiyun static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
158*4882a593Smuzhiyun {
159*4882a593Smuzhiyun 	/* check ranges */
160*4882a593Smuzhiyun 	if (new_mtu < 68 || new_mtu > batadv_hardif_min_mtu(dev))
161*4882a593Smuzhiyun 		return -EINVAL;
162*4882a593Smuzhiyun 
163*4882a593Smuzhiyun 	dev->mtu = new_mtu;
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun 	return 0;
166*4882a593Smuzhiyun }
167*4882a593Smuzhiyun 
/**
 * batadv_interface_set_rx_mode() - set the rx mode of a device
 * @dev: registered network device to modify
 *
 * We do not actually need to set any rx filters for the virtual batman
 * soft interface. However a dummy handler enables a user to set static
 * multicast listeners for instance.
 */
static void batadv_interface_set_rx_mode(struct net_device *dev)
{
	/* intentionally empty - see kernel-doc above */
}
179*4882a593Smuzhiyun 
/**
 * batadv_interface_tx() - ndo_start_xmit handler of the soft interface
 * @skb: ethernet frame handed down by the network stack
 * @soft_iface: the batman-adv soft interface transmitting @skb
 *
 * Classifies the frame (broadcast/multicast/unicast, DHCP snooping for the
 * gateway feature, STP/ECTP filtering, batman-in-batman loop prevention) and
 * hands it to the matching batman-adv send path. Ownership of @skb is always
 * taken: it is either queued/sent or freed here.
 *
 * Return: NETDEV_TX_OK in all cases (drops are accounted via counters)
 */
static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
				       struct net_device *soft_iface)
{
	struct ethhdr *ethhdr;
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_bcast_packet *bcast_packet;
	/* IEEE 802.1D Spanning Tree group address */
	static const u8 stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
					      0x00, 0x00};
	/* Ethernet Configuration Testing Protocol address (Cisco loopback) */
	static const u8 ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
					       0x00, 0x00};
	enum batadv_dhcp_recipient dhcp_rcp = BATADV_DHCP_NO;
	u8 *dst_hint = NULL, chaddr[ETH_ALEN];
	struct vlan_ethhdr *vhdr;
	unsigned int header_len = 0;
	int data_len = skb->len, ret;
	unsigned long brd_delay = 1;
	bool do_bcast = false, client_added;
	unsigned short vid;
	u32 seqno;
	int gw_mode;
	enum batadv_forw_mode forw_mode = BATADV_FORW_SINGLE;
	struct batadv_orig_node *mcast_single_orig = NULL;
	int mcast_is_routable = 0;
	int network_offset = ETH_HLEN;
	__be16 proto;

	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		goto dropped;

	/* reset control block to avoid left overs from previous users */
	memset(skb->cb, 0, sizeof(struct batadv_skb_cb));

	netif_trans_update(soft_iface);
	vid = batadv_get_vid(skb, 0);

	skb_reset_mac_header(skb);
	ethhdr = eth_hdr(skb);

	proto = ethhdr->h_proto;

	switch (ntohs(proto)) {
	case ETH_P_8021Q:
		if (!pskb_may_pull(skb, sizeof(*vhdr)))
			goto dropped;
		vhdr = vlan_eth_hdr(skb);
		proto = vhdr->h_vlan_encapsulated_proto;

		/* drop batman-in-batman packets to prevent loops */
		if (proto != htons(ETH_P_BATMAN)) {
			network_offset += VLAN_HLEN;
			break;
		}

		fallthrough;
	case ETH_P_BATMAN:
		goto dropped;
	}

	skb_set_network_header(skb, network_offset);

	if (batadv_bla_tx(bat_priv, skb, vid))
		goto dropped;

	/* skb->data might have been reallocated by batadv_bla_tx() */
	ethhdr = eth_hdr(skb);

	/* Register the client MAC in the transtable */
	if (!is_multicast_ether_addr(ethhdr->h_source) &&
	    !batadv_bla_is_loopdetect_mac(ethhdr->h_source)) {
		client_added = batadv_tt_local_add(soft_iface, ethhdr->h_source,
						   vid, skb->skb_iif,
						   skb->mark);
		if (!client_added)
			goto dropped;
	}

	/* Snoop address candidates from DHCPACKs for early DAT filling */
	batadv_dat_snoop_outgoing_dhcp_ack(bat_priv, skb, proto, vid);

	/* don't accept stp packets. STP does not help in meshes.
	 * better use the bridge loop avoidance ...
	 *
	 * The same goes for ECTP sent at least by some Cisco Switches,
	 * it might confuse the mesh when used with bridge loop avoidance.
	 */
	if (batadv_compare_eth(ethhdr->h_dest, stp_addr))
		goto dropped;

	if (batadv_compare_eth(ethhdr->h_dest, ectp_addr))
		goto dropped;

	gw_mode = atomic_read(&bat_priv->gw.mode);
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* if gw mode is off, broadcast every packet */
		if (gw_mode == BATADV_GW_MODE_OFF) {
			do_bcast = true;
			goto send;
		}

		dhcp_rcp = batadv_gw_dhcp_recipient_get(skb, &header_len,
							chaddr);
		/* skb->data may have been modified by
		 * batadv_gw_dhcp_recipient_get()
		 */
		ethhdr = eth_hdr(skb);
		/* if gw_mode is on, broadcast any non-DHCP message.
		 * All the DHCP packets are going to be sent as unicast
		 */
		if (dhcp_rcp == BATADV_DHCP_NO) {
			do_bcast = true;
			goto send;
		}

		if (dhcp_rcp == BATADV_DHCP_TO_CLIENT)
			dst_hint = chaddr;
		else if ((gw_mode == BATADV_GW_MODE_SERVER) &&
			 (dhcp_rcp == BATADV_DHCP_TO_SERVER))
			/* gateways should not forward any DHCP message if
			 * directed to a DHCP server
			 */
			goto dropped;

send:
		if (do_bcast && !is_broadcast_ether_addr(ethhdr->h_dest)) {
			forw_mode = batadv_mcast_forw_mode(bat_priv, skb,
							   &mcast_single_orig,
							   &mcast_is_routable);
			if (forw_mode == BATADV_FORW_NONE)
				goto dropped;

			if (forw_mode == BATADV_FORW_SINGLE ||
			    forw_mode == BATADV_FORW_SOME)
				do_bcast = false;
		}
	}

	batadv_skb_set_priority(skb, 0);

	/* ethernet packet should be broadcasted */
	if (do_bcast) {
		primary_if = batadv_primary_if_get_selected(bat_priv);
		if (!primary_if)
			goto dropped;

		/* in case of ARP request, we do not immediately broadcast the
		 * packet, instead we first wait for DAT to try to retrieve the
		 * correct ARP entry
		 */
		if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb))
			brd_delay = msecs_to_jiffies(ARP_REQ_DELAY);

		if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
			goto dropped;

		bcast_packet = (struct batadv_bcast_packet *)skb->data;
		bcast_packet->version = BATADV_COMPAT_VERSION;
		bcast_packet->ttl = BATADV_TTL;

		/* batman packet type: broadcast */
		bcast_packet->packet_type = BATADV_BCAST;
		bcast_packet->reserved = 0;

		/* hw address of first interface is the orig mac because only
		 * this mac is known throughout the mesh
		 */
		ether_addr_copy(bcast_packet->orig,
				primary_if->net_dev->dev_addr);

		/* set broadcast sequence number */
		seqno = atomic_inc_return(&bat_priv->bcast_seqno);
		bcast_packet->seqno = htonl(seqno);

		batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay, true);

		/* a copy is stored in the bcast list, therefore removing
		 * the original skb.
		 */
		consume_skb(skb);

	/* unicast packet */
	} else {
		/* DHCP packets going to a server will use the GW feature */
		if (dhcp_rcp == BATADV_DHCP_TO_SERVER) {
			ret = batadv_gw_out_of_range(bat_priv, skb);
			if (ret)
				goto dropped;
			ret = batadv_send_skb_via_gw(bat_priv, skb, vid);
		} else if (mcast_single_orig) {
			ret = batadv_mcast_forw_send_orig(bat_priv, skb, vid,
							  mcast_single_orig);
		} else if (forw_mode == BATADV_FORW_SOME) {
			ret = batadv_mcast_forw_send(bat_priv, skb, vid,
						     mcast_is_routable);
		} else {
			if (batadv_dat_snoop_outgoing_arp_request(bat_priv,
								  skb))
				goto dropped;

			batadv_dat_snoop_outgoing_arp_reply(bat_priv, skb);

			ret = batadv_send_skb_via_tt(bat_priv, skb, dst_hint,
						     vid);
		}
		if (ret != NET_XMIT_SUCCESS)
			goto dropped_freed;
	}

	batadv_inc_counter(bat_priv, BATADV_CNT_TX);
	batadv_add_counter(bat_priv, BATADV_CNT_TX_BYTES, data_len);
	goto end;

dropped:
	kfree_skb(skb);
dropped_freed:
	batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED);
end:
	if (mcast_single_orig)
		batadv_orig_node_put(mcast_single_orig);
	if (primary_if)
		batadv_hardif_put(primary_if);
	return NETDEV_TX_OK;
}
403*4882a593Smuzhiyun 
/**
 * batadv_interface_rx() - receive ethernet frame on local batman-adv interface
 * @soft_iface: local interface which will receive the ethernet frame
 * @skb: ethernet frame for @soft_iface
 * @hdr_size: size of already parsed batman-adv header
 * @orig_node: originator from which the batman-adv packet was sent
 *
 * Sends an ethernet frame to the receive path of the local @soft_iface.
 * skb->data still points to the batman-adv header with the size @hdr_size.
 * The caller has to have parsed this header already and made sure that at
 * least @hdr_size bytes are still available for pull in @skb.
 *
 * The packet may still get dropped. This can happen when the encapsulated
 * ethernet frame is invalid or contains again a batman-adv packet. Also
 * unicast packets will be dropped directly when it was sent between two
 * isolated clients.
 */
void batadv_interface_rx(struct net_device *soft_iface,
			 struct sk_buff *skb, int hdr_size,
			 struct batadv_orig_node *orig_node)
{
	struct batadv_bcast_packet *batadv_bcast_packet;
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	struct vlan_ethhdr *vhdr;
	struct ethhdr *ethhdr;
	unsigned short vid;
	int packet_type;

	/* remember the batman-adv packet type before the header is pulled */
	batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
	packet_type = batadv_bcast_packet->packet_type;

	skb_pull_rcsum(skb, hdr_size);
	skb_reset_mac_header(skb);

	/* clean the netfilter state now that the batman-adv header has been
	 * removed
	 */
	nf_reset_ct(skb);

	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
		goto dropped;

	vid = batadv_get_vid(skb, 0);
	ethhdr = eth_hdr(skb);

	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_8021Q:
		if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
			goto dropped;

		vhdr = (struct vlan_ethhdr *)skb->data;

		/* drop batman-in-batman packets to prevent loops */
		if (vhdr->h_vlan_encapsulated_proto != htons(ETH_P_BATMAN))
			break;

		fallthrough;
	case ETH_P_BATMAN:
		goto dropped;
	}

	/* skb->dev & skb->pkt_type are set here */
	skb->protocol = eth_type_trans(skb, soft_iface);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
			   skb->len + ETH_HLEN);

	/* Let the bridge loop avoidance check the packet. If it will
	 * not handle it, we can safely push it up.
	 */
	if (batadv_bla_rx(bat_priv, skb, vid, packet_type))
		goto out;

	if (orig_node)
		batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
						     ethhdr->h_source, vid);

	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* set the mark on broadcast packets if AP isolation is ON and
		 * the packet is coming from an "isolated" client
		 */
		if (batadv_vlan_ap_isola_get(bat_priv, vid) &&
		    batadv_tt_global_is_isolated(bat_priv, ethhdr->h_source,
						 vid)) {
			/* save bits in skb->mark not covered by the mask and
			 * apply the mark on the rest
			 */
			skb->mark &= ~bat_priv->isolation_mark_mask;
			skb->mark |= bat_priv->isolation_mark;
		}
	} else if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source,
					 ethhdr->h_dest, vid)) {
		goto dropped;
	}

	netif_rx(skb);
	goto out;

dropped:
	kfree_skb(skb);
out:
	return;
}
509*4882a593Smuzhiyun 
510*4882a593Smuzhiyun /**
511*4882a593Smuzhiyun  * batadv_softif_vlan_release() - release vlan from lists and queue for free
512*4882a593Smuzhiyun  *  after rcu grace period
513*4882a593Smuzhiyun  * @ref: kref pointer of the vlan object
514*4882a593Smuzhiyun  */
batadv_softif_vlan_release(struct kref * ref)515*4882a593Smuzhiyun void batadv_softif_vlan_release(struct kref *ref)
516*4882a593Smuzhiyun {
517*4882a593Smuzhiyun 	struct batadv_softif_vlan *vlan;
518*4882a593Smuzhiyun 
519*4882a593Smuzhiyun 	vlan = container_of(ref, struct batadv_softif_vlan, refcount);
520*4882a593Smuzhiyun 
521*4882a593Smuzhiyun 	spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
522*4882a593Smuzhiyun 	hlist_del_rcu(&vlan->list);
523*4882a593Smuzhiyun 	spin_unlock_bh(&vlan->bat_priv->softif_vlan_list_lock);
524*4882a593Smuzhiyun 
525*4882a593Smuzhiyun 	kfree_rcu(vlan, rcu);
526*4882a593Smuzhiyun }
527*4882a593Smuzhiyun 
528*4882a593Smuzhiyun /**
529*4882a593Smuzhiyun  * batadv_softif_vlan_get() - get the vlan object for a specific vid
530*4882a593Smuzhiyun  * @bat_priv: the bat priv with all the soft interface information
531*4882a593Smuzhiyun  * @vid: the identifier of the vlan object to retrieve
532*4882a593Smuzhiyun  *
533*4882a593Smuzhiyun  * Return: the private data of the vlan matching the vid passed as argument or
534*4882a593Smuzhiyun  * NULL otherwise. The refcounter of the returned object is incremented by 1.
535*4882a593Smuzhiyun  */
batadv_softif_vlan_get(struct batadv_priv * bat_priv,unsigned short vid)536*4882a593Smuzhiyun struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
537*4882a593Smuzhiyun 						  unsigned short vid)
538*4882a593Smuzhiyun {
539*4882a593Smuzhiyun 	struct batadv_softif_vlan *vlan_tmp, *vlan = NULL;
540*4882a593Smuzhiyun 
541*4882a593Smuzhiyun 	rcu_read_lock();
542*4882a593Smuzhiyun 	hlist_for_each_entry_rcu(vlan_tmp, &bat_priv->softif_vlan_list, list) {
543*4882a593Smuzhiyun 		if (vlan_tmp->vid != vid)
544*4882a593Smuzhiyun 			continue;
545*4882a593Smuzhiyun 
546*4882a593Smuzhiyun 		if (!kref_get_unless_zero(&vlan_tmp->refcount))
547*4882a593Smuzhiyun 			continue;
548*4882a593Smuzhiyun 
549*4882a593Smuzhiyun 		vlan = vlan_tmp;
550*4882a593Smuzhiyun 		break;
551*4882a593Smuzhiyun 	}
552*4882a593Smuzhiyun 	rcu_read_unlock();
553*4882a593Smuzhiyun 
554*4882a593Smuzhiyun 	return vlan;
555*4882a593Smuzhiyun }
556*4882a593Smuzhiyun 
/**
 * batadv_softif_create_vlan() - allocate the needed resources for a new vlan
 * @bat_priv: the bat priv with all the soft interface information
 * @vid: the VLAN identifier
 *
 * Return: 0 on success, a negative error otherwise.
 */
int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
{
	struct batadv_softif_vlan *vlan;
	int err;

	spin_lock_bh(&bat_priv->softif_vlan_list_lock);

	/* refuse duplicates: the lookup takes a reference which has to be
	 * dropped again before bailing out
	 */
	vlan = batadv_softif_vlan_get(bat_priv, vid);
	if (vlan) {
		batadv_softif_vlan_put(vlan);
		spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
		return -EEXIST;
	}

	/* GFP_ATOMIC because we are inside the spinlock */
	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
	if (!vlan) {
		spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
		return -ENOMEM;
	}

	vlan->bat_priv = bat_priv;
	vlan->vid = vid;
	kref_init(&vlan->refcount);

	atomic_set(&vlan->ap_isolation, 0);

	/* second reference is owned by the vlan list entry */
	kref_get(&vlan->refcount);
	hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
	spin_unlock_bh(&bat_priv->softif_vlan_list_lock);

	/* batadv_sysfs_add_vlan cannot be in the spinlock section due to the
	 * sleeping behavior of the sysfs functions and the fs_reclaim lock
	 */
	err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
	if (err) {
		/* ref for the function */
		batadv_softif_vlan_put(vlan);

		/* ref for the list */
		batadv_softif_vlan_put(vlan);
		return err;
	}

	/* add a new TT local entry. This one will be marked with the NOPURGE
	 * flag
	 */
	batadv_tt_local_add(bat_priv->soft_iface,
			    bat_priv->soft_iface->dev_addr, vid,
			    BATADV_NULL_IFINDEX, BATADV_NO_MARK);

	/* don't return reference to new softif_vlan */
	batadv_softif_vlan_put(vlan);

	return 0;
}
619*4882a593Smuzhiyun 
/**
 * batadv_softif_destroy_vlan() - remove and destroy a softif_vlan object
 * @bat_priv: the bat priv with all the soft interface information
 * @vlan: the object to remove
 *
 * Drops the caller's reference after tearing down the TT entry and the
 * sysfs representation of @vlan.
 */
static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
				       struct batadv_softif_vlan *vlan)
{
	/* explicitly remove the associated TT local entry because it is marked
	 * with the NOPURGE flag
	 */
	batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
			       vlan->vid, "vlan interface destroyed", false);

	batadv_sysfs_del_vlan(bat_priv, vlan);
	batadv_softif_vlan_put(vlan);
}
637*4882a593Smuzhiyun 
638*4882a593Smuzhiyun /**
639*4882a593Smuzhiyun  * batadv_interface_add_vid() - ndo_add_vid API implementation
640*4882a593Smuzhiyun  * @dev: the netdev of the mesh interface
641*4882a593Smuzhiyun  * @proto: protocol of the vlan id
642*4882a593Smuzhiyun  * @vid: identifier of the new vlan
643*4882a593Smuzhiyun  *
644*4882a593Smuzhiyun  * Set up all the internal structures for handling the new vlan on top of the
645*4882a593Smuzhiyun  * mesh interface
646*4882a593Smuzhiyun  *
647*4882a593Smuzhiyun  * Return: 0 on success or a negative error code in case of failure.
648*4882a593Smuzhiyun  */
static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
				    unsigned short vid)
{
	struct batadv_priv *bat_priv = netdev_priv(dev);
	struct batadv_softif_vlan *vlan;
	int ret;

	/* only 802.1Q vlans are supported.
	 * batman-adv does not know how to handle other types
	 */
	if (proto != htons(ETH_P_8021Q))
		return -EINVAL;

	/* internally batman-adv marks tagged vlan ids with this flag bit */
	vid |= BATADV_VLAN_HAS_TAG;

	/* if a new vlan is getting created and it already exists, it means that
	 * it was not deleted yet. batadv_softif_vlan_get() increases the
	 * refcount in order to revive the object.
	 *
	 * if it does not exist then create it.
	 */
	vlan = batadv_softif_vlan_get(bat_priv, vid);
	if (!vlan)
		return batadv_softif_create_vlan(bat_priv, vid);

	/* recreate the sysfs object if it was already destroyed (and it should
	 * be since we received a kill_vid() for this vlan
	 */
	if (!vlan->kobj) {
		ret = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
		if (ret) {
			/* drop the reference taken by
			 * batadv_softif_vlan_get() above
			 */
			batadv_softif_vlan_put(vlan);
			return ret;
		}
	}

	/* add a new TT local entry. This one will be marked with the NOPURGE
	 * flag. This must be added again, even if the vlan object already
	 * exists, because the entry was deleted by kill_vid()
	 */
	batadv_tt_local_add(bat_priv->soft_iface,
			    bat_priv->soft_iface->dev_addr, vid,
			    BATADV_NULL_IFINDEX, BATADV_NO_MARK);

	return 0;
}
695*4882a593Smuzhiyun 
696*4882a593Smuzhiyun /**
697*4882a593Smuzhiyun  * batadv_interface_kill_vid() - ndo_kill_vid API implementation
698*4882a593Smuzhiyun  * @dev: the netdev of the mesh interface
699*4882a593Smuzhiyun  * @proto: protocol of the vlan id
700*4882a593Smuzhiyun  * @vid: identifier of the deleted vlan
701*4882a593Smuzhiyun  *
702*4882a593Smuzhiyun  * Destroy all the internal structures used to handle the vlan identified by vid
703*4882a593Smuzhiyun  * on top of the mesh interface
704*4882a593Smuzhiyun  *
705*4882a593Smuzhiyun  * Return: 0 on success, -EINVAL if the specified prototype is not ETH_P_8021Q
706*4882a593Smuzhiyun  * or -ENOENT if the specified vlan id wasn't registered.
707*4882a593Smuzhiyun  */
static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto,
				     unsigned short vid)
{
	struct batadv_priv *bat_priv = netdev_priv(dev);
	struct batadv_softif_vlan *softif_vlan;

	/* refuse anything that is not an 802.1Q vlan; batman-adv cannot
	 * handle other tagging protocols
	 */
	if (proto != htons(ETH_P_8021Q))
		return -EINVAL;

	softif_vlan = batadv_softif_vlan_get(bat_priv,
					     vid | BATADV_VLAN_HAS_TAG);
	if (!softif_vlan)
		return -ENOENT;

	/* tear down TT entry, sysfs object and the list reference */
	batadv_softif_destroy_vlan(bat_priv, softif_vlan);

	/* drop the reference taken by batadv_softif_vlan_get() above; this
	 * may be the last one and thus free the vlan object
	 */
	batadv_softif_vlan_put(softif_vlan);

	return 0;
}
731*4882a593Smuzhiyun 
/* batman-adv network devices have devices nesting below it and are a special
 * "super class" of normal network devices; split their locks off into a
 * separate class since they always nest.
 */
/* lockdep class used for the per-queue _xmit_lock of batadv interfaces */
static struct lock_class_key batadv_netdev_xmit_lock_key;
/* lockdep class used for the addr_list_lock of batadv interfaces */
static struct lock_class_key batadv_netdev_addr_lock_key;
738*4882a593Smuzhiyun 
739*4882a593Smuzhiyun /**
740*4882a593Smuzhiyun  * batadv_set_lockdep_class_one() - Set lockdep class for a single tx queue
741*4882a593Smuzhiyun  * @dev: device which owns the tx queue
742*4882a593Smuzhiyun  * @txq: tx queue to modify
743*4882a593Smuzhiyun  * @_unused: always NULL
744*4882a593Smuzhiyun  */
static void batadv_set_lockdep_class_one(struct net_device *dev,
					 struct netdev_queue *txq,
					 void *_unused)
{
	/* move this queue's xmit lock into the batadv-specific lockdep class */
	lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key);
}
751*4882a593Smuzhiyun 
752*4882a593Smuzhiyun /**
753*4882a593Smuzhiyun  * batadv_set_lockdep_class() - Set txq and addr_list lockdep class
754*4882a593Smuzhiyun  * @dev: network device to modify
755*4882a593Smuzhiyun  */
batadv_set_lockdep_class(struct net_device * dev)756*4882a593Smuzhiyun static void batadv_set_lockdep_class(struct net_device *dev)
757*4882a593Smuzhiyun {
758*4882a593Smuzhiyun 	lockdep_set_class(&dev->addr_list_lock, &batadv_netdev_addr_lock_key);
759*4882a593Smuzhiyun 	netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL);
760*4882a593Smuzhiyun }
761*4882a593Smuzhiyun 
762*4882a593Smuzhiyun /**
763*4882a593Smuzhiyun  * batadv_softif_init_late() - late stage initialization of soft interface
764*4882a593Smuzhiyun  * @dev: registered network device to modify
765*4882a593Smuzhiyun  *
766*4882a593Smuzhiyun  * Return: error code on failures
767*4882a593Smuzhiyun  */
static int batadv_softif_init_late(struct net_device *dev)
{
	struct batadv_priv *bat_priv;
	u32 random_seqno;
	int ret;
	size_t cnt_len = sizeof(u64) * BATADV_CNT_NUM;

	batadv_set_lockdep_class(dev);

	bat_priv = netdev_priv(dev);
	bat_priv->soft_iface = dev;

	/* batadv_interface_stats() needs to be available as soon as
	 * register_netdevice() has been called
	 */
	bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(u64));
	if (!bat_priv->bat_counters)
		return -ENOMEM;

	/* initialize all tunables to their compiled-in defaults */
	atomic_set(&bat_priv->aggregated_ogms, 1);
	atomic_set(&bat_priv->bonding, 0);
#ifdef CONFIG_BATMAN_ADV_BLA
	atomic_set(&bat_priv->bridge_loop_avoidance, 1);
#endif
#ifdef CONFIG_BATMAN_ADV_DAT
	atomic_set(&bat_priv->distributed_arp_table, 1);
#endif
#ifdef CONFIG_BATMAN_ADV_MCAST
	atomic_set(&bat_priv->multicast_mode, 1);
	atomic_set(&bat_priv->multicast_fanout, 16);
	atomic_set(&bat_priv->mcast.num_want_all_unsnoopables, 0);
	atomic_set(&bat_priv->mcast.num_want_all_ipv4, 0);
	atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0);
#endif
	atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF);
	atomic_set(&bat_priv->gw.bandwidth_down, 100);
	atomic_set(&bat_priv->gw.bandwidth_up, 20);
	atomic_set(&bat_priv->orig_interval, 1000);
	atomic_set(&bat_priv->hop_penalty, 30);
#ifdef CONFIG_BATMAN_ADV_DEBUG
	atomic_set(&bat_priv->log_level, 0);
#endif
	atomic_set(&bat_priv->fragmentation, 1);
	atomic_set(&bat_priv->packet_size_max, ETH_DATA_LEN);
	atomic_set(&bat_priv->bcast_queue_left, BATADV_BCAST_QUEUE_LEN);
	atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);

	/* internal protocol state */
	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
	atomic_set(&bat_priv->bcast_seqno, 1);
	atomic_set(&bat_priv->tt.vn, 0);
	atomic_set(&bat_priv->tt.local_changes, 0);
	atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
#ifdef CONFIG_BATMAN_ADV_BLA
	atomic_set(&bat_priv->bla.num_requests, 0);
#endif
	atomic_set(&bat_priv->tp_num, 0);

	bat_priv->tt.last_changeset = NULL;
	bat_priv->tt.last_changeset_len = 0;
	bat_priv->isolation_mark = 0;
	bat_priv->isolation_mark_mask = 0;

	/* randomize initial seqno to avoid collision */
	get_random_bytes(&random_seqno, sizeof(random_seqno));
	atomic_set(&bat_priv->frag_seqno, random_seqno);

	bat_priv->primary_if = NULL;

	batadv_nc_init_bat_priv(bat_priv);

	ret = batadv_algo_select(bat_priv, batadv_routing_algo);
	if (ret < 0)
		goto free_bat_counters;

	ret = batadv_debugfs_add_meshif(dev);
	if (ret < 0)
		goto free_bat_counters;

	ret = batadv_mesh_init(dev);
	if (ret < 0)
		goto unreg_debugfs;

	return 0;

	/* error unwinding: release resources in reverse acquisition order */
unreg_debugfs:
	batadv_debugfs_del_meshif(dev);
free_bat_counters:
	free_percpu(bat_priv->bat_counters);
	bat_priv->bat_counters = NULL;

	return ret;
}
860*4882a593Smuzhiyun 
861*4882a593Smuzhiyun /**
862*4882a593Smuzhiyun  * batadv_softif_slave_add() - Add a slave interface to a batadv_soft_interface
863*4882a593Smuzhiyun  * @dev: batadv_soft_interface used as master interface
864*4882a593Smuzhiyun  * @slave_dev: net_device which should become the slave interface
865*4882a593Smuzhiyun  * @extack: extended ACK report struct
866*4882a593Smuzhiyun  *
867*4882a593Smuzhiyun  * Return: 0 if successful or error otherwise.
868*4882a593Smuzhiyun  */
batadv_softif_slave_add(struct net_device * dev,struct net_device * slave_dev,struct netlink_ext_ack * extack)869*4882a593Smuzhiyun static int batadv_softif_slave_add(struct net_device *dev,
870*4882a593Smuzhiyun 				   struct net_device *slave_dev,
871*4882a593Smuzhiyun 				   struct netlink_ext_ack *extack)
872*4882a593Smuzhiyun {
873*4882a593Smuzhiyun 	struct batadv_hard_iface *hard_iface;
874*4882a593Smuzhiyun 	struct net *net = dev_net(dev);
875*4882a593Smuzhiyun 	int ret = -EINVAL;
876*4882a593Smuzhiyun 
877*4882a593Smuzhiyun 	hard_iface = batadv_hardif_get_by_netdev(slave_dev);
878*4882a593Smuzhiyun 	if (!hard_iface || hard_iface->soft_iface)
879*4882a593Smuzhiyun 		goto out;
880*4882a593Smuzhiyun 
881*4882a593Smuzhiyun 	ret = batadv_hardif_enable_interface(hard_iface, net, dev->name);
882*4882a593Smuzhiyun 
883*4882a593Smuzhiyun out:
884*4882a593Smuzhiyun 	if (hard_iface)
885*4882a593Smuzhiyun 		batadv_hardif_put(hard_iface);
886*4882a593Smuzhiyun 	return ret;
887*4882a593Smuzhiyun }
888*4882a593Smuzhiyun 
889*4882a593Smuzhiyun /**
890*4882a593Smuzhiyun  * batadv_softif_slave_del() - Delete a slave iface from a batadv_soft_interface
891*4882a593Smuzhiyun  * @dev: batadv_soft_interface used as master interface
892*4882a593Smuzhiyun  * @slave_dev: net_device which should be removed from the master interface
893*4882a593Smuzhiyun  *
894*4882a593Smuzhiyun  * Return: 0 if successful or error otherwise.
895*4882a593Smuzhiyun  */
batadv_softif_slave_del(struct net_device * dev,struct net_device * slave_dev)896*4882a593Smuzhiyun static int batadv_softif_slave_del(struct net_device *dev,
897*4882a593Smuzhiyun 				   struct net_device *slave_dev)
898*4882a593Smuzhiyun {
899*4882a593Smuzhiyun 	struct batadv_hard_iface *hard_iface;
900*4882a593Smuzhiyun 	int ret = -EINVAL;
901*4882a593Smuzhiyun 
902*4882a593Smuzhiyun 	hard_iface = batadv_hardif_get_by_netdev(slave_dev);
903*4882a593Smuzhiyun 
904*4882a593Smuzhiyun 	if (!hard_iface || hard_iface->soft_iface != dev)
905*4882a593Smuzhiyun 		goto out;
906*4882a593Smuzhiyun 
907*4882a593Smuzhiyun 	batadv_hardif_disable_interface(hard_iface, BATADV_IF_CLEANUP_KEEP);
908*4882a593Smuzhiyun 	ret = 0;
909*4882a593Smuzhiyun 
910*4882a593Smuzhiyun out:
911*4882a593Smuzhiyun 	if (hard_iface)
912*4882a593Smuzhiyun 		batadv_hardif_put(hard_iface);
913*4882a593Smuzhiyun 	return ret;
914*4882a593Smuzhiyun }
915*4882a593Smuzhiyun 
/* netdev callbacks of the batman-adv soft (mesh) interface */
static const struct net_device_ops batadv_netdev_ops = {
	.ndo_init = batadv_softif_init_late,
	.ndo_open = batadv_interface_open,
	.ndo_stop = batadv_interface_release,
	.ndo_get_stats = batadv_interface_stats,
	.ndo_vlan_rx_add_vid = batadv_interface_add_vid,
	.ndo_vlan_rx_kill_vid = batadv_interface_kill_vid,
	.ndo_set_mac_address = batadv_interface_set_mac_addr,
	.ndo_change_mtu = batadv_interface_change_mtu,
	.ndo_set_rx_mode = batadv_interface_set_rx_mode,
	.ndo_start_xmit = batadv_interface_tx,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_add_slave = batadv_softif_slave_add,
	.ndo_del_slave = batadv_softif_slave_del,
};
931*4882a593Smuzhiyun 
/* ethtool callback: fill in driver identification strings */
static void batadv_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strscpy(info->driver, "B.A.T.M.A.N. advanced", sizeof(info->driver));
	strscpy(info->version, BATADV_SOURCE_VERSION, sizeof(info->version));
	strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strscpy(info->bus_info, "batman", sizeof(info->bus_info));
}
940*4882a593Smuzhiyun 
/* Inspired by drivers/net/ethernet/dlink/sundance.c:1702
 * Declare each description string in struct.name[] to get fixed sized buffer
 * and compile time checking for strings longer than ETH_GSTRING_LEN.
 *
 * NOTE(review): the entry order appears to mirror the counter indices used by
 * batadv_get_ethtool_stats()/batadv_sum_counter() — do not reorder; confirm
 * against the batadv counters enum before changing.
 */
static const struct {
	const char name[ETH_GSTRING_LEN];
} batadv_counters_strings[] = {
	{ "tx" },
	{ "tx_bytes" },
	{ "tx_dropped" },
	{ "rx" },
	{ "rx_bytes" },
	{ "forward" },
	{ "forward_bytes" },
	{ "mgmt_tx" },
	{ "mgmt_tx_bytes" },
	{ "mgmt_rx" },
	{ "mgmt_rx_bytes" },
	{ "frag_tx" },
	{ "frag_tx_bytes" },
	{ "frag_rx" },
	{ "frag_rx_bytes" },
	{ "frag_fwd" },
	{ "frag_fwd_bytes" },
	{ "tt_request_tx" },
	{ "tt_request_rx" },
	{ "tt_response_tx" },
	{ "tt_response_rx" },
	{ "tt_roam_adv_tx" },
	{ "tt_roam_adv_rx" },
#ifdef CONFIG_BATMAN_ADV_DAT
	{ "dat_get_tx" },
	{ "dat_get_rx" },
	{ "dat_put_tx" },
	{ "dat_put_rx" },
	{ "dat_cached_reply_tx" },
#endif
#ifdef CONFIG_BATMAN_ADV_NC
	{ "nc_code" },
	{ "nc_code_bytes" },
	{ "nc_recode" },
	{ "nc_recode_bytes" },
	{ "nc_buffer" },
	{ "nc_decode" },
	{ "nc_decode_bytes" },
	{ "nc_decode_failed" },
	{ "nc_sniffed" },
#endif
};
990*4882a593Smuzhiyun 
/* ethtool callback: copy out the per-counter description strings */
static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset != ETH_SS_STATS)
		return;

	memcpy(data, batadv_counters_strings, sizeof(batadv_counters_strings));
}
997*4882a593Smuzhiyun 
/* ethtool callback: report the current value of every batadv counter */
static void batadv_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct batadv_priv *bat_priv = netdev_priv(dev);
	int cnt;

	/* sum up the per-cpu counters for each statistic index */
	for (cnt = 0; cnt < BATADV_CNT_NUM; cnt++)
		data[cnt] = batadv_sum_counter(bat_priv, cnt);
}
1007*4882a593Smuzhiyun 
batadv_get_sset_count(struct net_device * dev,int stringset)1008*4882a593Smuzhiyun static int batadv_get_sset_count(struct net_device *dev, int stringset)
1009*4882a593Smuzhiyun {
1010*4882a593Smuzhiyun 	if (stringset == ETH_SS_STATS)
1011*4882a593Smuzhiyun 		return BATADV_CNT_NUM;
1012*4882a593Smuzhiyun 
1013*4882a593Smuzhiyun 	return -EOPNOTSUPP;
1014*4882a593Smuzhiyun }
1015*4882a593Smuzhiyun 
/* ethtool callbacks of the batman-adv soft interface */
static const struct ethtool_ops batadv_ethtool_ops = {
	.get_drvinfo = batadv_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_strings = batadv_get_strings,
	.get_ethtool_stats = batadv_get_ethtool_stats,
	.get_sset_count = batadv_get_sset_count,
};
1023*4882a593Smuzhiyun 
1024*4882a593Smuzhiyun /**
1025*4882a593Smuzhiyun  * batadv_softif_free() - Deconstructor of batadv_soft_interface
1026*4882a593Smuzhiyun  * @dev: Device to cleanup and remove
1027*4882a593Smuzhiyun  */
static void batadv_softif_free(struct net_device *dev)
{
	/* remove the debugfs entries before tearing down the mesh state */
	batadv_debugfs_del_meshif(dev);
	batadv_mesh_free(dev);

	/* some scheduled RCU callbacks need the bat_priv struct to accomplish
	 * their tasks. Wait for them all to be finished before freeing the
	 * netdev and its private data (bat_priv)
	 */
	rcu_barrier();
}
1039*4882a593Smuzhiyun 
1040*4882a593Smuzhiyun /**
1041*4882a593Smuzhiyun  * batadv_softif_init_early() - early stage initialization of soft interface
1042*4882a593Smuzhiyun  * @dev: registered network device to modify
1043*4882a593Smuzhiyun  */
batadv_softif_init_early(struct net_device * dev)1044*4882a593Smuzhiyun static void batadv_softif_init_early(struct net_device *dev)
1045*4882a593Smuzhiyun {
1046*4882a593Smuzhiyun 	ether_setup(dev);
1047*4882a593Smuzhiyun 
1048*4882a593Smuzhiyun 	dev->netdev_ops = &batadv_netdev_ops;
1049*4882a593Smuzhiyun 	dev->needs_free_netdev = true;
1050*4882a593Smuzhiyun 	dev->priv_destructor = batadv_softif_free;
1051*4882a593Smuzhiyun 	dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL;
1052*4882a593Smuzhiyun 	dev->features |= NETIF_F_LLTX;
1053*4882a593Smuzhiyun 	dev->priv_flags |= IFF_NO_QUEUE;
1054*4882a593Smuzhiyun 
1055*4882a593Smuzhiyun 	/* can't call min_mtu, because the needed variables
1056*4882a593Smuzhiyun 	 * have not been initialized yet
1057*4882a593Smuzhiyun 	 */
1058*4882a593Smuzhiyun 	dev->mtu = ETH_DATA_LEN;
1059*4882a593Smuzhiyun 
1060*4882a593Smuzhiyun 	/* generate random address */
1061*4882a593Smuzhiyun 	eth_hw_addr_random(dev);
1062*4882a593Smuzhiyun 
1063*4882a593Smuzhiyun 	dev->ethtool_ops = &batadv_ethtool_ops;
1064*4882a593Smuzhiyun }
1065*4882a593Smuzhiyun 
1066*4882a593Smuzhiyun /**
1067*4882a593Smuzhiyun  * batadv_softif_create() - Create and register soft interface
1068*4882a593Smuzhiyun  * @net: the applicable net namespace
1069*4882a593Smuzhiyun  * @name: name of the new soft interface
1070*4882a593Smuzhiyun  *
1071*4882a593Smuzhiyun  * Return: newly allocated soft_interface, NULL on errors
1072*4882a593Smuzhiyun  */
batadv_softif_create(struct net * net,const char * name)1073*4882a593Smuzhiyun struct net_device *batadv_softif_create(struct net *net, const char *name)
1074*4882a593Smuzhiyun {
1075*4882a593Smuzhiyun 	struct net_device *soft_iface;
1076*4882a593Smuzhiyun 	int ret;
1077*4882a593Smuzhiyun 
1078*4882a593Smuzhiyun 	soft_iface = alloc_netdev(sizeof(struct batadv_priv), name,
1079*4882a593Smuzhiyun 				  NET_NAME_UNKNOWN, batadv_softif_init_early);
1080*4882a593Smuzhiyun 	if (!soft_iface)
1081*4882a593Smuzhiyun 		return NULL;
1082*4882a593Smuzhiyun 
1083*4882a593Smuzhiyun 	dev_net_set(soft_iface, net);
1084*4882a593Smuzhiyun 
1085*4882a593Smuzhiyun 	soft_iface->rtnl_link_ops = &batadv_link_ops;
1086*4882a593Smuzhiyun 
1087*4882a593Smuzhiyun 	ret = register_netdevice(soft_iface);
1088*4882a593Smuzhiyun 	if (ret < 0) {
1089*4882a593Smuzhiyun 		pr_err("Unable to register the batman interface '%s': %i\n",
1090*4882a593Smuzhiyun 		       name, ret);
1091*4882a593Smuzhiyun 		free_netdev(soft_iface);
1092*4882a593Smuzhiyun 		return NULL;
1093*4882a593Smuzhiyun 	}
1094*4882a593Smuzhiyun 
1095*4882a593Smuzhiyun 	return soft_iface;
1096*4882a593Smuzhiyun }
1097*4882a593Smuzhiyun 
1098*4882a593Smuzhiyun /**
1099*4882a593Smuzhiyun  * batadv_softif_destroy_sysfs() - deletion of batadv_soft_interface via sysfs
1100*4882a593Smuzhiyun  * @soft_iface: the to-be-removed batman-adv interface
1101*4882a593Smuzhiyun  */
void batadv_softif_destroy_sysfs(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	struct batadv_softif_vlan *vlan;

	ASSERT_RTNL();

	/* destroy the "untagged" VLAN */
	vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
	if (vlan) {
		batadv_softif_destroy_vlan(bat_priv, vlan);
		/* drop the reference taken by batadv_softif_vlan_get() */
		batadv_softif_vlan_put(vlan);
	}

	batadv_sysfs_del_meshif(soft_iface);
	unregister_netdevice(soft_iface);
}
1119*4882a593Smuzhiyun 
1120*4882a593Smuzhiyun /**
1121*4882a593Smuzhiyun  * batadv_softif_destroy_netlink() - deletion of batadv_soft_interface via
1122*4882a593Smuzhiyun  *  netlink
1123*4882a593Smuzhiyun  * @soft_iface: the to-be-removed batman-adv interface
1124*4882a593Smuzhiyun  * @head: list pointer
1125*4882a593Smuzhiyun  */
static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
					  struct list_head *head)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	struct batadv_hard_iface *hard_iface;
	struct batadv_softif_vlan *vlan;

	/* detach all hard interfaces that are still enslaved to this
	 * soft interface (the hardif objects themselves are kept)
	 */
	list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface == soft_iface)
			batadv_hardif_disable_interface(hard_iface,
							BATADV_IF_CLEANUP_KEEP);
	}

	/* destroy the "untagged" VLAN */
	vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
	if (vlan) {
		batadv_softif_destroy_vlan(bat_priv, vlan);
		/* drop the reference taken by batadv_softif_vlan_get() */
		batadv_softif_vlan_put(vlan);
	}

	batadv_sysfs_del_meshif(soft_iface);
	unregister_netdevice_queue(soft_iface, head);
}
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun /**
1151*4882a593Smuzhiyun  * batadv_softif_is_valid() - Check whether device is a batadv soft interface
1152*4882a593Smuzhiyun  * @net_dev: device which should be checked
1153*4882a593Smuzhiyun  *
1154*4882a593Smuzhiyun  * Return: true when net_dev is a batman-adv interface, false otherwise
1155*4882a593Smuzhiyun  */
batadv_softif_is_valid(const struct net_device * net_dev)1156*4882a593Smuzhiyun bool batadv_softif_is_valid(const struct net_device *net_dev)
1157*4882a593Smuzhiyun {
1158*4882a593Smuzhiyun 	if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx)
1159*4882a593Smuzhiyun 		return true;
1160*4882a593Smuzhiyun 
1161*4882a593Smuzhiyun 	return false;
1162*4882a593Smuzhiyun }
1163*4882a593Smuzhiyun 
/* rtnl_link glue: enables creation/deletion of "batadv" interfaces via
 * rtnetlink (e.g. ip link add ... type batadv)
 */
struct rtnl_link_ops batadv_link_ops __read_mostly = {
	.kind		= "batadv",
	.priv_size	= sizeof(struct batadv_priv),
	.setup		= batadv_softif_init_early,
	.dellink	= batadv_softif_destroy_netlink,
};
1170