xref: /OK3568_Linux_fs/kernel/drivers/net/vrf.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * vrf.c: device driver to encapsulate a VRF space
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (c) 2015 Cumulus Networks. All rights reserved.
6*4882a593Smuzhiyun  * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
7*4882a593Smuzhiyun  * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  * Based on dummy, team and ipvlan drivers
10*4882a593Smuzhiyun  */
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include <linux/module.h>
13*4882a593Smuzhiyun #include <linux/kernel.h>
14*4882a593Smuzhiyun #include <linux/netdevice.h>
15*4882a593Smuzhiyun #include <linux/etherdevice.h>
16*4882a593Smuzhiyun #include <linux/ip.h>
17*4882a593Smuzhiyun #include <linux/init.h>
18*4882a593Smuzhiyun #include <linux/moduleparam.h>
19*4882a593Smuzhiyun #include <linux/netfilter.h>
20*4882a593Smuzhiyun #include <linux/rtnetlink.h>
21*4882a593Smuzhiyun #include <net/rtnetlink.h>
22*4882a593Smuzhiyun #include <linux/u64_stats_sync.h>
23*4882a593Smuzhiyun #include <linux/hashtable.h>
24*4882a593Smuzhiyun #include <linux/spinlock_types.h>
25*4882a593Smuzhiyun 
26*4882a593Smuzhiyun #include <linux/inetdevice.h>
27*4882a593Smuzhiyun #include <net/arp.h>
28*4882a593Smuzhiyun #include <net/ip.h>
29*4882a593Smuzhiyun #include <net/ip_fib.h>
30*4882a593Smuzhiyun #include <net/ip6_fib.h>
31*4882a593Smuzhiyun #include <net/ip6_route.h>
32*4882a593Smuzhiyun #include <net/route.h>
33*4882a593Smuzhiyun #include <net/addrconf.h>
34*4882a593Smuzhiyun #include <net/l3mdev.h>
35*4882a593Smuzhiyun #include <net/fib_rules.h>
36*4882a593Smuzhiyun #include <net/netns/generic.h>
37*4882a593Smuzhiyun #include <net/netfilter/nf_conntrack.h>
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun #define DRV_NAME	"vrf"
40*4882a593Smuzhiyun #define DRV_VERSION	"1.1"
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun #define FIB_RULE_PREF  1000       /* default preference for FIB rules */
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun #define HT_MAP_BITS	4
45*4882a593Smuzhiyun #define HASH_INITVAL	((u32)0xcafef00d)
46*4882a593Smuzhiyun 
47*4882a593Smuzhiyun struct  vrf_map {
48*4882a593Smuzhiyun 	DECLARE_HASHTABLE(ht, HT_MAP_BITS);
49*4882a593Smuzhiyun 	spinlock_t vmap_lock;
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun 	/* shared_tables:
52*4882a593Smuzhiyun 	 * count how many distinct tables do not comply with the strict mode
53*4882a593Smuzhiyun 	 * requirement.
54*4882a593Smuzhiyun 	 * shared_tables value must be 0 in order to enable the strict mode.
55*4882a593Smuzhiyun 	 *
56*4882a593Smuzhiyun 	 * example of the evolution of shared_tables:
57*4882a593Smuzhiyun 	 *                                                        | time
58*4882a593Smuzhiyun 	 * add  vrf0 --> table 100        shared_tables = 0       | t0
59*4882a593Smuzhiyun 	 * add  vrf1 --> table 101        shared_tables = 0       | t1
60*4882a593Smuzhiyun 	 * add  vrf2 --> table 100        shared_tables = 1       | t2
61*4882a593Smuzhiyun 	 * add  vrf3 --> table 100        shared_tables = 1       | t3
62*4882a593Smuzhiyun 	 * add  vrf4 --> table 101        shared_tables = 2       v t4
63*4882a593Smuzhiyun 	 *
64*4882a593Smuzhiyun 	 * shared_tables is a "step function" (or "staircase function")
65*4882a593Smuzhiyun 	 * and it is increased by one when the second vrf is associated with
66*4882a593Smuzhiyun 	 * a table.
67*4882a593Smuzhiyun 	 *
68*4882a593Smuzhiyun 	 * at t2, vrf0 and vrf2 are bound to table 100: shared_tables = 1.
69*4882a593Smuzhiyun 	 *
70*4882a593Smuzhiyun 	 * at t3, another dev (vrf3) is bound to the same table 100 but the
71*4882a593Smuzhiyun 	 * value of shared_tables is still 1.
72*4882a593Smuzhiyun 	 * This means that no matter how many more vrfs register on
73*4882a593Smuzhiyun 	 * table 100, shared_tables will not increase any further
74*4882a593Smuzhiyun 	 * (considering only table 100).
75*4882a593Smuzhiyun 	 *
76*4882a593Smuzhiyun 	 * at t4, vrf4 is bound to table 101, and shared_tables = 2.
77*4882a593Smuzhiyun 	 *
78*4882a593Smuzhiyun 	 * Looking at the value of shared_tables we can immediately know if
79*4882a593Smuzhiyun 	 * the strict_mode can or cannot be enforced. Indeed, strict_mode
80*4882a593Smuzhiyun 	 * can be enforced iff shared_tables = 0.
81*4882a593Smuzhiyun 	 *
82*4882a593Smuzhiyun 	 * Conversely, shared_tables is decreased when a vrf is de-associated
83*4882a593Smuzhiyun 	 * from a table with exactly two associated vrfs.
84*4882a593Smuzhiyun 	 */
85*4882a593Smuzhiyun 	u32 shared_tables;
86*4882a593Smuzhiyun 
87*4882a593Smuzhiyun 	bool strict_mode;
88*4882a593Smuzhiyun };
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun struct vrf_map_elem {
91*4882a593Smuzhiyun 	struct hlist_node hnode;
92*4882a593Smuzhiyun 	struct list_head vrf_list;  /* VRFs registered to this table */
93*4882a593Smuzhiyun 
94*4882a593Smuzhiyun 	u32 table_id;
95*4882a593Smuzhiyun 	int users;
96*4882a593Smuzhiyun 	int ifindex;
97*4882a593Smuzhiyun };
98*4882a593Smuzhiyun 
99*4882a593Smuzhiyun static unsigned int vrf_net_id;
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun /* per netns vrf data */
102*4882a593Smuzhiyun struct netns_vrf {
103*4882a593Smuzhiyun 	/* protected by rtnl lock */
104*4882a593Smuzhiyun 	bool add_fib_rules;
105*4882a593Smuzhiyun 
106*4882a593Smuzhiyun 	struct vrf_map vmap;
107*4882a593Smuzhiyun 	struct ctl_table_header	*ctl_hdr;
108*4882a593Smuzhiyun };
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun struct net_vrf {
111*4882a593Smuzhiyun 	struct rtable __rcu	*rth;
112*4882a593Smuzhiyun 	struct rt6_info	__rcu	*rt6;
113*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_IPV6)
114*4882a593Smuzhiyun 	struct fib6_table	*fib6_table;
115*4882a593Smuzhiyun #endif
116*4882a593Smuzhiyun 	u32                     tb_id;
117*4882a593Smuzhiyun 
118*4882a593Smuzhiyun 	struct list_head	me_list;   /* entry in vrf_map_elem */
119*4882a593Smuzhiyun 	int			ifindex;
120*4882a593Smuzhiyun };
121*4882a593Smuzhiyun 
122*4882a593Smuzhiyun struct pcpu_dstats {
123*4882a593Smuzhiyun 	u64			tx_pkts;
124*4882a593Smuzhiyun 	u64			tx_bytes;
125*4882a593Smuzhiyun 	u64			tx_drps;
126*4882a593Smuzhiyun 	u64			rx_pkts;
127*4882a593Smuzhiyun 	u64			rx_bytes;
128*4882a593Smuzhiyun 	u64			rx_drps;
129*4882a593Smuzhiyun 	struct u64_stats_sync	syncp;
130*4882a593Smuzhiyun };
131*4882a593Smuzhiyun 
132*4882a593Smuzhiyun static void vrf_rx_stats(struct net_device *dev, int len)
133*4882a593Smuzhiyun {
134*4882a593Smuzhiyun 	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
135*4882a593Smuzhiyun 
136*4882a593Smuzhiyun 	u64_stats_update_begin(&dstats->syncp);
137*4882a593Smuzhiyun 	dstats->rx_pkts++;
138*4882a593Smuzhiyun 	dstats->rx_bytes += len;
139*4882a593Smuzhiyun 	u64_stats_update_end(&dstats->syncp);
140*4882a593Smuzhiyun }
141*4882a593Smuzhiyun 
142*4882a593Smuzhiyun static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
143*4882a593Smuzhiyun {
144*4882a593Smuzhiyun 	vrf_dev->stats.tx_errors++;
145*4882a593Smuzhiyun 	kfree_skb(skb);
146*4882a593Smuzhiyun }
147*4882a593Smuzhiyun 
148*4882a593Smuzhiyun static void vrf_get_stats64(struct net_device *dev,
149*4882a593Smuzhiyun 			    struct rtnl_link_stats64 *stats)
150*4882a593Smuzhiyun {
151*4882a593Smuzhiyun 	int i;
152*4882a593Smuzhiyun 
153*4882a593Smuzhiyun 	for_each_possible_cpu(i) {
154*4882a593Smuzhiyun 		const struct pcpu_dstats *dstats;
155*4882a593Smuzhiyun 		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
156*4882a593Smuzhiyun 		unsigned int start;
157*4882a593Smuzhiyun 
158*4882a593Smuzhiyun 		dstats = per_cpu_ptr(dev->dstats, i);
159*4882a593Smuzhiyun 		do {
160*4882a593Smuzhiyun 			start = u64_stats_fetch_begin_irq(&dstats->syncp);
161*4882a593Smuzhiyun 			tbytes = dstats->tx_bytes;
162*4882a593Smuzhiyun 			tpkts = dstats->tx_pkts;
163*4882a593Smuzhiyun 			tdrops = dstats->tx_drps;
164*4882a593Smuzhiyun 			rbytes = dstats->rx_bytes;
165*4882a593Smuzhiyun 			rpkts = dstats->rx_pkts;
166*4882a593Smuzhiyun 		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
167*4882a593Smuzhiyun 		stats->tx_bytes += tbytes;
168*4882a593Smuzhiyun 		stats->tx_packets += tpkts;
169*4882a593Smuzhiyun 		stats->tx_dropped += tdrops;
170*4882a593Smuzhiyun 		stats->rx_bytes += rbytes;
171*4882a593Smuzhiyun 		stats->rx_packets += rpkts;
172*4882a593Smuzhiyun 	}
173*4882a593Smuzhiyun }
174*4882a593Smuzhiyun 
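/* Note on the stats handling above: counters are kept per CPU in
 * struct pcpu_dstats and folded into rtnl_link_stats64 under the
 * u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() sequence,
 * so readers see consistent 64-bit values even on 32-bit hosts where
 * an update may be interrupted mid-way.
 */
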
175*4882a593Smuzhiyun static struct vrf_map *netns_vrf_map(struct net *net)
176*4882a593Smuzhiyun {
177*4882a593Smuzhiyun 	struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id);
178*4882a593Smuzhiyun 
179*4882a593Smuzhiyun 	return &nn_vrf->vmap;
180*4882a593Smuzhiyun }
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun static struct vrf_map *netns_vrf_map_by_dev(struct net_device *dev)
183*4882a593Smuzhiyun {
184*4882a593Smuzhiyun 	return netns_vrf_map(dev_net(dev));
185*4882a593Smuzhiyun }
186*4882a593Smuzhiyun 
187*4882a593Smuzhiyun static int vrf_map_elem_get_vrf_ifindex(struct vrf_map_elem *me)
188*4882a593Smuzhiyun {
189*4882a593Smuzhiyun 	struct list_head *me_head = &me->vrf_list;
190*4882a593Smuzhiyun 	struct net_vrf *vrf;
191*4882a593Smuzhiyun 
192*4882a593Smuzhiyun 	if (list_empty(me_head))
193*4882a593Smuzhiyun 		return -ENODEV;
194*4882a593Smuzhiyun 
195*4882a593Smuzhiyun 	vrf = list_first_entry(me_head, struct net_vrf, me_list);
196*4882a593Smuzhiyun 
197*4882a593Smuzhiyun 	return vrf->ifindex;
198*4882a593Smuzhiyun }
199*4882a593Smuzhiyun 
200*4882a593Smuzhiyun static struct vrf_map_elem *vrf_map_elem_alloc(gfp_t flags)
201*4882a593Smuzhiyun {
202*4882a593Smuzhiyun 	struct vrf_map_elem *me;
203*4882a593Smuzhiyun 
204*4882a593Smuzhiyun 	me = kmalloc(sizeof(*me), flags);
205*4882a593Smuzhiyun 	if (!me)
206*4882a593Smuzhiyun 		return NULL;
207*4882a593Smuzhiyun 
208*4882a593Smuzhiyun 	return me;
209*4882a593Smuzhiyun }
210*4882a593Smuzhiyun 
211*4882a593Smuzhiyun static void vrf_map_elem_free(struct vrf_map_elem *me)
212*4882a593Smuzhiyun {
213*4882a593Smuzhiyun 	kfree(me);
214*4882a593Smuzhiyun }
215*4882a593Smuzhiyun 
216*4882a593Smuzhiyun static void vrf_map_elem_init(struct vrf_map_elem *me, int table_id,
217*4882a593Smuzhiyun 			      int ifindex, int users)
218*4882a593Smuzhiyun {
219*4882a593Smuzhiyun 	me->table_id = table_id;
220*4882a593Smuzhiyun 	me->ifindex = ifindex;
221*4882a593Smuzhiyun 	me->users = users;
222*4882a593Smuzhiyun 	INIT_LIST_HEAD(&me->vrf_list);
223*4882a593Smuzhiyun }
224*4882a593Smuzhiyun 
225*4882a593Smuzhiyun static struct vrf_map_elem *vrf_map_lookup_elem(struct vrf_map *vmap,
226*4882a593Smuzhiyun 						u32 table_id)
227*4882a593Smuzhiyun {
228*4882a593Smuzhiyun 	struct vrf_map_elem *me;
229*4882a593Smuzhiyun 	u32 key;
230*4882a593Smuzhiyun 
231*4882a593Smuzhiyun 	key = jhash_1word(table_id, HASH_INITVAL);
232*4882a593Smuzhiyun 	hash_for_each_possible(vmap->ht, me, hnode, key) {
233*4882a593Smuzhiyun 		if (me->table_id == table_id)
234*4882a593Smuzhiyun 			return me;
235*4882a593Smuzhiyun 	}
236*4882a593Smuzhiyun 
237*4882a593Smuzhiyun 	return NULL;
238*4882a593Smuzhiyun }
239*4882a593Smuzhiyun 
240*4882a593Smuzhiyun static void vrf_map_add_elem(struct vrf_map *vmap, struct vrf_map_elem *me)
241*4882a593Smuzhiyun {
242*4882a593Smuzhiyun 	u32 table_id = me->table_id;
243*4882a593Smuzhiyun 	u32 key;
244*4882a593Smuzhiyun 
245*4882a593Smuzhiyun 	key = jhash_1word(table_id, HASH_INITVAL);
246*4882a593Smuzhiyun 	hash_add(vmap->ht, &me->hnode, key);
247*4882a593Smuzhiyun }
248*4882a593Smuzhiyun 
249*4882a593Smuzhiyun static void vrf_map_del_elem(struct vrf_map_elem *me)
250*4882a593Smuzhiyun {
251*4882a593Smuzhiyun 	hash_del(&me->hnode);
252*4882a593Smuzhiyun }
253*4882a593Smuzhiyun 
254*4882a593Smuzhiyun static void vrf_map_lock(struct vrf_map *vmap) __acquires(&vmap->vmap_lock)
255*4882a593Smuzhiyun {
256*4882a593Smuzhiyun 	spin_lock(&vmap->vmap_lock);
257*4882a593Smuzhiyun }
258*4882a593Smuzhiyun 
259*4882a593Smuzhiyun static void vrf_map_unlock(struct vrf_map *vmap) __releases(&vmap->vmap_lock)
260*4882a593Smuzhiyun {
261*4882a593Smuzhiyun 	spin_unlock(&vmap->vmap_lock);
262*4882a593Smuzhiyun }
263*4882a593Smuzhiyun 
264*4882a593Smuzhiyun /* called with rtnl lock held */
265*4882a593Smuzhiyun static int
266*4882a593Smuzhiyun vrf_map_register_dev(struct net_device *dev, struct netlink_ext_ack *extack)
267*4882a593Smuzhiyun {
268*4882a593Smuzhiyun 	struct vrf_map *vmap = netns_vrf_map_by_dev(dev);
269*4882a593Smuzhiyun 	struct net_vrf *vrf = netdev_priv(dev);
270*4882a593Smuzhiyun 	struct vrf_map_elem *new_me, *me;
271*4882a593Smuzhiyun 	u32 table_id = vrf->tb_id;
272*4882a593Smuzhiyun 	bool free_new_me = false;
273*4882a593Smuzhiyun 	int users;
274*4882a593Smuzhiyun 	int res;
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun 	/* we pre-allocate elements used in the spin-locked section (so that we
277*4882a593Smuzhiyun 	 * keep the spinlock held for as short a time as possible).
278*4882a593Smuzhiyun 	 */
279*4882a593Smuzhiyun 	new_me = vrf_map_elem_alloc(GFP_KERNEL);
280*4882a593Smuzhiyun 	if (!new_me)
281*4882a593Smuzhiyun 		return -ENOMEM;
282*4882a593Smuzhiyun 
283*4882a593Smuzhiyun 	vrf_map_elem_init(new_me, table_id, dev->ifindex, 0);
284*4882a593Smuzhiyun 
285*4882a593Smuzhiyun 	vrf_map_lock(vmap);
286*4882a593Smuzhiyun 
287*4882a593Smuzhiyun 	me = vrf_map_lookup_elem(vmap, table_id);
288*4882a593Smuzhiyun 	if (!me) {
289*4882a593Smuzhiyun 		me = new_me;
290*4882a593Smuzhiyun 		vrf_map_add_elem(vmap, me);
291*4882a593Smuzhiyun 		goto link_vrf;
292*4882a593Smuzhiyun 	}
293*4882a593Smuzhiyun 
294*4882a593Smuzhiyun 	/* we already have an entry in the vrf_map, so it means there is (at
295*4882a593Smuzhiyun 	 * least) a vrf registered on the specific table.
296*4882a593Smuzhiyun 	 */
297*4882a593Smuzhiyun 	free_new_me = true;
298*4882a593Smuzhiyun 	if (vmap->strict_mode) {
299*4882a593Smuzhiyun 		/* vrfs cannot share the same table */
300*4882a593Smuzhiyun 		NL_SET_ERR_MSG(extack, "Table is used by another VRF");
301*4882a593Smuzhiyun 		res = -EBUSY;
302*4882a593Smuzhiyun 		goto unlock;
303*4882a593Smuzhiyun 	}
304*4882a593Smuzhiyun 
305*4882a593Smuzhiyun link_vrf:
306*4882a593Smuzhiyun 	users = ++me->users;
307*4882a593Smuzhiyun 	if (users == 2)
308*4882a593Smuzhiyun 		++vmap->shared_tables;
309*4882a593Smuzhiyun 
310*4882a593Smuzhiyun 	list_add(&vrf->me_list, &me->vrf_list);
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun 	res = 0;
313*4882a593Smuzhiyun 
314*4882a593Smuzhiyun unlock:
315*4882a593Smuzhiyun 	vrf_map_unlock(vmap);
316*4882a593Smuzhiyun 
317*4882a593Smuzhiyun 	/* clean-up, if needed */
318*4882a593Smuzhiyun 	if (free_new_me)
319*4882a593Smuzhiyun 		vrf_map_elem_free(new_me);
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun 	return res;
322*4882a593Smuzhiyun }
323*4882a593Smuzhiyun 
324*4882a593Smuzhiyun /* called with rtnl lock held */
325*4882a593Smuzhiyun static void vrf_map_unregister_dev(struct net_device *dev)
326*4882a593Smuzhiyun {
327*4882a593Smuzhiyun 	struct vrf_map *vmap = netns_vrf_map_by_dev(dev);
328*4882a593Smuzhiyun 	struct net_vrf *vrf = netdev_priv(dev);
329*4882a593Smuzhiyun 	u32 table_id = vrf->tb_id;
330*4882a593Smuzhiyun 	struct vrf_map_elem *me;
331*4882a593Smuzhiyun 	int users;
332*4882a593Smuzhiyun 
333*4882a593Smuzhiyun 	vrf_map_lock(vmap);
334*4882a593Smuzhiyun 
335*4882a593Smuzhiyun 	me = vrf_map_lookup_elem(vmap, table_id);
336*4882a593Smuzhiyun 	if (!me)
337*4882a593Smuzhiyun 		goto unlock;
338*4882a593Smuzhiyun 
339*4882a593Smuzhiyun 	list_del(&vrf->me_list);
340*4882a593Smuzhiyun 
341*4882a593Smuzhiyun 	users = --me->users;
342*4882a593Smuzhiyun 	if (users == 1) {
343*4882a593Smuzhiyun 		--vmap->shared_tables;
344*4882a593Smuzhiyun 	} else if (users == 0) {
345*4882a593Smuzhiyun 		vrf_map_del_elem(me);
346*4882a593Smuzhiyun 
347*4882a593Smuzhiyun 		/* no one will refer to this element anymore */
348*4882a593Smuzhiyun 		vrf_map_elem_free(me);
349*4882a593Smuzhiyun 	}
350*4882a593Smuzhiyun 
351*4882a593Smuzhiyun unlock:
352*4882a593Smuzhiyun 	vrf_map_unlock(vmap);
353*4882a593Smuzhiyun }
354*4882a593Smuzhiyun 
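/* Taken together, vrf_map_register_dev() and vrf_map_unregister_dev()
 * maintain the shared_tables invariant described above: the counter is
 * bumped only on a table's 1 -> 2 user transition and dropped only on
 * the 2 -> 1 transition, so it always equals the number of tables used
 * by more than one VRF. Note also that the register path allocates the
 * new element with GFP_KERNEL before taking vmap_lock, keeping the
 * locked section short.
 */
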
355*4882a593Smuzhiyun /* return the vrf device index associated with the table_id */
356*4882a593Smuzhiyun static int vrf_ifindex_lookup_by_table_id(struct net *net, u32 table_id)
357*4882a593Smuzhiyun {
358*4882a593Smuzhiyun 	struct vrf_map *vmap = netns_vrf_map(net);
359*4882a593Smuzhiyun 	struct vrf_map_elem *me;
360*4882a593Smuzhiyun 	int ifindex;
361*4882a593Smuzhiyun 
362*4882a593Smuzhiyun 	vrf_map_lock(vmap);
363*4882a593Smuzhiyun 
364*4882a593Smuzhiyun 	if (!vmap->strict_mode) {
365*4882a593Smuzhiyun 		ifindex = -EPERM;
366*4882a593Smuzhiyun 		goto unlock;
367*4882a593Smuzhiyun 	}
368*4882a593Smuzhiyun 
369*4882a593Smuzhiyun 	me = vrf_map_lookup_elem(vmap, table_id);
370*4882a593Smuzhiyun 	if (!me) {
371*4882a593Smuzhiyun 		ifindex = -ENODEV;
372*4882a593Smuzhiyun 		goto unlock;
373*4882a593Smuzhiyun 	}
374*4882a593Smuzhiyun 
375*4882a593Smuzhiyun 	ifindex = vrf_map_elem_get_vrf_ifindex(me);
376*4882a593Smuzhiyun 
377*4882a593Smuzhiyun unlock:
378*4882a593Smuzhiyun 	vrf_map_unlock(vmap);
379*4882a593Smuzhiyun 
380*4882a593Smuzhiyun 	return ifindex;
381*4882a593Smuzhiyun }
382*4882a593Smuzhiyun 
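/* Strict mode itself is expected to be toggled from user space via the
 * net.vrf.strict_mode sysctl registered elsewhere in this driver, e.g.:
 *
 *   sysctl -w net.vrf.strict_mode=1
 *
 * and enabling it only succeeds while shared_tables == 0, i.e. while no
 * routing table is shared by two or more VRFs.
 */
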
383*4882a593Smuzhiyun /* by default VRF devices do not have a qdisc and are expected
384*4882a593Smuzhiyun  * to be created with only a single queue.
385*4882a593Smuzhiyun  */
386*4882a593Smuzhiyun static bool qdisc_tx_is_default(const struct net_device *dev)
387*4882a593Smuzhiyun {
388*4882a593Smuzhiyun 	struct netdev_queue *txq;
389*4882a593Smuzhiyun 	struct Qdisc *qdisc;
390*4882a593Smuzhiyun 
391*4882a593Smuzhiyun 	if (dev->num_tx_queues > 1)
392*4882a593Smuzhiyun 		return false;
393*4882a593Smuzhiyun 
394*4882a593Smuzhiyun 	txq = netdev_get_tx_queue(dev, 0);
395*4882a593Smuzhiyun 	qdisc = rcu_access_pointer(txq->qdisc);
396*4882a593Smuzhiyun 
397*4882a593Smuzhiyun 	return !qdisc->enqueue;
398*4882a593Smuzhiyun }
399*4882a593Smuzhiyun 
400*4882a593Smuzhiyun /* Local traffic destined to local address. Reinsert the packet to rx
401*4882a593Smuzhiyun  * path, similar to loopback handling.
402*4882a593Smuzhiyun  */
403*4882a593Smuzhiyun static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
404*4882a593Smuzhiyun 			  struct dst_entry *dst)
405*4882a593Smuzhiyun {
406*4882a593Smuzhiyun 	int len = skb->len;
407*4882a593Smuzhiyun 
408*4882a593Smuzhiyun 	skb_orphan(skb);
409*4882a593Smuzhiyun 
410*4882a593Smuzhiyun 	skb_dst_set(skb, dst);
411*4882a593Smuzhiyun 
412*4882a593Smuzhiyun 	/* set pkt_type to avoid skb hitting packet taps twice -
413*4882a593Smuzhiyun 	 * once on Tx and again in Rx processing
414*4882a593Smuzhiyun 	 */
415*4882a593Smuzhiyun 	skb->pkt_type = PACKET_LOOPBACK;
416*4882a593Smuzhiyun 
417*4882a593Smuzhiyun 	skb->protocol = eth_type_trans(skb, dev);
418*4882a593Smuzhiyun 
419*4882a593Smuzhiyun 	if (likely(netif_rx(skb) == NET_RX_SUCCESS))
420*4882a593Smuzhiyun 		vrf_rx_stats(dev, len);
421*4882a593Smuzhiyun 	else
422*4882a593Smuzhiyun 		this_cpu_inc(dev->dstats->rx_drps);
423*4882a593Smuzhiyun 
424*4882a593Smuzhiyun 	return NETDEV_TX_OK;
425*4882a593Smuzhiyun }
426*4882a593Smuzhiyun 
427*4882a593Smuzhiyun static void vrf_nf_set_untracked(struct sk_buff *skb)
428*4882a593Smuzhiyun {
429*4882a593Smuzhiyun 	if (skb_get_nfct(skb) == 0)
430*4882a593Smuzhiyun 		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
431*4882a593Smuzhiyun }
432*4882a593Smuzhiyun 
433*4882a593Smuzhiyun static void vrf_nf_reset_ct(struct sk_buff *skb)
434*4882a593Smuzhiyun {
435*4882a593Smuzhiyun 	if (skb_get_nfct(skb) == IP_CT_UNTRACKED)
436*4882a593Smuzhiyun 		nf_reset_ct(skb);
437*4882a593Smuzhiyun }
438*4882a593Smuzhiyun 
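/* The two helpers above implement the conntrack handling for the extra
 * pass through the VRF device: packets are marked IP_CT_UNTRACKED while
 * they traverse the VRF device's hooks (vrf_nf_set_untracked) and the
 * mark is cleared again before the real output path (vrf_nf_reset_ct),
 * so each flow is effectively tracked only once.
 */
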
439*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_IPV6)
440*4882a593Smuzhiyun static int vrf_ip6_local_out(struct net *net, struct sock *sk,
441*4882a593Smuzhiyun 			     struct sk_buff *skb)
442*4882a593Smuzhiyun {
443*4882a593Smuzhiyun 	int err;
444*4882a593Smuzhiyun 
445*4882a593Smuzhiyun 	vrf_nf_reset_ct(skb);
446*4882a593Smuzhiyun 
447*4882a593Smuzhiyun 	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net,
448*4882a593Smuzhiyun 		      sk, skb, NULL, skb_dst(skb)->dev, dst_output);
449*4882a593Smuzhiyun 
450*4882a593Smuzhiyun 	if (likely(err == 1))
451*4882a593Smuzhiyun 		err = dst_output(net, sk, skb);
452*4882a593Smuzhiyun 
453*4882a593Smuzhiyun 	return err;
454*4882a593Smuzhiyun }
455*4882a593Smuzhiyun 
456*4882a593Smuzhiyun static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
457*4882a593Smuzhiyun 					   struct net_device *dev)
458*4882a593Smuzhiyun {
459*4882a593Smuzhiyun 	const struct ipv6hdr *iph;
460*4882a593Smuzhiyun 	struct net *net = dev_net(skb->dev);
461*4882a593Smuzhiyun 	struct flowi6 fl6;
462*4882a593Smuzhiyun 	int ret = NET_XMIT_DROP;
463*4882a593Smuzhiyun 	struct dst_entry *dst;
464*4882a593Smuzhiyun 	struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;
465*4882a593Smuzhiyun 
466*4882a593Smuzhiyun 	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
467*4882a593Smuzhiyun 		goto err;
468*4882a593Smuzhiyun 
469*4882a593Smuzhiyun 	iph = ipv6_hdr(skb);
470*4882a593Smuzhiyun 
471*4882a593Smuzhiyun 	memset(&fl6, 0, sizeof(fl6));
472*4882a593Smuzhiyun 	/* needed to match OIF rule */
473*4882a593Smuzhiyun 	fl6.flowi6_oif = dev->ifindex;
474*4882a593Smuzhiyun 	fl6.flowi6_iif = LOOPBACK_IFINDEX;
475*4882a593Smuzhiyun 	fl6.daddr = iph->daddr;
476*4882a593Smuzhiyun 	fl6.saddr = iph->saddr;
477*4882a593Smuzhiyun 	fl6.flowlabel = ip6_flowinfo(iph);
478*4882a593Smuzhiyun 	fl6.flowi6_mark = skb->mark;
479*4882a593Smuzhiyun 	fl6.flowi6_proto = iph->nexthdr;
480*4882a593Smuzhiyun 	fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
481*4882a593Smuzhiyun 
482*4882a593Smuzhiyun 	dst = ip6_dst_lookup_flow(net, NULL, &fl6, NULL);
483*4882a593Smuzhiyun 	if (IS_ERR(dst) || dst == dst_null)
484*4882a593Smuzhiyun 		goto err;
485*4882a593Smuzhiyun 
486*4882a593Smuzhiyun 	skb_dst_drop(skb);
487*4882a593Smuzhiyun 
488*4882a593Smuzhiyun 	/* if dst.dev is loopback or the VRF device again this is locally
489*4882a593Smuzhiyun 	 * originated traffic destined to a local address. Short circuit
490*4882a593Smuzhiyun 	 * to Rx path
491*4882a593Smuzhiyun 	 */
492*4882a593Smuzhiyun 	if (dst->dev == dev)
493*4882a593Smuzhiyun 		return vrf_local_xmit(skb, dev, dst);
494*4882a593Smuzhiyun 
495*4882a593Smuzhiyun 	skb_dst_set(skb, dst);
496*4882a593Smuzhiyun 
497*4882a593Smuzhiyun 	/* strip the ethernet header added for pass through VRF device */
498*4882a593Smuzhiyun 	__skb_pull(skb, skb_network_offset(skb));
499*4882a593Smuzhiyun 
500*4882a593Smuzhiyun 	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
501*4882a593Smuzhiyun 	ret = vrf_ip6_local_out(net, skb->sk, skb);
502*4882a593Smuzhiyun 	if (unlikely(net_xmit_eval(ret)))
503*4882a593Smuzhiyun 		dev->stats.tx_errors++;
504*4882a593Smuzhiyun 	else
505*4882a593Smuzhiyun 		ret = NET_XMIT_SUCCESS;
506*4882a593Smuzhiyun 
507*4882a593Smuzhiyun 	return ret;
508*4882a593Smuzhiyun err:
509*4882a593Smuzhiyun 	vrf_tx_error(dev, skb);
510*4882a593Smuzhiyun 	return NET_XMIT_DROP;
511*4882a593Smuzhiyun }
512*4882a593Smuzhiyun #else
513*4882a593Smuzhiyun static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
514*4882a593Smuzhiyun 					   struct net_device *dev)
515*4882a593Smuzhiyun {
516*4882a593Smuzhiyun 	vrf_tx_error(dev, skb);
517*4882a593Smuzhiyun 	return NET_XMIT_DROP;
518*4882a593Smuzhiyun }
519*4882a593Smuzhiyun #endif
520*4882a593Smuzhiyun 
521*4882a593Smuzhiyun /* based on ip_local_out; can't use it b/c the dst is switched pointing to us */
522*4882a593Smuzhiyun static int vrf_ip_local_out(struct net *net, struct sock *sk,
523*4882a593Smuzhiyun 			    struct sk_buff *skb)
524*4882a593Smuzhiyun {
525*4882a593Smuzhiyun 	int err;
526*4882a593Smuzhiyun 
527*4882a593Smuzhiyun 	vrf_nf_reset_ct(skb);
528*4882a593Smuzhiyun 
529*4882a593Smuzhiyun 	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
530*4882a593Smuzhiyun 		      skb, NULL, skb_dst(skb)->dev, dst_output);
531*4882a593Smuzhiyun 	if (likely(err == 1))
532*4882a593Smuzhiyun 		err = dst_output(net, sk, skb);
533*4882a593Smuzhiyun 
534*4882a593Smuzhiyun 	return err;
535*4882a593Smuzhiyun }
536*4882a593Smuzhiyun 
537*4882a593Smuzhiyun static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
538*4882a593Smuzhiyun 					   struct net_device *vrf_dev)
539*4882a593Smuzhiyun {
540*4882a593Smuzhiyun 	struct iphdr *ip4h;
541*4882a593Smuzhiyun 	int ret = NET_XMIT_DROP;
542*4882a593Smuzhiyun 	struct flowi4 fl4;
543*4882a593Smuzhiyun 	struct net *net = dev_net(vrf_dev);
544*4882a593Smuzhiyun 	struct rtable *rt;
545*4882a593Smuzhiyun 
546*4882a593Smuzhiyun 	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
547*4882a593Smuzhiyun 		goto err;
548*4882a593Smuzhiyun 
549*4882a593Smuzhiyun 	ip4h = ip_hdr(skb);
550*4882a593Smuzhiyun 
551*4882a593Smuzhiyun 	memset(&fl4, 0, sizeof(fl4));
552*4882a593Smuzhiyun 	/* needed to match OIF rule */
553*4882a593Smuzhiyun 	fl4.flowi4_oif = vrf_dev->ifindex;
554*4882a593Smuzhiyun 	fl4.flowi4_iif = LOOPBACK_IFINDEX;
555*4882a593Smuzhiyun 	fl4.flowi4_tos = RT_TOS(ip4h->tos);
556*4882a593Smuzhiyun 	fl4.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF;
557*4882a593Smuzhiyun 	fl4.flowi4_proto = ip4h->protocol;
558*4882a593Smuzhiyun 	fl4.daddr = ip4h->daddr;
559*4882a593Smuzhiyun 	fl4.saddr = ip4h->saddr;
560*4882a593Smuzhiyun 
561*4882a593Smuzhiyun 	rt = ip_route_output_flow(net, &fl4, NULL);
562*4882a593Smuzhiyun 	if (IS_ERR(rt))
563*4882a593Smuzhiyun 		goto err;
564*4882a593Smuzhiyun 
565*4882a593Smuzhiyun 	skb_dst_drop(skb);
566*4882a593Smuzhiyun 
567*4882a593Smuzhiyun 	/* if dst.dev is loopback or the VRF device again this is locally
568*4882a593Smuzhiyun 	 * originated traffic destined to a local address. Short circuit
569*4882a593Smuzhiyun 	 * to Rx path
570*4882a593Smuzhiyun 	 */
571*4882a593Smuzhiyun 	if (rt->dst.dev == vrf_dev)
572*4882a593Smuzhiyun 		return vrf_local_xmit(skb, vrf_dev, &rt->dst);
573*4882a593Smuzhiyun 
574*4882a593Smuzhiyun 	skb_dst_set(skb, &rt->dst);
575*4882a593Smuzhiyun 
576*4882a593Smuzhiyun 	/* strip the ethernet header added for pass through VRF device */
577*4882a593Smuzhiyun 	__skb_pull(skb, skb_network_offset(skb));
578*4882a593Smuzhiyun 
579*4882a593Smuzhiyun 	if (!ip4h->saddr) {
580*4882a593Smuzhiyun 		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
581*4882a593Smuzhiyun 					       RT_SCOPE_LINK);
582*4882a593Smuzhiyun 	}
583*4882a593Smuzhiyun 
584*4882a593Smuzhiyun 	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
585*4882a593Smuzhiyun 	ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
586*4882a593Smuzhiyun 	if (unlikely(net_xmit_eval(ret)))
587*4882a593Smuzhiyun 		vrf_dev->stats.tx_errors++;
588*4882a593Smuzhiyun 	else
589*4882a593Smuzhiyun 		ret = NET_XMIT_SUCCESS;
590*4882a593Smuzhiyun 
591*4882a593Smuzhiyun out:
592*4882a593Smuzhiyun 	return ret;
593*4882a593Smuzhiyun err:
594*4882a593Smuzhiyun 	vrf_tx_error(vrf_dev, skb);
595*4882a593Smuzhiyun 	goto out;
596*4882a593Smuzhiyun }
597*4882a593Smuzhiyun 
598*4882a593Smuzhiyun static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
599*4882a593Smuzhiyun {
600*4882a593Smuzhiyun 	switch (skb->protocol) {
601*4882a593Smuzhiyun 	case htons(ETH_P_IP):
602*4882a593Smuzhiyun 		return vrf_process_v4_outbound(skb, dev);
603*4882a593Smuzhiyun 	case htons(ETH_P_IPV6):
604*4882a593Smuzhiyun 		return vrf_process_v6_outbound(skb, dev);
605*4882a593Smuzhiyun 	default:
606*4882a593Smuzhiyun 		vrf_tx_error(dev, skb);
607*4882a593Smuzhiyun 		return NET_XMIT_DROP;
608*4882a593Smuzhiyun 	}
609*4882a593Smuzhiyun }
610*4882a593Smuzhiyun 
611*4882a593Smuzhiyun static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
612*4882a593Smuzhiyun {
613*4882a593Smuzhiyun 	int len = skb->len;
614*4882a593Smuzhiyun 	netdev_tx_t ret = is_ip_tx_frame(skb, dev);
615*4882a593Smuzhiyun 
616*4882a593Smuzhiyun 	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
617*4882a593Smuzhiyun 		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
618*4882a593Smuzhiyun 
619*4882a593Smuzhiyun 		u64_stats_update_begin(&dstats->syncp);
620*4882a593Smuzhiyun 		dstats->tx_pkts++;
621*4882a593Smuzhiyun 		dstats->tx_bytes += len;
622*4882a593Smuzhiyun 		u64_stats_update_end(&dstats->syncp);
623*4882a593Smuzhiyun 	} else {
624*4882a593Smuzhiyun 		this_cpu_inc(dev->dstats->tx_drps);
625*4882a593Smuzhiyun 	}
626*4882a593Smuzhiyun 
627*4882a593Smuzhiyun 	return ret;
628*4882a593Smuzhiyun }
629*4882a593Smuzhiyun 
630*4882a593Smuzhiyun static void vrf_finish_direct(struct sk_buff *skb)
631*4882a593Smuzhiyun {
632*4882a593Smuzhiyun 	struct net_device *vrf_dev = skb->dev;
633*4882a593Smuzhiyun 
634*4882a593Smuzhiyun 	if (!list_empty(&vrf_dev->ptype_all) &&
635*4882a593Smuzhiyun 	    likely(skb_headroom(skb) >= ETH_HLEN)) {
636*4882a593Smuzhiyun 		struct ethhdr *eth = skb_push(skb, ETH_HLEN);
637*4882a593Smuzhiyun 
638*4882a593Smuzhiyun 		ether_addr_copy(eth->h_source, vrf_dev->dev_addr);
639*4882a593Smuzhiyun 		eth_zero_addr(eth->h_dest);
640*4882a593Smuzhiyun 		eth->h_proto = skb->protocol;
641*4882a593Smuzhiyun 
642*4882a593Smuzhiyun 		rcu_read_lock_bh();
643*4882a593Smuzhiyun 		dev_queue_xmit_nit(skb, vrf_dev);
644*4882a593Smuzhiyun 		rcu_read_unlock_bh();
645*4882a593Smuzhiyun 
646*4882a593Smuzhiyun 		skb_pull(skb, ETH_HLEN);
647*4882a593Smuzhiyun 	}
648*4882a593Smuzhiyun 
649*4882a593Smuzhiyun 	vrf_nf_reset_ct(skb);
650*4882a593Smuzhiyun }
651*4882a593Smuzhiyun 
652*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_IPV6)
653*4882a593Smuzhiyun /* modelled after ip6_finish_output2 */
654*4882a593Smuzhiyun static int vrf_finish_output6(struct net *net, struct sock *sk,
655*4882a593Smuzhiyun 			      struct sk_buff *skb)
656*4882a593Smuzhiyun {
657*4882a593Smuzhiyun 	struct dst_entry *dst = skb_dst(skb);
658*4882a593Smuzhiyun 	struct net_device *dev = dst->dev;
659*4882a593Smuzhiyun 	const struct in6_addr *nexthop;
660*4882a593Smuzhiyun 	struct neighbour *neigh;
661*4882a593Smuzhiyun 	int ret;
662*4882a593Smuzhiyun 
663*4882a593Smuzhiyun 	vrf_nf_reset_ct(skb);
664*4882a593Smuzhiyun 
665*4882a593Smuzhiyun 	skb->protocol = htons(ETH_P_IPV6);
666*4882a593Smuzhiyun 	skb->dev = dev;
667*4882a593Smuzhiyun 
668*4882a593Smuzhiyun 	rcu_read_lock_bh();
669*4882a593Smuzhiyun 	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
670*4882a593Smuzhiyun 	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
671*4882a593Smuzhiyun 	if (unlikely(!neigh))
672*4882a593Smuzhiyun 		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
673*4882a593Smuzhiyun 	if (!IS_ERR(neigh)) {
674*4882a593Smuzhiyun 		sock_confirm_neigh(skb, neigh);
675*4882a593Smuzhiyun 		ret = neigh_output(neigh, skb, false);
676*4882a593Smuzhiyun 		rcu_read_unlock_bh();
677*4882a593Smuzhiyun 		return ret;
678*4882a593Smuzhiyun 	}
679*4882a593Smuzhiyun 	rcu_read_unlock_bh();
680*4882a593Smuzhiyun 
681*4882a593Smuzhiyun 	IP6_INC_STATS(dev_net(dst->dev),
682*4882a593Smuzhiyun 		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
683*4882a593Smuzhiyun 	kfree_skb(skb);
684*4882a593Smuzhiyun 	return -EINVAL;
685*4882a593Smuzhiyun }
686*4882a593Smuzhiyun 
687*4882a593Smuzhiyun /* modelled after ip6_output */
688*4882a593Smuzhiyun static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
689*4882a593Smuzhiyun {
690*4882a593Smuzhiyun 	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
691*4882a593Smuzhiyun 			    net, sk, skb, NULL, skb_dst(skb)->dev,
692*4882a593Smuzhiyun 			    vrf_finish_output6,
693*4882a593Smuzhiyun 			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
694*4882a593Smuzhiyun }
695*4882a593Smuzhiyun 
696*4882a593Smuzhiyun /* set dst on skb to send packet to us via dev_xmit path. Allows
697*4882a593Smuzhiyun  * packet to go through device based features such as qdisc, netfilter
698*4882a593Smuzhiyun  * hooks and packet sockets with skb->dev set to vrf device.
699*4882a593Smuzhiyun  */
700*4882a593Smuzhiyun static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev,
701*4882a593Smuzhiyun 					    struct sk_buff *skb)
702*4882a593Smuzhiyun {
703*4882a593Smuzhiyun 	struct net_vrf *vrf = netdev_priv(vrf_dev);
704*4882a593Smuzhiyun 	struct dst_entry *dst = NULL;
705*4882a593Smuzhiyun 	struct rt6_info *rt6;
706*4882a593Smuzhiyun 
707*4882a593Smuzhiyun 	rcu_read_lock();
708*4882a593Smuzhiyun 
709*4882a593Smuzhiyun 	rt6 = rcu_dereference(vrf->rt6);
710*4882a593Smuzhiyun 	if (likely(rt6)) {
711*4882a593Smuzhiyun 		dst = &rt6->dst;
712*4882a593Smuzhiyun 		dst_hold(dst);
713*4882a593Smuzhiyun 	}
714*4882a593Smuzhiyun 
715*4882a593Smuzhiyun 	rcu_read_unlock();
716*4882a593Smuzhiyun 
717*4882a593Smuzhiyun 	if (unlikely(!dst)) {
718*4882a593Smuzhiyun 		vrf_tx_error(vrf_dev, skb);
719*4882a593Smuzhiyun 		return NULL;
720*4882a593Smuzhiyun 	}
721*4882a593Smuzhiyun 
722*4882a593Smuzhiyun 	skb_dst_drop(skb);
723*4882a593Smuzhiyun 	skb_dst_set(skb, dst);
724*4882a593Smuzhiyun 
725*4882a593Smuzhiyun 	return skb;
726*4882a593Smuzhiyun }
727*4882a593Smuzhiyun 
728*4882a593Smuzhiyun static int vrf_output6_direct_finish(struct net *net, struct sock *sk,
729*4882a593Smuzhiyun 				     struct sk_buff *skb)
730*4882a593Smuzhiyun {
731*4882a593Smuzhiyun 	vrf_finish_direct(skb);
732*4882a593Smuzhiyun 
733*4882a593Smuzhiyun 	return vrf_ip6_local_out(net, sk, skb);
734*4882a593Smuzhiyun }
735*4882a593Smuzhiyun 
736*4882a593Smuzhiyun static int vrf_output6_direct(struct net *net, struct sock *sk,
737*4882a593Smuzhiyun 			      struct sk_buff *skb)
738*4882a593Smuzhiyun {
739*4882a593Smuzhiyun 	int err = 1;
740*4882a593Smuzhiyun 
741*4882a593Smuzhiyun 	skb->protocol = htons(ETH_P_IPV6);
742*4882a593Smuzhiyun 
743*4882a593Smuzhiyun 	if (!(IPCB(skb)->flags & IPSKB_REROUTED))
744*4882a593Smuzhiyun 		err = nf_hook(NFPROTO_IPV6, NF_INET_POST_ROUTING, net, sk, skb,
745*4882a593Smuzhiyun 			      NULL, skb->dev, vrf_output6_direct_finish);
746*4882a593Smuzhiyun 
747*4882a593Smuzhiyun 	if (likely(err == 1))
748*4882a593Smuzhiyun 		vrf_finish_direct(skb);
749*4882a593Smuzhiyun 
750*4882a593Smuzhiyun 	return err;
751*4882a593Smuzhiyun }
752*4882a593Smuzhiyun 
753*4882a593Smuzhiyun static int vrf_ip6_out_direct_finish(struct net *net, struct sock *sk,
754*4882a593Smuzhiyun 				     struct sk_buff *skb)
755*4882a593Smuzhiyun {
756*4882a593Smuzhiyun 	int err;
757*4882a593Smuzhiyun 
758*4882a593Smuzhiyun 	err = vrf_output6_direct(net, sk, skb);
759*4882a593Smuzhiyun 	if (likely(err == 1))
760*4882a593Smuzhiyun 		err = vrf_ip6_local_out(net, sk, skb);
761*4882a593Smuzhiyun 
762*4882a593Smuzhiyun 	return err;
763*4882a593Smuzhiyun }
764*4882a593Smuzhiyun 
765*4882a593Smuzhiyun static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
766*4882a593Smuzhiyun 					  struct sock *sk,
767*4882a593Smuzhiyun 					  struct sk_buff *skb)
768*4882a593Smuzhiyun {
769*4882a593Smuzhiyun 	struct net *net = dev_net(vrf_dev);
770*4882a593Smuzhiyun 	int err;
771*4882a593Smuzhiyun 
772*4882a593Smuzhiyun 	skb->dev = vrf_dev;
773*4882a593Smuzhiyun 
774*4882a593Smuzhiyun 	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
775*4882a593Smuzhiyun 		      skb, NULL, vrf_dev, vrf_ip6_out_direct_finish);
776*4882a593Smuzhiyun 
777*4882a593Smuzhiyun 	if (likely(err == 1))
778*4882a593Smuzhiyun 		err = vrf_output6_direct(net, sk, skb);
779*4882a593Smuzhiyun 
780*4882a593Smuzhiyun 	if (likely(err == 1))
781*4882a593Smuzhiyun 		return skb;
782*4882a593Smuzhiyun 
783*4882a593Smuzhiyun 	return NULL;
784*4882a593Smuzhiyun }
785*4882a593Smuzhiyun 
786*4882a593Smuzhiyun static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
787*4882a593Smuzhiyun 				   struct sock *sk,
788*4882a593Smuzhiyun 				   struct sk_buff *skb)
789*4882a593Smuzhiyun {
790*4882a593Smuzhiyun 	/* don't divert link scope packets */
791*4882a593Smuzhiyun 	if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
792*4882a593Smuzhiyun 		return skb;
793*4882a593Smuzhiyun 
794*4882a593Smuzhiyun 	vrf_nf_set_untracked(skb);
795*4882a593Smuzhiyun 
796*4882a593Smuzhiyun 	if (qdisc_tx_is_default(vrf_dev) ||
797*4882a593Smuzhiyun 	    IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
798*4882a593Smuzhiyun 		return vrf_ip6_out_direct(vrf_dev, sk, skb);
799*4882a593Smuzhiyun 
800*4882a593Smuzhiyun 	return vrf_ip6_out_redirect(vrf_dev, skb);
801*4882a593Smuzhiyun }
802*4882a593Smuzhiyun 
803*4882a593Smuzhiyun /* holding rtnl */
804*4882a593Smuzhiyun static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
805*4882a593Smuzhiyun {
806*4882a593Smuzhiyun 	struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
807*4882a593Smuzhiyun 	struct net *net = dev_net(dev);
808*4882a593Smuzhiyun 	struct dst_entry *dst;
809*4882a593Smuzhiyun 
810*4882a593Smuzhiyun 	RCU_INIT_POINTER(vrf->rt6, NULL);
811*4882a593Smuzhiyun 	synchronize_rcu();
812*4882a593Smuzhiyun 
813*4882a593Smuzhiyun 	/* move dev in dst's to loopback so this VRF device can be deleted
814*4882a593Smuzhiyun 	 * - based on dst_ifdown
815*4882a593Smuzhiyun 	 */
816*4882a593Smuzhiyun 	if (rt6) {
817*4882a593Smuzhiyun 		dst = &rt6->dst;
818*4882a593Smuzhiyun 		dev_put(dst->dev);
819*4882a593Smuzhiyun 		dst->dev = net->loopback_dev;
820*4882a593Smuzhiyun 		dev_hold(dst->dev);
821*4882a593Smuzhiyun 		dst_release(dst);
822*4882a593Smuzhiyun 	}
823*4882a593Smuzhiyun }
824*4882a593Smuzhiyun 
825*4882a593Smuzhiyun static int vrf_rt6_create(struct net_device *dev)
826*4882a593Smuzhiyun {
827*4882a593Smuzhiyun 	int flags = DST_NOPOLICY | DST_NOXFRM;
828*4882a593Smuzhiyun 	struct net_vrf *vrf = netdev_priv(dev);
829*4882a593Smuzhiyun 	struct net *net = dev_net(dev);
830*4882a593Smuzhiyun 	struct rt6_info *rt6;
831*4882a593Smuzhiyun 	int rc = -ENOMEM;
832*4882a593Smuzhiyun 
833*4882a593Smuzhiyun 	/* IPv6 can be CONFIG enabled and then disabled runtime */
834*4882a593Smuzhiyun 	if (!ipv6_mod_enabled())
835*4882a593Smuzhiyun 		return 0;
836*4882a593Smuzhiyun 
837*4882a593Smuzhiyun 	vrf->fib6_table = fib6_new_table(net, vrf->tb_id);
838*4882a593Smuzhiyun 	if (!vrf->fib6_table)
839*4882a593Smuzhiyun 		goto out;
840*4882a593Smuzhiyun 
841*4882a593Smuzhiyun 	/* create a dst for routing packets out a VRF device */
842*4882a593Smuzhiyun 	rt6 = ip6_dst_alloc(net, dev, flags);
843*4882a593Smuzhiyun 	if (!rt6)
844*4882a593Smuzhiyun 		goto out;
845*4882a593Smuzhiyun 
846*4882a593Smuzhiyun 	rt6->dst.output	= vrf_output6;
847*4882a593Smuzhiyun 
848*4882a593Smuzhiyun 	rcu_assign_pointer(vrf->rt6, rt6);
849*4882a593Smuzhiyun 
850*4882a593Smuzhiyun 	rc = 0;
851*4882a593Smuzhiyun out:
852*4882a593Smuzhiyun 	return rc;
853*4882a593Smuzhiyun }
854*4882a593Smuzhiyun #else
855*4882a593Smuzhiyun static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
856*4882a593Smuzhiyun 				   struct sock *sk,
857*4882a593Smuzhiyun 				   struct sk_buff *skb)
858*4882a593Smuzhiyun {
859*4882a593Smuzhiyun 	return skb;
860*4882a593Smuzhiyun }
861*4882a593Smuzhiyun 
862*4882a593Smuzhiyun static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
863*4882a593Smuzhiyun {
864*4882a593Smuzhiyun }
865*4882a593Smuzhiyun 
866*4882a593Smuzhiyun static int vrf_rt6_create(struct net_device *dev)
867*4882a593Smuzhiyun {
868*4882a593Smuzhiyun 	return 0;
869*4882a593Smuzhiyun }
870*4882a593Smuzhiyun #endif
871*4882a593Smuzhiyun 
872*4882a593Smuzhiyun /* modelled after ip_finish_output2 */
873*4882a593Smuzhiyun static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
874*4882a593Smuzhiyun {
875*4882a593Smuzhiyun 	struct dst_entry *dst = skb_dst(skb);
876*4882a593Smuzhiyun 	struct rtable *rt = (struct rtable *)dst;
877*4882a593Smuzhiyun 	struct net_device *dev = dst->dev;
878*4882a593Smuzhiyun 	unsigned int hh_len = LL_RESERVED_SPACE(dev);
879*4882a593Smuzhiyun 	struct neighbour *neigh;
880*4882a593Smuzhiyun 	bool is_v6gw = false;
881*4882a593Smuzhiyun 	int ret = -EINVAL;
882*4882a593Smuzhiyun 
883*4882a593Smuzhiyun 	vrf_nf_reset_ct(skb);
884*4882a593Smuzhiyun 
885*4882a593Smuzhiyun 	/* Be paranoid, rather than too clever. */
886*4882a593Smuzhiyun 	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
887*4882a593Smuzhiyun 		struct sk_buff *skb2;
888*4882a593Smuzhiyun 
889*4882a593Smuzhiyun 		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
890*4882a593Smuzhiyun 		if (!skb2) {
891*4882a593Smuzhiyun 			ret = -ENOMEM;
892*4882a593Smuzhiyun 			goto err;
893*4882a593Smuzhiyun 		}
894*4882a593Smuzhiyun 		if (skb->sk)
895*4882a593Smuzhiyun 			skb_set_owner_w(skb2, skb->sk);
896*4882a593Smuzhiyun 
897*4882a593Smuzhiyun 		consume_skb(skb);
898*4882a593Smuzhiyun 		skb = skb2;
899*4882a593Smuzhiyun 	}
900*4882a593Smuzhiyun 
901*4882a593Smuzhiyun 	rcu_read_lock_bh();
902*4882a593Smuzhiyun 
903*4882a593Smuzhiyun 	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
904*4882a593Smuzhiyun 	if (!IS_ERR(neigh)) {
905*4882a593Smuzhiyun 		sock_confirm_neigh(skb, neigh);
906*4882a593Smuzhiyun 		/* if crossing protocols, can not use the cached header */
907*4882a593Smuzhiyun 		ret = neigh_output(neigh, skb, is_v6gw);
908*4882a593Smuzhiyun 		rcu_read_unlock_bh();
909*4882a593Smuzhiyun 		return ret;
910*4882a593Smuzhiyun 	}
911*4882a593Smuzhiyun 
912*4882a593Smuzhiyun 	rcu_read_unlock_bh();
913*4882a593Smuzhiyun err:
914*4882a593Smuzhiyun 	vrf_tx_error(skb->dev, skb);
915*4882a593Smuzhiyun 	return ret;
916*4882a593Smuzhiyun }
917*4882a593Smuzhiyun 
918*4882a593Smuzhiyun static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
919*4882a593Smuzhiyun {
920*4882a593Smuzhiyun 	struct net_device *dev = skb_dst(skb)->dev;
921*4882a593Smuzhiyun 
922*4882a593Smuzhiyun 	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
923*4882a593Smuzhiyun 
924*4882a593Smuzhiyun 	skb->dev = dev;
925*4882a593Smuzhiyun 	skb->protocol = htons(ETH_P_IP);
926*4882a593Smuzhiyun 
927*4882a593Smuzhiyun 	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
928*4882a593Smuzhiyun 			    net, sk, skb, NULL, dev,
929*4882a593Smuzhiyun 			    vrf_finish_output,
930*4882a593Smuzhiyun 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
931*4882a593Smuzhiyun }
932*4882a593Smuzhiyun 
933*4882a593Smuzhiyun /* set dst on skb to send packet to us via dev_xmit path. Allows
934*4882a593Smuzhiyun  * packet to go through device based features such as qdisc, netfilter
935*4882a593Smuzhiyun  * hooks and packet sockets with skb->dev set to vrf device.
936*4882a593Smuzhiyun  */
937*4882a593Smuzhiyun static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev,
938*4882a593Smuzhiyun 					   struct sk_buff *skb)
939*4882a593Smuzhiyun {
940*4882a593Smuzhiyun 	struct net_vrf *vrf = netdev_priv(vrf_dev);
941*4882a593Smuzhiyun 	struct dst_entry *dst = NULL;
942*4882a593Smuzhiyun 	struct rtable *rth;
943*4882a593Smuzhiyun 
944*4882a593Smuzhiyun 	rcu_read_lock();
945*4882a593Smuzhiyun 
946*4882a593Smuzhiyun 	rth = rcu_dereference(vrf->rth);
947*4882a593Smuzhiyun 	if (likely(rth)) {
948*4882a593Smuzhiyun 		dst = &rth->dst;
949*4882a593Smuzhiyun 		dst_hold(dst);
950*4882a593Smuzhiyun 	}
951*4882a593Smuzhiyun 
952*4882a593Smuzhiyun 	rcu_read_unlock();
953*4882a593Smuzhiyun 
954*4882a593Smuzhiyun 	if (unlikely(!dst)) {
955*4882a593Smuzhiyun 		vrf_tx_error(vrf_dev, skb);
956*4882a593Smuzhiyun 		return NULL;
957*4882a593Smuzhiyun 	}
958*4882a593Smuzhiyun 
959*4882a593Smuzhiyun 	skb_dst_drop(skb);
960*4882a593Smuzhiyun 	skb_dst_set(skb, dst);
961*4882a593Smuzhiyun 
962*4882a593Smuzhiyun 	return skb;
963*4882a593Smuzhiyun }
964*4882a593Smuzhiyun 
965*4882a593Smuzhiyun static int vrf_output_direct_finish(struct net *net, struct sock *sk,
966*4882a593Smuzhiyun 				    struct sk_buff *skb)
967*4882a593Smuzhiyun {
968*4882a593Smuzhiyun 	vrf_finish_direct(skb);
969*4882a593Smuzhiyun 
970*4882a593Smuzhiyun 	return vrf_ip_local_out(net, sk, skb);
971*4882a593Smuzhiyun }
972*4882a593Smuzhiyun 
973*4882a593Smuzhiyun static int vrf_output_direct(struct net *net, struct sock *sk,
974*4882a593Smuzhiyun 			     struct sk_buff *skb)
975*4882a593Smuzhiyun {
976*4882a593Smuzhiyun 	int err = 1;
977*4882a593Smuzhiyun 
978*4882a593Smuzhiyun 	skb->protocol = htons(ETH_P_IP);
979*4882a593Smuzhiyun 
980*4882a593Smuzhiyun 	if (!(IPCB(skb)->flags & IPSKB_REROUTED))
981*4882a593Smuzhiyun 		err = nf_hook(NFPROTO_IPV4, NF_INET_POST_ROUTING, net, sk, skb,
982*4882a593Smuzhiyun 			      NULL, skb->dev, vrf_output_direct_finish);
983*4882a593Smuzhiyun 
984*4882a593Smuzhiyun 	if (likely(err == 1))
985*4882a593Smuzhiyun 		vrf_finish_direct(skb);
986*4882a593Smuzhiyun 
987*4882a593Smuzhiyun 	return err;
988*4882a593Smuzhiyun }
989*4882a593Smuzhiyun 
990*4882a593Smuzhiyun static int vrf_ip_out_direct_finish(struct net *net, struct sock *sk,
991*4882a593Smuzhiyun 				    struct sk_buff *skb)
992*4882a593Smuzhiyun {
993*4882a593Smuzhiyun 	int err;
994*4882a593Smuzhiyun 
995*4882a593Smuzhiyun 	err = vrf_output_direct(net, sk, skb);
996*4882a593Smuzhiyun 	if (likely(err == 1))
997*4882a593Smuzhiyun 		err = vrf_ip_local_out(net, sk, skb);
998*4882a593Smuzhiyun 
999*4882a593Smuzhiyun 	return err;
1000*4882a593Smuzhiyun }
1001*4882a593Smuzhiyun 
1002*4882a593Smuzhiyun static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
1003*4882a593Smuzhiyun 					 struct sock *sk,
1004*4882a593Smuzhiyun 					 struct sk_buff *skb)
1005*4882a593Smuzhiyun {
1006*4882a593Smuzhiyun 	struct net *net = dev_net(vrf_dev);
1007*4882a593Smuzhiyun 	int err;
1008*4882a593Smuzhiyun 
1009*4882a593Smuzhiyun 	skb->dev = vrf_dev;
1010*4882a593Smuzhiyun 
1011*4882a593Smuzhiyun 	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
1012*4882a593Smuzhiyun 		      skb, NULL, vrf_dev, vrf_ip_out_direct_finish);
1013*4882a593Smuzhiyun 
1014*4882a593Smuzhiyun 	if (likely(err == 1))
1015*4882a593Smuzhiyun 		err = vrf_output_direct(net, sk, skb);
1016*4882a593Smuzhiyun 
1017*4882a593Smuzhiyun 	if (likely(err == 1))
1018*4882a593Smuzhiyun 		return skb;
1019*4882a593Smuzhiyun 
1020*4882a593Smuzhiyun 	return NULL;
1021*4882a593Smuzhiyun }
1022*4882a593Smuzhiyun 
1023*4882a593Smuzhiyun static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
1024*4882a593Smuzhiyun 				  struct sock *sk,
1025*4882a593Smuzhiyun 				  struct sk_buff *skb)
1026*4882a593Smuzhiyun {
1027*4882a593Smuzhiyun 	/* don't divert multicast or local broadcast */
1028*4882a593Smuzhiyun 	if (ipv4_is_multicast(ip_hdr(skb)->daddr) ||
1029*4882a593Smuzhiyun 	    ipv4_is_lbcast(ip_hdr(skb)->daddr))
1030*4882a593Smuzhiyun 		return skb;
1031*4882a593Smuzhiyun 
1032*4882a593Smuzhiyun 	vrf_nf_set_untracked(skb);
1033*4882a593Smuzhiyun 
1034*4882a593Smuzhiyun 	if (qdisc_tx_is_default(vrf_dev) ||
1035*4882a593Smuzhiyun 	    IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
1036*4882a593Smuzhiyun 		return vrf_ip_out_direct(vrf_dev, sk, skb);
1037*4882a593Smuzhiyun 
1038*4882a593Smuzhiyun 	return vrf_ip_out_redirect(vrf_dev, skb);
1039*4882a593Smuzhiyun }
1040*4882a593Smuzhiyun 
1041*4882a593Smuzhiyun /* called with rcu lock held */
1042*4882a593Smuzhiyun static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev,
1043*4882a593Smuzhiyun 				  struct sock *sk,
1044*4882a593Smuzhiyun 				  struct sk_buff *skb,
1045*4882a593Smuzhiyun 				  u16 proto)
1046*4882a593Smuzhiyun {
1047*4882a593Smuzhiyun 	switch (proto) {
1048*4882a593Smuzhiyun 	case AF_INET:
1049*4882a593Smuzhiyun 		return vrf_ip_out(vrf_dev, sk, skb);
1050*4882a593Smuzhiyun 	case AF_INET6:
1051*4882a593Smuzhiyun 		return vrf_ip6_out(vrf_dev, sk, skb);
1052*4882a593Smuzhiyun 	}
1053*4882a593Smuzhiyun 
1054*4882a593Smuzhiyun 	return skb;
1055*4882a593Smuzhiyun }
1056*4882a593Smuzhiyun 
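/* vrf_l3_out() is intended as the driver's l3mdev "l3_out" hook (the
 * l3mdev_ops wiring lives further down in this file): locally generated
 * IPv4/IPv6 traffic leaving through a VRF is diverted here so it picks
 * up the VRF's dst and traverses the device's qdisc, netfilter hooks
 * and packet taps, see vrf_ip_out()/vrf_ip6_out() above.
 */
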
1057*4882a593Smuzhiyun /* holding rtnl */
1058*4882a593Smuzhiyun static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
1059*4882a593Smuzhiyun {
1060*4882a593Smuzhiyun 	struct rtable *rth = rtnl_dereference(vrf->rth);
1061*4882a593Smuzhiyun 	struct net *net = dev_net(dev);
1062*4882a593Smuzhiyun 	struct dst_entry *dst;
1063*4882a593Smuzhiyun 
1064*4882a593Smuzhiyun 	RCU_INIT_POINTER(vrf->rth, NULL);
1065*4882a593Smuzhiyun 	synchronize_rcu();
1066*4882a593Smuzhiyun 
1067*4882a593Smuzhiyun 	/* move dev in dst's to loopback so this VRF device can be deleted
1068*4882a593Smuzhiyun 	 * - based on dst_ifdown
1069*4882a593Smuzhiyun 	 */
1070*4882a593Smuzhiyun 	if (rth) {
1071*4882a593Smuzhiyun 		dst = &rth->dst;
1072*4882a593Smuzhiyun 		dev_put(dst->dev);
1073*4882a593Smuzhiyun 		dst->dev = net->loopback_dev;
1074*4882a593Smuzhiyun 		dev_hold(dst->dev);
1075*4882a593Smuzhiyun 		dst_release(dst);
1076*4882a593Smuzhiyun 	}
1077*4882a593Smuzhiyun }
1078*4882a593Smuzhiyun 
1079*4882a593Smuzhiyun static int vrf_rtable_create(struct net_device *dev)
1080*4882a593Smuzhiyun {
1081*4882a593Smuzhiyun 	struct net_vrf *vrf = netdev_priv(dev);
1082*4882a593Smuzhiyun 	struct rtable *rth;
1083*4882a593Smuzhiyun 
1084*4882a593Smuzhiyun 	if (!fib_new_table(dev_net(dev), vrf->tb_id))
1085*4882a593Smuzhiyun 		return -ENOMEM;
1086*4882a593Smuzhiyun 
1087*4882a593Smuzhiyun 	/* create a dst for routing packets out through a VRF device */
1088*4882a593Smuzhiyun 	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1);
1089*4882a593Smuzhiyun 	if (!rth)
1090*4882a593Smuzhiyun 		return -ENOMEM;
1091*4882a593Smuzhiyun 
1092*4882a593Smuzhiyun 	rth->dst.output	= vrf_output;
1093*4882a593Smuzhiyun 
1094*4882a593Smuzhiyun 	rcu_assign_pointer(vrf->rth, rth);
1095*4882a593Smuzhiyun 
1096*4882a593Smuzhiyun 	return 0;
1097*4882a593Smuzhiyun }
1098*4882a593Smuzhiyun 
1099*4882a593Smuzhiyun /**************************** device handling ********************/
1100*4882a593Smuzhiyun 
1101*4882a593Smuzhiyun /* cycle interface to flush neighbor cache and move routes across tables */
1102*4882a593Smuzhiyun static void cycle_netdev(struct net_device *dev,
1103*4882a593Smuzhiyun 			 struct netlink_ext_ack *extack)
1104*4882a593Smuzhiyun {
1105*4882a593Smuzhiyun 	unsigned int flags = dev->flags;
1106*4882a593Smuzhiyun 	int ret;
1107*4882a593Smuzhiyun 
1108*4882a593Smuzhiyun 	if (!netif_running(dev))
1109*4882a593Smuzhiyun 		return;
1110*4882a593Smuzhiyun 
1111*4882a593Smuzhiyun 	ret = dev_change_flags(dev, flags & ~IFF_UP, extack);
1112*4882a593Smuzhiyun 	if (ret >= 0)
1113*4882a593Smuzhiyun 		ret = dev_change_flags(dev, flags, extack);
1114*4882a593Smuzhiyun 
1115*4882a593Smuzhiyun 	if (ret < 0) {
1116*4882a593Smuzhiyun 		netdev_err(dev,
1117*4882a593Smuzhiyun 			   "Failed to cycle device %s; route tables might be wrong!\n",
1118*4882a593Smuzhiyun 			   dev->name);
1119*4882a593Smuzhiyun 	}
1120*4882a593Smuzhiyun }
1121*4882a593Smuzhiyun 
1122*4882a593Smuzhiyun static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev,
1123*4882a593Smuzhiyun 			    struct netlink_ext_ack *extack)
1124*4882a593Smuzhiyun {
1125*4882a593Smuzhiyun 	int ret;
1126*4882a593Smuzhiyun 
1127*4882a593Smuzhiyun 	/* do not allow loopback device to be enslaved to a VRF.
1128*4882a593Smuzhiyun 	 * The vrf device acts as the loopback for the vrf.
1129*4882a593Smuzhiyun 	 */
1130*4882a593Smuzhiyun 	if (port_dev == dev_net(dev)->loopback_dev) {
1131*4882a593Smuzhiyun 		NL_SET_ERR_MSG(extack,
1132*4882a593Smuzhiyun 			       "Can not enslave loopback device to a VRF");
1133*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1134*4882a593Smuzhiyun 	}
1135*4882a593Smuzhiyun 
1136*4882a593Smuzhiyun 	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
1137*4882a593Smuzhiyun 	ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL, extack);
1138*4882a593Smuzhiyun 	if (ret < 0)
1139*4882a593Smuzhiyun 		goto err;
1140*4882a593Smuzhiyun 
1141*4882a593Smuzhiyun 	cycle_netdev(port_dev, extack);
1142*4882a593Smuzhiyun 
1143*4882a593Smuzhiyun 	return 0;
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun err:
1146*4882a593Smuzhiyun 	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;
1147*4882a593Smuzhiyun 	return ret;
1148*4882a593Smuzhiyun }
1149*4882a593Smuzhiyun 
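/* ndo_add_slave entry point. From user space, enslaving a device to a
 * VRF typically looks like this (iproute2):
 *
 *   ip link add vrf-blue type vrf table 10
 *   ip link set dev eth0 master vrf-blue
 *
 * the IFLA_MASTER handling in rtnetlink ends up calling into this
 * function.
 */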
1150*4882a593Smuzhiyun static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev,
1151*4882a593Smuzhiyun 			 struct netlink_ext_ack *extack)
1152*4882a593Smuzhiyun {
1153*4882a593Smuzhiyun 	if (netif_is_l3_master(port_dev)) {
1154*4882a593Smuzhiyun 		NL_SET_ERR_MSG(extack,
1155*4882a593Smuzhiyun 			       "Can not enslave an L3 master device to a VRF");
1156*4882a593Smuzhiyun 		return -EINVAL;
1157*4882a593Smuzhiyun 	}
1158*4882a593Smuzhiyun 
1159*4882a593Smuzhiyun 	if (netif_is_l3_slave(port_dev))
1160*4882a593Smuzhiyun 		return -EINVAL;
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun 	return do_vrf_add_slave(dev, port_dev, extack);
1163*4882a593Smuzhiyun }
1164*4882a593Smuzhiyun 
1165*4882a593Smuzhiyun /* inverse of do_vrf_add_slave */
1166*4882a593Smuzhiyun static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
1167*4882a593Smuzhiyun {
1168*4882a593Smuzhiyun 	netdev_upper_dev_unlink(port_dev, dev);
1169*4882a593Smuzhiyun 	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;
1170*4882a593Smuzhiyun 
1171*4882a593Smuzhiyun 	cycle_netdev(port_dev, NULL);
1172*4882a593Smuzhiyun 
1173*4882a593Smuzhiyun 	return 0;
1174*4882a593Smuzhiyun }
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
1177*4882a593Smuzhiyun {
1178*4882a593Smuzhiyun 	return do_vrf_del_slave(dev, port_dev);
1179*4882a593Smuzhiyun }
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun static void vrf_dev_uninit(struct net_device *dev)
1182*4882a593Smuzhiyun {
1183*4882a593Smuzhiyun 	struct net_vrf *vrf = netdev_priv(dev);
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 	vrf_rtable_release(dev, vrf);
1186*4882a593Smuzhiyun 	vrf_rt6_release(dev, vrf);
1187*4882a593Smuzhiyun 
1188*4882a593Smuzhiyun 	free_percpu(dev->dstats);
1189*4882a593Smuzhiyun 	dev->dstats = NULL;
1190*4882a593Smuzhiyun }
1191*4882a593Smuzhiyun 
1192*4882a593Smuzhiyun static int vrf_dev_init(struct net_device *dev)
1193*4882a593Smuzhiyun {
1194*4882a593Smuzhiyun 	struct net_vrf *vrf = netdev_priv(dev);
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun 	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
1197*4882a593Smuzhiyun 	if (!dev->dstats)
1198*4882a593Smuzhiyun 		goto out_nomem;
1199*4882a593Smuzhiyun 
1200*4882a593Smuzhiyun 	/* create the default dst which points back to us */
1201*4882a593Smuzhiyun 	if (vrf_rtable_create(dev) != 0)
1202*4882a593Smuzhiyun 		goto out_stats;
1203*4882a593Smuzhiyun 
1204*4882a593Smuzhiyun 	if (vrf_rt6_create(dev) != 0)
1205*4882a593Smuzhiyun 		goto out_rth;
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun 	dev->flags = IFF_MASTER | IFF_NOARP;
1208*4882a593Smuzhiyun 
1209*4882a593Smuzhiyun 	/* similarly, oper state is irrelevant; set to up to avoid confusion */
1210*4882a593Smuzhiyun 	dev->operstate = IF_OPER_UP;
1211*4882a593Smuzhiyun 	netdev_lockdep_set_classes(dev);
1212*4882a593Smuzhiyun 	return 0;
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun out_rth:
1215*4882a593Smuzhiyun 	vrf_rtable_release(dev, vrf);
1216*4882a593Smuzhiyun out_stats:
1217*4882a593Smuzhiyun 	free_percpu(dev->dstats);
1218*4882a593Smuzhiyun 	dev->dstats = NULL;
1219*4882a593Smuzhiyun out_nomem:
1220*4882a593Smuzhiyun 	return -ENOMEM;
1221*4882a593Smuzhiyun }
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun static const struct net_device_ops vrf_netdev_ops = {
1224*4882a593Smuzhiyun 	.ndo_init		= vrf_dev_init,
1225*4882a593Smuzhiyun 	.ndo_uninit		= vrf_dev_uninit,
1226*4882a593Smuzhiyun 	.ndo_start_xmit		= vrf_xmit,
1227*4882a593Smuzhiyun 	.ndo_set_mac_address	= eth_mac_addr,
1228*4882a593Smuzhiyun 	.ndo_get_stats64	= vrf_get_stats64,
1229*4882a593Smuzhiyun 	.ndo_add_slave		= vrf_add_slave,
1230*4882a593Smuzhiyun 	.ndo_del_slave		= vrf_del_slave,
1231*4882a593Smuzhiyun };
1232*4882a593Smuzhiyun 
1233*4882a593Smuzhiyun static u32 vrf_fib_table(const struct net_device *dev)
1234*4882a593Smuzhiyun {
1235*4882a593Smuzhiyun 	struct net_vrf *vrf = netdev_priv(dev);
1236*4882a593Smuzhiyun 
1237*4882a593Smuzhiyun 	return vrf->tb_id;
1238*4882a593Smuzhiyun }
1239*4882a593Smuzhiyun 
1240*4882a593Smuzhiyun static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
1241*4882a593Smuzhiyun {
1242*4882a593Smuzhiyun 	kfree_skb(skb);
1243*4882a593Smuzhiyun 	return 0;
1244*4882a593Smuzhiyun }
1245*4882a593Smuzhiyun 
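/* Run the skb through the netfilter hook with the VRF device as the input
 * device. nf_hook() returns 1 when the packet is accepted and processing
 * should continue; any other verdict means netfilter took over the skb
 * (dropped, stolen or queued), so NULL is returned to stop the caller.
 */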
1246*4882a593Smuzhiyun static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
1247*4882a593Smuzhiyun 				      struct sk_buff *skb,
1248*4882a593Smuzhiyun 				      struct net_device *dev)
1249*4882a593Smuzhiyun {
1250*4882a593Smuzhiyun 	struct net *net = dev_net(dev);
1251*4882a593Smuzhiyun 
1252*4882a593Smuzhiyun 	if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1)
1253*4882a593Smuzhiyun 		skb = NULL;    /* kfree_skb(skb) handled by nf code */
1254*4882a593Smuzhiyun 
1255*4882a593Smuzhiyun 	return skb;
1256*4882a593Smuzhiyun }
1257*4882a593Smuzhiyun 
1258*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_IPV6)
1259*4882a593Smuzhiyun /* neighbor handling is done with the actual device; we do not want
1260*4882a593Smuzhiyun  * to flip skb->dev for those ndisc packets. This check misses ND
1261*4882a593Smuzhiyun  * carried behind other next headers (e.g., NEXTHDR_HOP), but it is
1262*4882a593Smuzhiyun  * a start.
1263*4882a593Smuzhiyun  */
1264*4882a593Smuzhiyun static bool ipv6_ndisc_frame(const struct sk_buff *skb)
1265*4882a593Smuzhiyun {
1266*4882a593Smuzhiyun 	const struct ipv6hdr *iph = ipv6_hdr(skb);
1267*4882a593Smuzhiyun 	bool rc = false;
1268*4882a593Smuzhiyun 
1269*4882a593Smuzhiyun 	if (iph->nexthdr == NEXTHDR_ICMP) {
1270*4882a593Smuzhiyun 		const struct icmp6hdr *icmph;
1271*4882a593Smuzhiyun 		struct icmp6hdr _icmph;
1272*4882a593Smuzhiyun 
1273*4882a593Smuzhiyun 		icmph = skb_header_pointer(skb, sizeof(*iph),
1274*4882a593Smuzhiyun 					   sizeof(_icmph), &_icmph);
1275*4882a593Smuzhiyun 		if (!icmph)
1276*4882a593Smuzhiyun 			goto out;
1277*4882a593Smuzhiyun 
1278*4882a593Smuzhiyun 		switch (icmph->icmp6_type) {
1279*4882a593Smuzhiyun 		case NDISC_ROUTER_SOLICITATION:
1280*4882a593Smuzhiyun 		case NDISC_ROUTER_ADVERTISEMENT:
1281*4882a593Smuzhiyun 		case NDISC_NEIGHBOUR_SOLICITATION:
1282*4882a593Smuzhiyun 		case NDISC_NEIGHBOUR_ADVERTISEMENT:
1283*4882a593Smuzhiyun 		case NDISC_REDIRECT:
1284*4882a593Smuzhiyun 			rc = true;
1285*4882a593Smuzhiyun 			break;
1286*4882a593Smuzhiyun 		}
1287*4882a593Smuzhiyun 	}
1288*4882a593Smuzhiyun 
1289*4882a593Smuzhiyun out:
1290*4882a593Smuzhiyun 	return rc;
1291*4882a593Smuzhiyun }
1292*4882a593Smuzhiyun 
1293*4882a593Smuzhiyun static struct rt6_info *vrf_ip6_route_lookup(struct net *net,
1294*4882a593Smuzhiyun 					     const struct net_device *dev,
1295*4882a593Smuzhiyun 					     struct flowi6 *fl6,
1296*4882a593Smuzhiyun 					     int ifindex,
1297*4882a593Smuzhiyun 					     const struct sk_buff *skb,
1298*4882a593Smuzhiyun 					     int flags)
1299*4882a593Smuzhiyun {
1300*4882a593Smuzhiyun 	struct net_vrf *vrf = netdev_priv(dev);
1301*4882a593Smuzhiyun 
1302*4882a593Smuzhiyun 	return ip6_pol_route(net, vrf->fib6_table, ifindex, fl6, skb, flags);
1303*4882a593Smuzhiyun }
1304*4882a593Smuzhiyun 
1305*4882a593Smuzhiyun static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
1306*4882a593Smuzhiyun 			      int ifindex)
1307*4882a593Smuzhiyun {
1308*4882a593Smuzhiyun 	const struct ipv6hdr *iph = ipv6_hdr(skb);
1309*4882a593Smuzhiyun 	struct flowi6 fl6 = {
1310*4882a593Smuzhiyun 		.flowi6_iif     = ifindex,
1311*4882a593Smuzhiyun 		.flowi6_mark    = skb->mark,
1312*4882a593Smuzhiyun 		.flowi6_proto   = iph->nexthdr,
1313*4882a593Smuzhiyun 		.daddr          = iph->daddr,
1314*4882a593Smuzhiyun 		.saddr          = iph->saddr,
1315*4882a593Smuzhiyun 		.flowlabel      = ip6_flowinfo(iph),
1316*4882a593Smuzhiyun 	};
1317*4882a593Smuzhiyun 	struct net *net = dev_net(vrf_dev);
1318*4882a593Smuzhiyun 	struct rt6_info *rt6;
1319*4882a593Smuzhiyun 
1320*4882a593Smuzhiyun 	rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex, skb,
1321*4882a593Smuzhiyun 				   RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
1322*4882a593Smuzhiyun 	if (unlikely(!rt6))
1323*4882a593Smuzhiyun 		return;
1324*4882a593Smuzhiyun 
1325*4882a593Smuzhiyun 	if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst))
1326*4882a593Smuzhiyun 		return;
1327*4882a593Smuzhiyun 
1328*4882a593Smuzhiyun 	skb_dst_set(skb, &rt6->dst);
1329*4882a593Smuzhiyun }
1330*4882a593Smuzhiyun 
1331*4882a593Smuzhiyun static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
1332*4882a593Smuzhiyun 				   struct sk_buff *skb)
1333*4882a593Smuzhiyun {
1334*4882a593Smuzhiyun 	int orig_iif = skb->skb_iif;
1335*4882a593Smuzhiyun 	bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
1336*4882a593Smuzhiyun 	bool is_ndisc = ipv6_ndisc_frame(skb);
1337*4882a593Smuzhiyun 
1338*4882a593Smuzhiyun 	/* loopback, multicast & non-ND link-local traffic; do not push through
1339*4882a593Smuzhiyun 	 * packet taps again. Reset pkt_type for upper layers to process skb.
1340*4882a593Smuzhiyun 	 * For strict packets with a source LLA, determine the dst using the
1341*4882a593Smuzhiyun 	 * original ifindex.
1342*4882a593Smuzhiyun 	 */
1343*4882a593Smuzhiyun 	if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
1344*4882a593Smuzhiyun 		skb->dev = vrf_dev;
1345*4882a593Smuzhiyun 		skb->skb_iif = vrf_dev->ifindex;
1346*4882a593Smuzhiyun 		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
1347*4882a593Smuzhiyun 
1348*4882a593Smuzhiyun 		if (skb->pkt_type == PACKET_LOOPBACK)
1349*4882a593Smuzhiyun 			skb->pkt_type = PACKET_HOST;
1350*4882a593Smuzhiyun 		else if (ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)
1351*4882a593Smuzhiyun 			vrf_ip6_input_dst(skb, vrf_dev, orig_iif);
1352*4882a593Smuzhiyun 
1353*4882a593Smuzhiyun 		goto out;
1354*4882a593Smuzhiyun 	}
1355*4882a593Smuzhiyun 
1356*4882a593Smuzhiyun 	/* if packet is NDISC then keep the ingress interface */
1357*4882a593Smuzhiyun 	if (!is_ndisc) {
1358*4882a593Smuzhiyun 		vrf_rx_stats(vrf_dev, skb->len);
1359*4882a593Smuzhiyun 		skb->dev = vrf_dev;
1360*4882a593Smuzhiyun 		skb->skb_iif = vrf_dev->ifindex;
1361*4882a593Smuzhiyun 
1362*4882a593Smuzhiyun 		if (!list_empty(&vrf_dev->ptype_all)) {
1363*4882a593Smuzhiyun 			skb_push(skb, skb->mac_len);
1364*4882a593Smuzhiyun 			dev_queue_xmit_nit(skb, vrf_dev);
1365*4882a593Smuzhiyun 			skb_pull(skb, skb->mac_len);
1366*4882a593Smuzhiyun 		}
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun 		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
1369*4882a593Smuzhiyun 	}
1370*4882a593Smuzhiyun 
1371*4882a593Smuzhiyun 	if (need_strict)
1372*4882a593Smuzhiyun 		vrf_ip6_input_dst(skb, vrf_dev, orig_iif);
1373*4882a593Smuzhiyun 
1374*4882a593Smuzhiyun 	skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev);
1375*4882a593Smuzhiyun out:
1376*4882a593Smuzhiyun 	return skb;
1377*4882a593Smuzhiyun }
1378*4882a593Smuzhiyun 
1379*4882a593Smuzhiyun #else
1380*4882a593Smuzhiyun static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
1381*4882a593Smuzhiyun 				   struct sk_buff *skb)
1382*4882a593Smuzhiyun {
1383*4882a593Smuzhiyun 	return skb;
1384*4882a593Smuzhiyun }
1385*4882a593Smuzhiyun #endif
1386*4882a593Smuzhiyun 
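/* IPv4 receive path: point the skb at the VRF device so the FIB lookup
 * uses the VRF's table. Multicast and looped-back frames skip the packet
 * taps; all other traffic is replayed to taps bound to the VRF device and
 * then run through NF_INET_PRE_ROUTING in the VRF context.
 */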
1387*4882a593Smuzhiyun static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
1388*4882a593Smuzhiyun 				  struct sk_buff *skb)
1389*4882a593Smuzhiyun {
1390*4882a593Smuzhiyun 	skb->dev = vrf_dev;
1391*4882a593Smuzhiyun 	skb->skb_iif = vrf_dev->ifindex;
1392*4882a593Smuzhiyun 	IPCB(skb)->flags |= IPSKB_L3SLAVE;
1393*4882a593Smuzhiyun 
1394*4882a593Smuzhiyun 	if (ipv4_is_multicast(ip_hdr(skb)->daddr))
1395*4882a593Smuzhiyun 		goto out;
1396*4882a593Smuzhiyun 
1397*4882a593Smuzhiyun 	/* loopback traffic; do not push through packet taps again.
1398*4882a593Smuzhiyun 	 * Reset pkt_type for upper layers to process skb
1399*4882a593Smuzhiyun 	 */
1400*4882a593Smuzhiyun 	if (skb->pkt_type == PACKET_LOOPBACK) {
1401*4882a593Smuzhiyun 		skb->pkt_type = PACKET_HOST;
1402*4882a593Smuzhiyun 		goto out;
1403*4882a593Smuzhiyun 	}
1404*4882a593Smuzhiyun 
1405*4882a593Smuzhiyun 	vrf_rx_stats(vrf_dev, skb->len);
1406*4882a593Smuzhiyun 
1407*4882a593Smuzhiyun 	if (!list_empty(&vrf_dev->ptype_all)) {
1408*4882a593Smuzhiyun 		skb_push(skb, skb->mac_len);
1409*4882a593Smuzhiyun 		dev_queue_xmit_nit(skb, vrf_dev);
1410*4882a593Smuzhiyun 		skb_pull(skb, skb->mac_len);
1411*4882a593Smuzhiyun 	}
1412*4882a593Smuzhiyun 
1413*4882a593Smuzhiyun 	skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev);
1414*4882a593Smuzhiyun out:
1415*4882a593Smuzhiyun 	return skb;
1416*4882a593Smuzhiyun }
1417*4882a593Smuzhiyun 
1418*4882a593Smuzhiyun /* called with rcu lock held */
1419*4882a593Smuzhiyun static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
1420*4882a593Smuzhiyun 				  struct sk_buff *skb,
1421*4882a593Smuzhiyun 				  u16 proto)
1422*4882a593Smuzhiyun {
1423*4882a593Smuzhiyun 	switch (proto) {
1424*4882a593Smuzhiyun 	case AF_INET:
1425*4882a593Smuzhiyun 		return vrf_ip_rcv(vrf_dev, skb);
1426*4882a593Smuzhiyun 	case AF_INET6:
1427*4882a593Smuzhiyun 		return vrf_ip6_rcv(vrf_dev, skb);
1428*4882a593Smuzhiyun 	}
1429*4882a593Smuzhiyun 
1430*4882a593Smuzhiyun 	return skb;
1431*4882a593Smuzhiyun }
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_IPV6)
1434*4882a593Smuzhiyun /* send to a link-local or multicast address via an interface enslaved to
1435*4882a593Smuzhiyun  * the VRF device. Force the lookup to the VRF table without changing the
1436*4882a593Smuzhiyun  * flow struct. Note: the caller must hold rcu_read_lock(), and no refcnt
1437*4882a593Smuzhiyun  * is taken on the dst by this function.
1438*4882a593Smuzhiyun  */
1439*4882a593Smuzhiyun static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev,
1440*4882a593Smuzhiyun 					      struct flowi6 *fl6)
1441*4882a593Smuzhiyun {
1442*4882a593Smuzhiyun 	struct net *net = dev_net(dev);
1443*4882a593Smuzhiyun 	int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_DST_NOREF;
1444*4882a593Smuzhiyun 	struct dst_entry *dst = NULL;
1445*4882a593Smuzhiyun 	struct rt6_info *rt;
1446*4882a593Smuzhiyun 
1447*4882a593Smuzhiyun 	/* VRF device does not have a link-local address and
1448*4882a593Smuzhiyun 	 * sending packets to link-local or mcast addresses over
1449*4882a593Smuzhiyun 	 * a VRF device does not make sense
1450*4882a593Smuzhiyun 	 */
1451*4882a593Smuzhiyun 	if (fl6->flowi6_oif == dev->ifindex) {
1452*4882a593Smuzhiyun 		dst = &net->ipv6.ip6_null_entry->dst;
1453*4882a593Smuzhiyun 		return dst;
1454*4882a593Smuzhiyun 	}
1455*4882a593Smuzhiyun 
1456*4882a593Smuzhiyun 	if (!ipv6_addr_any(&fl6->saddr))
1457*4882a593Smuzhiyun 		flags |= RT6_LOOKUP_F_HAS_SADDR;
1458*4882a593Smuzhiyun 
1459*4882a593Smuzhiyun 	rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, NULL, flags);
1460*4882a593Smuzhiyun 	if (rt)
1461*4882a593Smuzhiyun 		dst = &rt->dst;
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun 	return dst;
1464*4882a593Smuzhiyun }
1465*4882a593Smuzhiyun #endif
1466*4882a593Smuzhiyun 
1467*4882a593Smuzhiyun static const struct l3mdev_ops vrf_l3mdev_ops = {
1468*4882a593Smuzhiyun 	.l3mdev_fib_table	= vrf_fib_table,
1469*4882a593Smuzhiyun 	.l3mdev_l3_rcv		= vrf_l3_rcv,
1470*4882a593Smuzhiyun 	.l3mdev_l3_out		= vrf_l3_out,
1471*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_IPV6)
1472*4882a593Smuzhiyun 	.l3mdev_link_scope_lookup = vrf_link_scope_lookup,
1473*4882a593Smuzhiyun #endif
1474*4882a593Smuzhiyun };
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun static void vrf_get_drvinfo(struct net_device *dev,
1477*4882a593Smuzhiyun 			    struct ethtool_drvinfo *info)
1478*4882a593Smuzhiyun {
1479*4882a593Smuzhiyun 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1480*4882a593Smuzhiyun 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1481*4882a593Smuzhiyun }
1482*4882a593Smuzhiyun 
1483*4882a593Smuzhiyun static const struct ethtool_ops vrf_ethtool_ops = {
1484*4882a593Smuzhiyun 	.get_drvinfo	= vrf_get_drvinfo,
1485*4882a593Smuzhiyun };
1486*4882a593Smuzhiyun 
1487*4882a593Smuzhiyun static inline size_t vrf_fib_rule_nl_size(void)
1488*4882a593Smuzhiyun {
1489*4882a593Smuzhiyun 	size_t sz;
1490*4882a593Smuzhiyun 
1491*4882a593Smuzhiyun 	sz  = NLMSG_ALIGN(sizeof(struct fib_rule_hdr));
1492*4882a593Smuzhiyun 	sz += nla_total_size(sizeof(u8));	/* FRA_L3MDEV */
1493*4882a593Smuzhiyun 	sz += nla_total_size(sizeof(u32));	/* FRA_PRIORITY */
1494*4882a593Smuzhiyun 	sz += nla_total_size(sizeof(u8));       /* FRA_PROTOCOL */
1495*4882a593Smuzhiyun 
1496*4882a593Smuzhiyun 	return sz;
1497*4882a593Smuzhiyun }
1498*4882a593Smuzhiyun 
1499*4882a593Smuzhiyun static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
1500*4882a593Smuzhiyun {
1501*4882a593Smuzhiyun 	struct fib_rule_hdr *frh;
1502*4882a593Smuzhiyun 	struct nlmsghdr *nlh;
1503*4882a593Smuzhiyun 	struct sk_buff *skb;
1504*4882a593Smuzhiyun 	int err;
1505*4882a593Smuzhiyun 
1506*4882a593Smuzhiyun 	if ((family == AF_INET6 || family == RTNL_FAMILY_IP6MR) &&
1507*4882a593Smuzhiyun 	    !ipv6_mod_enabled())
1508*4882a593Smuzhiyun 		return 0;
1509*4882a593Smuzhiyun 
1510*4882a593Smuzhiyun 	skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
1511*4882a593Smuzhiyun 	if (!skb)
1512*4882a593Smuzhiyun 		return -ENOMEM;
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun 	nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0);
1515*4882a593Smuzhiyun 	if (!nlh)
1516*4882a593Smuzhiyun 		goto nla_put_failure;
1517*4882a593Smuzhiyun 
1518*4882a593Smuzhiyun 	/* rule only needs to appear once */
1519*4882a593Smuzhiyun 	nlh->nlmsg_flags |= NLM_F_EXCL;
1520*4882a593Smuzhiyun 
1521*4882a593Smuzhiyun 	frh = nlmsg_data(nlh);
1522*4882a593Smuzhiyun 	memset(frh, 0, sizeof(*frh));
1523*4882a593Smuzhiyun 	frh->family = family;
1524*4882a593Smuzhiyun 	frh->action = FR_ACT_TO_TBL;
1525*4882a593Smuzhiyun 
1526*4882a593Smuzhiyun 	if (nla_put_u8(skb, FRA_PROTOCOL, RTPROT_KERNEL))
1527*4882a593Smuzhiyun 		goto nla_put_failure;
1528*4882a593Smuzhiyun 
1529*4882a593Smuzhiyun 	if (nla_put_u8(skb, FRA_L3MDEV, 1))
1530*4882a593Smuzhiyun 		goto nla_put_failure;
1531*4882a593Smuzhiyun 
1532*4882a593Smuzhiyun 	if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
1533*4882a593Smuzhiyun 		goto nla_put_failure;
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun 	nlmsg_end(skb, nlh);
1536*4882a593Smuzhiyun 
1537*4882a593Smuzhiyun 	/* fib_nl_{new,del}rule handling looks for net from skb->sk */
1538*4882a593Smuzhiyun 	skb->sk = dev_net(dev)->rtnl;
1539*4882a593Smuzhiyun 	if (add_it) {
1540*4882a593Smuzhiyun 		err = fib_nl_newrule(skb, nlh, NULL);
1541*4882a593Smuzhiyun 		if (err == -EEXIST)
1542*4882a593Smuzhiyun 			err = 0;
1543*4882a593Smuzhiyun 	} else {
1544*4882a593Smuzhiyun 		err = fib_nl_delrule(skb, nlh, NULL);
1545*4882a593Smuzhiyun 		if (err == -ENOENT)
1546*4882a593Smuzhiyun 			err = 0;
1547*4882a593Smuzhiyun 	}
1548*4882a593Smuzhiyun 	nlmsg_free(skb);
1549*4882a593Smuzhiyun 
1550*4882a593Smuzhiyun 	return err;
1551*4882a593Smuzhiyun 
1552*4882a593Smuzhiyun nla_put_failure:
1553*4882a593Smuzhiyun 	nlmsg_free(skb);
1554*4882a593Smuzhiyun 
1555*4882a593Smuzhiyun 	return -EMSGSIZE;
1556*4882a593Smuzhiyun }
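
/* The rule built above is the l3mdev FIB rule at preference 1000
 * (FIB_RULE_PREF), typically shown by `ip rule` as a lookup into the
 * l3mdev table. For illustration, it corresponds roughly to:
 *
 *   ip [-6] rule add l3mdev pref 1000
 *
 * and is installed once per address family when the first VRF in a
 * network namespace is created (see vrf_add_fib_rules()/vrf_newlink()).
 */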
1557*4882a593Smuzhiyun 
1558*4882a593Smuzhiyun static int vrf_add_fib_rules(const struct net_device *dev)
1559*4882a593Smuzhiyun {
1560*4882a593Smuzhiyun 	int err;
1561*4882a593Smuzhiyun 
1562*4882a593Smuzhiyun 	err = vrf_fib_rule(dev, AF_INET,  true);
1563*4882a593Smuzhiyun 	if (err < 0)
1564*4882a593Smuzhiyun 		goto out_err;
1565*4882a593Smuzhiyun 
1566*4882a593Smuzhiyun 	err = vrf_fib_rule(dev, AF_INET6, true);
1567*4882a593Smuzhiyun 	if (err < 0)
1568*4882a593Smuzhiyun 		goto ipv6_err;
1569*4882a593Smuzhiyun 
1570*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
1571*4882a593Smuzhiyun 	err = vrf_fib_rule(dev, RTNL_FAMILY_IPMR, true);
1572*4882a593Smuzhiyun 	if (err < 0)
1573*4882a593Smuzhiyun 		goto ipmr_err;
1574*4882a593Smuzhiyun #endif
1575*4882a593Smuzhiyun 
1576*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES)
1577*4882a593Smuzhiyun 	err = vrf_fib_rule(dev, RTNL_FAMILY_IP6MR, true);
1578*4882a593Smuzhiyun 	if (err < 0)
1579*4882a593Smuzhiyun 		goto ip6mr_err;
1580*4882a593Smuzhiyun #endif
1581*4882a593Smuzhiyun 
1582*4882a593Smuzhiyun 	return 0;
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES)
1585*4882a593Smuzhiyun ip6mr_err:
1586*4882a593Smuzhiyun 	vrf_fib_rule(dev, RTNL_FAMILY_IPMR,  false);
1587*4882a593Smuzhiyun #endif
1588*4882a593Smuzhiyun 
1589*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
1590*4882a593Smuzhiyun ipmr_err:
1591*4882a593Smuzhiyun 	vrf_fib_rule(dev, AF_INET6,  false);
1592*4882a593Smuzhiyun #endif
1593*4882a593Smuzhiyun 
1594*4882a593Smuzhiyun ipv6_err:
1595*4882a593Smuzhiyun 	vrf_fib_rule(dev, AF_INET,  false);
1596*4882a593Smuzhiyun 
1597*4882a593Smuzhiyun out_err:
1598*4882a593Smuzhiyun 	netdev_err(dev, "Failed to add FIB rules.\n");
1599*4882a593Smuzhiyun 	return err;
1600*4882a593Smuzhiyun }
1601*4882a593Smuzhiyun 
1602*4882a593Smuzhiyun static void vrf_setup(struct net_device *dev)
1603*4882a593Smuzhiyun {
1604*4882a593Smuzhiyun 	ether_setup(dev);
1605*4882a593Smuzhiyun 
1606*4882a593Smuzhiyun 	/* Initialize the device structure. */
1607*4882a593Smuzhiyun 	dev->netdev_ops = &vrf_netdev_ops;
1608*4882a593Smuzhiyun 	dev->l3mdev_ops = &vrf_l3mdev_ops;
1609*4882a593Smuzhiyun 	dev->ethtool_ops = &vrf_ethtool_ops;
1610*4882a593Smuzhiyun 	dev->needs_free_netdev = true;
1611*4882a593Smuzhiyun 
1612*4882a593Smuzhiyun 	/* Fill in device structure with ethernet-generic values. */
1613*4882a593Smuzhiyun 	eth_hw_addr_random(dev);
1614*4882a593Smuzhiyun 
1615*4882a593Smuzhiyun 	/* don't acquire vrf device's netif_tx_lock when transmitting */
1616*4882a593Smuzhiyun 	dev->features |= NETIF_F_LLTX;
1617*4882a593Smuzhiyun 
1618*4882a593Smuzhiyun 	/* don't allow vrf devices to change network namespaces. */
1619*4882a593Smuzhiyun 	dev->features |= NETIF_F_NETNS_LOCAL;
1620*4882a593Smuzhiyun 
1621*4882a593Smuzhiyun 	/* does not make sense for a VLAN to be added to a vrf device */
1622*4882a593Smuzhiyun 	dev->features   |= NETIF_F_VLAN_CHALLENGED;
1623*4882a593Smuzhiyun 
1624*4882a593Smuzhiyun 	/* enable offload features */
1625*4882a593Smuzhiyun 	dev->features   |= NETIF_F_GSO_SOFTWARE;
1626*4882a593Smuzhiyun 	dev->features   |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC;
1627*4882a593Smuzhiyun 	dev->features   |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
1628*4882a593Smuzhiyun 
1629*4882a593Smuzhiyun 	dev->hw_features = dev->features;
1630*4882a593Smuzhiyun 	dev->hw_enc_features = dev->features;
1631*4882a593Smuzhiyun 
1632*4882a593Smuzhiyun 	/* default to no qdisc; user can add if desired */
1633*4882a593Smuzhiyun 	dev->priv_flags |= IFF_NO_QUEUE;
1634*4882a593Smuzhiyun 	dev->priv_flags |= IFF_NO_RX_HANDLER;
1635*4882a593Smuzhiyun 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1636*4882a593Smuzhiyun 
1637*4882a593Smuzhiyun 	/* VRF devices do not care about MTU, but if the MTU is set
1638*4882a593Smuzhiyun 	 * too low then the ipv4 and ipv6 protocols are disabled
1639*4882a593Smuzhiyun 	 * which breaks networking.
1640*4882a593Smuzhiyun 	 */
1641*4882a593Smuzhiyun 	dev->min_mtu = IPV6_MIN_MTU;
1642*4882a593Smuzhiyun 	dev->max_mtu = IP6_MAX_MTU;
1643*4882a593Smuzhiyun 	dev->mtu = dev->max_mtu;
1644*4882a593Smuzhiyun }
1645*4882a593Smuzhiyun 
1646*4882a593Smuzhiyun static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
1647*4882a593Smuzhiyun 			struct netlink_ext_ack *extack)
1648*4882a593Smuzhiyun {
1649*4882a593Smuzhiyun 	if (tb[IFLA_ADDRESS]) {
1650*4882a593Smuzhiyun 		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
1651*4882a593Smuzhiyun 			NL_SET_ERR_MSG(extack, "Invalid hardware address");
1652*4882a593Smuzhiyun 			return -EINVAL;
1653*4882a593Smuzhiyun 		}
1654*4882a593Smuzhiyun 		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
1655*4882a593Smuzhiyun 			NL_SET_ERR_MSG(extack, "Invalid hardware address");
1656*4882a593Smuzhiyun 			return -EADDRNOTAVAIL;
1657*4882a593Smuzhiyun 		}
1658*4882a593Smuzhiyun 	}
1659*4882a593Smuzhiyun 	return 0;
1660*4882a593Smuzhiyun }
1661*4882a593Smuzhiyun 
1662*4882a593Smuzhiyun static void vrf_dellink(struct net_device *dev, struct list_head *head)
1663*4882a593Smuzhiyun {
1664*4882a593Smuzhiyun 	struct net_device *port_dev;
1665*4882a593Smuzhiyun 	struct list_head *iter;
1666*4882a593Smuzhiyun 
1667*4882a593Smuzhiyun 	netdev_for_each_lower_dev(dev, port_dev, iter)
1668*4882a593Smuzhiyun 		vrf_del_slave(dev, port_dev);
1669*4882a593Smuzhiyun 
1670*4882a593Smuzhiyun 	vrf_map_unregister_dev(dev);
1671*4882a593Smuzhiyun 
1672*4882a593Smuzhiyun 	unregister_netdevice_queue(dev, head);
1673*4882a593Smuzhiyun }
1674*4882a593Smuzhiyun 
1675*4882a593Smuzhiyun static int vrf_newlink(struct net *src_net, struct net_device *dev,
1676*4882a593Smuzhiyun 		       struct nlattr *tb[], struct nlattr *data[],
1677*4882a593Smuzhiyun 		       struct netlink_ext_ack *extack)
1678*4882a593Smuzhiyun {
1679*4882a593Smuzhiyun 	struct net_vrf *vrf = netdev_priv(dev);
1680*4882a593Smuzhiyun 	struct netns_vrf *nn_vrf;
1681*4882a593Smuzhiyun 	bool *add_fib_rules;
1682*4882a593Smuzhiyun 	struct net *net;
1683*4882a593Smuzhiyun 	int err;
1684*4882a593Smuzhiyun 
1685*4882a593Smuzhiyun 	if (!data || !data[IFLA_VRF_TABLE]) {
1686*4882a593Smuzhiyun 		NL_SET_ERR_MSG(extack, "VRF table id is missing");
1687*4882a593Smuzhiyun 		return -EINVAL;
1688*4882a593Smuzhiyun 	}
1689*4882a593Smuzhiyun 
1690*4882a593Smuzhiyun 	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
1691*4882a593Smuzhiyun 	if (vrf->tb_id == RT_TABLE_UNSPEC) {
1692*4882a593Smuzhiyun 		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VRF_TABLE],
1693*4882a593Smuzhiyun 				    "Invalid VRF table id");
1694*4882a593Smuzhiyun 		return -EINVAL;
1695*4882a593Smuzhiyun 	}
1696*4882a593Smuzhiyun 
1697*4882a593Smuzhiyun 	dev->priv_flags |= IFF_L3MDEV_MASTER;
1698*4882a593Smuzhiyun 
1699*4882a593Smuzhiyun 	err = register_netdevice(dev);
1700*4882a593Smuzhiyun 	if (err)
1701*4882a593Smuzhiyun 		goto out;
1702*4882a593Smuzhiyun 
1703*4882a593Smuzhiyun 	/* mapping between table_id and vrf;
1704*4882a593Smuzhiyun 	 * note: such a binding cannot be done in the dev init function
1705*4882a593Smuzhiyun 	 * because dev->ifindex is not available yet.
1706*4882a593Smuzhiyun 	 */
1707*4882a593Smuzhiyun 	vrf->ifindex = dev->ifindex;
1708*4882a593Smuzhiyun 
1709*4882a593Smuzhiyun 	err = vrf_map_register_dev(dev, extack);
1710*4882a593Smuzhiyun 	if (err) {
1711*4882a593Smuzhiyun 		unregister_netdevice(dev);
1712*4882a593Smuzhiyun 		goto out;
1713*4882a593Smuzhiyun 	}
1714*4882a593Smuzhiyun 
1715*4882a593Smuzhiyun 	net = dev_net(dev);
1716*4882a593Smuzhiyun 	nn_vrf = net_generic(net, vrf_net_id);
1717*4882a593Smuzhiyun 
1718*4882a593Smuzhiyun 	add_fib_rules = &nn_vrf->add_fib_rules;
1719*4882a593Smuzhiyun 	if (*add_fib_rules) {
1720*4882a593Smuzhiyun 		err = vrf_add_fib_rules(dev);
1721*4882a593Smuzhiyun 		if (err) {
1722*4882a593Smuzhiyun 			vrf_map_unregister_dev(dev);
1723*4882a593Smuzhiyun 			unregister_netdevice(dev);
1724*4882a593Smuzhiyun 			goto out;
1725*4882a593Smuzhiyun 		}
1726*4882a593Smuzhiyun 		*add_fib_rules = false;
1727*4882a593Smuzhiyun 	}
1728*4882a593Smuzhiyun 
1729*4882a593Smuzhiyun out:
1730*4882a593Smuzhiyun 	return err;
1731*4882a593Smuzhiyun }
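
/* For illustration only (hypothetical names): a VRF device is created from
 * userspace with a mandatory table id, matching the IFLA_VRF_TABLE check
 * above:
 *
 *   ip link add vrf-blue type vrf table 10
 *   ip link set dev vrf-blue up
 *
 * The first VRF created in a network namespace also triggers installation
 * of the l3mdev FIB rules via vrf_add_fib_rules().
 */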
1732*4882a593Smuzhiyun 
1733*4882a593Smuzhiyun static size_t vrf_nl_getsize(const struct net_device *dev)
1734*4882a593Smuzhiyun {
1735*4882a593Smuzhiyun 	return nla_total_size(sizeof(u32));  /* IFLA_VRF_TABLE */
1736*4882a593Smuzhiyun }
1737*4882a593Smuzhiyun 
1738*4882a593Smuzhiyun static int vrf_fillinfo(struct sk_buff *skb,
1739*4882a593Smuzhiyun 			const struct net_device *dev)
1740*4882a593Smuzhiyun {
1741*4882a593Smuzhiyun 	struct net_vrf *vrf = netdev_priv(dev);
1742*4882a593Smuzhiyun 
1743*4882a593Smuzhiyun 	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
1744*4882a593Smuzhiyun }
1745*4882a593Smuzhiyun 
1746*4882a593Smuzhiyun static size_t vrf_get_slave_size(const struct net_device *bond_dev,
1747*4882a593Smuzhiyun 				 const struct net_device *slave_dev)
1748*4882a593Smuzhiyun {
1749*4882a593Smuzhiyun 	return nla_total_size(sizeof(u32));  /* IFLA_VRF_PORT_TABLE */
1750*4882a593Smuzhiyun }
1751*4882a593Smuzhiyun 
1752*4882a593Smuzhiyun static int vrf_fill_slave_info(struct sk_buff *skb,
1753*4882a593Smuzhiyun 			       const struct net_device *vrf_dev,
1754*4882a593Smuzhiyun 			       const struct net_device *slave_dev)
1755*4882a593Smuzhiyun {
1756*4882a593Smuzhiyun 	struct net_vrf *vrf = netdev_priv(vrf_dev);
1757*4882a593Smuzhiyun 
1758*4882a593Smuzhiyun 	if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
1759*4882a593Smuzhiyun 		return -EMSGSIZE;
1760*4882a593Smuzhiyun 
1761*4882a593Smuzhiyun 	return 0;
1762*4882a593Smuzhiyun }
1763*4882a593Smuzhiyun 
1764*4882a593Smuzhiyun static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
1765*4882a593Smuzhiyun 	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
1766*4882a593Smuzhiyun };
1767*4882a593Smuzhiyun 
1768*4882a593Smuzhiyun static struct rtnl_link_ops vrf_link_ops __read_mostly = {
1769*4882a593Smuzhiyun 	.kind		= DRV_NAME,
1770*4882a593Smuzhiyun 	.priv_size	= sizeof(struct net_vrf),
1771*4882a593Smuzhiyun 
1772*4882a593Smuzhiyun 	.get_size	= vrf_nl_getsize,
1773*4882a593Smuzhiyun 	.policy		= vrf_nl_policy,
1774*4882a593Smuzhiyun 	.validate	= vrf_validate,
1775*4882a593Smuzhiyun 	.fill_info	= vrf_fillinfo,
1776*4882a593Smuzhiyun 
1777*4882a593Smuzhiyun 	.get_slave_size  = vrf_get_slave_size,
1778*4882a593Smuzhiyun 	.fill_slave_info = vrf_fill_slave_info,
1779*4882a593Smuzhiyun 
1780*4882a593Smuzhiyun 	.newlink	= vrf_newlink,
1781*4882a593Smuzhiyun 	.dellink	= vrf_dellink,
1782*4882a593Smuzhiyun 	.setup		= vrf_setup,
1783*4882a593Smuzhiyun 	.maxtype	= IFLA_VRF_MAX,
1784*4882a593Smuzhiyun };
1785*4882a593Smuzhiyun 
1786*4882a593Smuzhiyun static int vrf_device_event(struct notifier_block *unused,
1787*4882a593Smuzhiyun 			    unsigned long event, void *ptr)
1788*4882a593Smuzhiyun {
1789*4882a593Smuzhiyun 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1790*4882a593Smuzhiyun 
1791*4882a593Smuzhiyun 	/* only care about unregister events to drop slave references */
1792*4882a593Smuzhiyun 	if (event == NETDEV_UNREGISTER) {
1793*4882a593Smuzhiyun 		struct net_device *vrf_dev;
1794*4882a593Smuzhiyun 
1795*4882a593Smuzhiyun 		if (!netif_is_l3_slave(dev))
1796*4882a593Smuzhiyun 			goto out;
1797*4882a593Smuzhiyun 
1798*4882a593Smuzhiyun 		vrf_dev = netdev_master_upper_dev_get(dev);
1799*4882a593Smuzhiyun 		vrf_del_slave(vrf_dev, dev);
1800*4882a593Smuzhiyun 	}
1801*4882a593Smuzhiyun out:
1802*4882a593Smuzhiyun 	return NOTIFY_DONE;
1803*4882a593Smuzhiyun }
1804*4882a593Smuzhiyun 
1805*4882a593Smuzhiyun static struct notifier_block vrf_notifier_block __read_mostly = {
1806*4882a593Smuzhiyun 	.notifier_call = vrf_device_event,
1807*4882a593Smuzhiyun };
1808*4882a593Smuzhiyun 
1809*4882a593Smuzhiyun static int vrf_map_init(struct vrf_map *vmap)
1810*4882a593Smuzhiyun {
1811*4882a593Smuzhiyun 	spin_lock_init(&vmap->vmap_lock);
1812*4882a593Smuzhiyun 	hash_init(vmap->ht);
1813*4882a593Smuzhiyun 
1814*4882a593Smuzhiyun 	vmap->strict_mode = false;
1815*4882a593Smuzhiyun 
1816*4882a593Smuzhiyun 	return 0;
1817*4882a593Smuzhiyun }
1818*4882a593Smuzhiyun 
1819*4882a593Smuzhiyun #ifdef CONFIG_SYSCTL
1820*4882a593Smuzhiyun static bool vrf_strict_mode(struct vrf_map *vmap)
1821*4882a593Smuzhiyun {
1822*4882a593Smuzhiyun 	bool strict_mode;
1823*4882a593Smuzhiyun 
1824*4882a593Smuzhiyun 	vrf_map_lock(vmap);
1825*4882a593Smuzhiyun 	strict_mode = vmap->strict_mode;
1826*4882a593Smuzhiyun 	vrf_map_unlock(vmap);
1827*4882a593Smuzhiyun 
1828*4882a593Smuzhiyun 	return strict_mode;
1829*4882a593Smuzhiyun }
1830*4882a593Smuzhiyun 
1831*4882a593Smuzhiyun static int vrf_strict_mode_change(struct vrf_map *vmap, bool new_mode)
1832*4882a593Smuzhiyun {
1833*4882a593Smuzhiyun 	bool *cur_mode;
1834*4882a593Smuzhiyun 	int res = 0;
1835*4882a593Smuzhiyun 
1836*4882a593Smuzhiyun 	vrf_map_lock(vmap);
1837*4882a593Smuzhiyun 
1838*4882a593Smuzhiyun 	cur_mode = &vmap->strict_mode;
1839*4882a593Smuzhiyun 	if (*cur_mode == new_mode)
1840*4882a593Smuzhiyun 		goto unlock;
1841*4882a593Smuzhiyun 
1842*4882a593Smuzhiyun 	if (*cur_mode) {
1843*4882a593Smuzhiyun 		/* disable strict mode */
1844*4882a593Smuzhiyun 		*cur_mode = false;
1845*4882a593Smuzhiyun 	} else {
1846*4882a593Smuzhiyun 		if (vmap->shared_tables) {
1847*4882a593Smuzhiyun 			/* we cannot allow strict_mode because there are some
1848*4882a593Smuzhiyun 			 * vrfs that share one or more tables.
1849*4882a593Smuzhiyun 			 */
1850*4882a593Smuzhiyun 			res = -EBUSY;
1851*4882a593Smuzhiyun 			goto unlock;
1852*4882a593Smuzhiyun 		}
1853*4882a593Smuzhiyun 
1854*4882a593Smuzhiyun 		/* no tables are shared among vrfs, so we can go back
1855*4882a593Smuzhiyun 		 * to a 1:1 association between a vrf and its table.
1856*4882a593Smuzhiyun 		 */
1857*4882a593Smuzhiyun 		*cur_mode = true;
1858*4882a593Smuzhiyun 	}
1859*4882a593Smuzhiyun 
1860*4882a593Smuzhiyun unlock:
1861*4882a593Smuzhiyun 	vrf_map_unlock(vmap);
1862*4882a593Smuzhiyun 
1863*4882a593Smuzhiyun 	return res;
1864*4882a593Smuzhiyun }
1865*4882a593Smuzhiyun 
1866*4882a593Smuzhiyun static int vrf_shared_table_handler(struct ctl_table *table, int write,
1867*4882a593Smuzhiyun 				    void *buffer, size_t *lenp, loff_t *ppos)
1868*4882a593Smuzhiyun {
1869*4882a593Smuzhiyun 	struct net *net = (struct net *)table->extra1;
1870*4882a593Smuzhiyun 	struct vrf_map *vmap = netns_vrf_map(net);
1871*4882a593Smuzhiyun 	int proc_strict_mode = 0;
1872*4882a593Smuzhiyun 	struct ctl_table tmp = {
1873*4882a593Smuzhiyun 		.procname	= table->procname,
1874*4882a593Smuzhiyun 		.data		= &proc_strict_mode,
1875*4882a593Smuzhiyun 		.maxlen		= sizeof(int),
1876*4882a593Smuzhiyun 		.mode		= table->mode,
1877*4882a593Smuzhiyun 		.extra1		= SYSCTL_ZERO,
1878*4882a593Smuzhiyun 		.extra2		= SYSCTL_ONE,
1879*4882a593Smuzhiyun 	};
1880*4882a593Smuzhiyun 	int ret;
1881*4882a593Smuzhiyun 
1882*4882a593Smuzhiyun 	if (!write)
1883*4882a593Smuzhiyun 		proc_strict_mode = vrf_strict_mode(vmap);
1884*4882a593Smuzhiyun 
1885*4882a593Smuzhiyun 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
1886*4882a593Smuzhiyun 
1887*4882a593Smuzhiyun 	if (write && ret == 0)
1888*4882a593Smuzhiyun 		ret = vrf_strict_mode_change(vmap, (bool)proc_strict_mode);
1889*4882a593Smuzhiyun 
1890*4882a593Smuzhiyun 	return ret;
1891*4882a593Smuzhiyun }
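
/* Strict mode is exposed per network namespace as the sysctl
 * net.vrf.strict_mode (registered below under "net/vrf"). For example:
 *
 *   sysctl -w net.vrf.strict_mode=1
 *
 * Enabling it fails with EBUSY while any routing table is still shared by
 * more than one VRF (see vrf_strict_mode_change() above).
 */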
1892*4882a593Smuzhiyun 
1893*4882a593Smuzhiyun static const struct ctl_table vrf_table[] = {
1894*4882a593Smuzhiyun 	{
1895*4882a593Smuzhiyun 		.procname	= "strict_mode",
1896*4882a593Smuzhiyun 		.data		= NULL,
1897*4882a593Smuzhiyun 		.maxlen		= sizeof(int),
1898*4882a593Smuzhiyun 		.mode		= 0644,
1899*4882a593Smuzhiyun 		.proc_handler	= vrf_shared_table_handler,
1900*4882a593Smuzhiyun 		/* set by the vrf_netns_init */
1901*4882a593Smuzhiyun 		.extra1		= NULL,
1902*4882a593Smuzhiyun 	},
1903*4882a593Smuzhiyun 	{ },
1904*4882a593Smuzhiyun };
1905*4882a593Smuzhiyun 
1906*4882a593Smuzhiyun static int vrf_netns_init_sysctl(struct net *net, struct netns_vrf *nn_vrf)
1907*4882a593Smuzhiyun {
1908*4882a593Smuzhiyun 	struct ctl_table *table;
1909*4882a593Smuzhiyun 
1910*4882a593Smuzhiyun 	table = kmemdup(vrf_table, sizeof(vrf_table), GFP_KERNEL);
1911*4882a593Smuzhiyun 	if (!table)
1912*4882a593Smuzhiyun 		return -ENOMEM;
1913*4882a593Smuzhiyun 
1914*4882a593Smuzhiyun 	/* init the extra1 parameter with the reference to current netns */
1915*4882a593Smuzhiyun 	table[0].extra1 = net;
1916*4882a593Smuzhiyun 
1917*4882a593Smuzhiyun 	nn_vrf->ctl_hdr = register_net_sysctl(net, "net/vrf", table);
1918*4882a593Smuzhiyun 	if (!nn_vrf->ctl_hdr) {
1919*4882a593Smuzhiyun 		kfree(table);
1920*4882a593Smuzhiyun 		return -ENOMEM;
1921*4882a593Smuzhiyun 	}
1922*4882a593Smuzhiyun 
1923*4882a593Smuzhiyun 	return 0;
1924*4882a593Smuzhiyun }
1925*4882a593Smuzhiyun 
1926*4882a593Smuzhiyun static void vrf_netns_exit_sysctl(struct net *net)
1927*4882a593Smuzhiyun {
1928*4882a593Smuzhiyun 	struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id);
1929*4882a593Smuzhiyun 	struct ctl_table *table;
1930*4882a593Smuzhiyun 
1931*4882a593Smuzhiyun 	table = nn_vrf->ctl_hdr->ctl_table_arg;
1932*4882a593Smuzhiyun 	unregister_net_sysctl_table(nn_vrf->ctl_hdr);
1933*4882a593Smuzhiyun 	kfree(table);
1934*4882a593Smuzhiyun }
1935*4882a593Smuzhiyun #else
1936*4882a593Smuzhiyun static int vrf_netns_init_sysctl(struct net *net, struct netns_vrf *nn_vrf)
1937*4882a593Smuzhiyun {
1938*4882a593Smuzhiyun 	return 0;
1939*4882a593Smuzhiyun }
1940*4882a593Smuzhiyun 
1941*4882a593Smuzhiyun static void vrf_netns_exit_sysctl(struct net *net)
1942*4882a593Smuzhiyun {
1943*4882a593Smuzhiyun }
1944*4882a593Smuzhiyun #endif
1945*4882a593Smuzhiyun 
1946*4882a593Smuzhiyun /* Initialize per network namespace state */
1947*4882a593Smuzhiyun static int __net_init vrf_netns_init(struct net *net)
1948*4882a593Smuzhiyun {
1949*4882a593Smuzhiyun 	struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id);
1950*4882a593Smuzhiyun 
1951*4882a593Smuzhiyun 	nn_vrf->add_fib_rules = true;
1952*4882a593Smuzhiyun 	vrf_map_init(&nn_vrf->vmap);
1953*4882a593Smuzhiyun 
1954*4882a593Smuzhiyun 	return vrf_netns_init_sysctl(net, nn_vrf);
1955*4882a593Smuzhiyun }
1956*4882a593Smuzhiyun 
1957*4882a593Smuzhiyun static void __net_exit vrf_netns_exit(struct net *net)
1958*4882a593Smuzhiyun {
1959*4882a593Smuzhiyun 	vrf_netns_exit_sysctl(net);
1960*4882a593Smuzhiyun }
1961*4882a593Smuzhiyun 
1962*4882a593Smuzhiyun static struct pernet_operations vrf_net_ops __net_initdata = {
1963*4882a593Smuzhiyun 	.init = vrf_netns_init,
1964*4882a593Smuzhiyun 	.exit = vrf_netns_exit,
1965*4882a593Smuzhiyun 	.id   = &vrf_net_id,
1966*4882a593Smuzhiyun 	.size = sizeof(struct netns_vrf),
1967*4882a593Smuzhiyun };
1968*4882a593Smuzhiyun 
1969*4882a593Smuzhiyun static int __init vrf_init_module(void)
1970*4882a593Smuzhiyun {
1971*4882a593Smuzhiyun 	int rc;
1972*4882a593Smuzhiyun 
1973*4882a593Smuzhiyun 	register_netdevice_notifier(&vrf_notifier_block);
1974*4882a593Smuzhiyun 
1975*4882a593Smuzhiyun 	rc = register_pernet_subsys(&vrf_net_ops);
1976*4882a593Smuzhiyun 	if (rc < 0)
1977*4882a593Smuzhiyun 		goto error;
1978*4882a593Smuzhiyun 
1979*4882a593Smuzhiyun 	rc = l3mdev_table_lookup_register(L3MDEV_TYPE_VRF,
1980*4882a593Smuzhiyun 					  vrf_ifindex_lookup_by_table_id);
1981*4882a593Smuzhiyun 	if (rc < 0)
1982*4882a593Smuzhiyun 		goto unreg_pernet;
1983*4882a593Smuzhiyun 
1984*4882a593Smuzhiyun 	rc = rtnl_link_register(&vrf_link_ops);
1985*4882a593Smuzhiyun 	if (rc < 0)
1986*4882a593Smuzhiyun 		goto table_lookup_unreg;
1987*4882a593Smuzhiyun 
1988*4882a593Smuzhiyun 	return 0;
1989*4882a593Smuzhiyun 
1990*4882a593Smuzhiyun table_lookup_unreg:
1991*4882a593Smuzhiyun 	l3mdev_table_lookup_unregister(L3MDEV_TYPE_VRF,
1992*4882a593Smuzhiyun 				       vrf_ifindex_lookup_by_table_id);
1993*4882a593Smuzhiyun 
1994*4882a593Smuzhiyun unreg_pernet:
1995*4882a593Smuzhiyun 	unregister_pernet_subsys(&vrf_net_ops);
1996*4882a593Smuzhiyun 
1997*4882a593Smuzhiyun error:
1998*4882a593Smuzhiyun 	unregister_netdevice_notifier(&vrf_notifier_block);
1999*4882a593Smuzhiyun 	return rc;
2000*4882a593Smuzhiyun }
2001*4882a593Smuzhiyun 
2002*4882a593Smuzhiyun module_init(vrf_init_module);
2003*4882a593Smuzhiyun MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
2004*4882a593Smuzhiyun MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
2005*4882a593Smuzhiyun MODULE_LICENSE("GPL");
2006*4882a593Smuzhiyun MODULE_ALIAS_RTNL_LINK(DRV_NAME);
2007*4882a593Smuzhiyun MODULE_VERSION(DRV_VERSION);
2008