xref: /OK3568_Linux_fs/kernel/net/8021q/vlan_core.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include "vlan.h"

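/*
 * vlan_do_receive - steer a VLAN-tagged skb to its VLAN device
 *
 * Look up the VLAN device matching the tag in skb->vlan_tci (and
 * skb->vlan_proto) on top of skb->dev and retarget the skb to it.
 * Packets addressed to the VLAN device's own MAC are re-marked as
 * PACKET_HOST, the ingress priority is applied and per-CPU RX stats
 * are updated.  When the REORDER_HDR flag is cleared (and the skb is
 * not headed for a bridge or macvlan port) the tag is re-inserted
 * into the packet data.
 *
 * Return: true if the skb now belongs to a VLAN device; false if no
 * matching VLAN device exists, or if the skb was dropped, in which
 * case *skbp is set to NULL.
 */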
bool vlan_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	__be16 vlan_proto = skb->vlan_proto;
	u16 vlan_id = skb_vlan_tag_get_id(skb);
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
	if (!vlan_dev)
		return false;

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	if (unlikely(!(vlan_dev->flags & IFF_UP))) {
		kfree_skb(skb);
		*skbp = NULL;
		return false;
	}

	skb->dev = vlan_dev;
	if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR) &&
	    !netif_is_macvlan_port(vlan_dev) &&
	    !netif_is_bridge_port(vlan_dev)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_tag expects skb->data to point at the mac
		 * header.  So move skb->data there before calling it and
		 * restore the original position afterwards.
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_inner_tag(skb, skb->vlan_proto,
						    skb->vlan_tci, skb->mac_len);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	__vlan_hwaccel_clear_tag(skb);

	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}

/* Must be invoked with rcu_read_lock. */
struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
					__be16 vlan_proto, u16 vlan_id)
{
	struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);

	if (vlan_info) {
		return vlan_group_get_device(&vlan_info->grp,
					     vlan_proto, vlan_id);
	} else {
		/*
		 * Lower devices of master uppers (bonding, team) do not have
		 * grp assigned to themselves. Grp is assigned to upper device
		 * instead.
		 */
		struct net_device *upper_dev;

		upper_dev = netdev_master_upper_dev_get_rcu(dev);
		if (upper_dev)
			return __vlan_find_dev_deep_rcu(upper_dev,
						    vlan_proto, vlan_id);
	}

	return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);

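/*
 * vlan_dev_real_dev - return the device a VLAN device ultimately sits on,
 * walking through any intermediate stacked VLAN devices (QinQ).
 */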
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	struct net_device *ret = vlan_dev_priv(dev)->real_dev;

	while (is_vlan_dev(ret))
		ret = vlan_dev_priv(ret)->real_dev;

	return ret;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

__be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_proto;
}
EXPORT_SYMBOL(vlan_dev_vlan_proto);

/*
 * vlan info and vid list
 */

static void vlan_group_free(struct vlan_group *grp)
{
	int i, j;

	for (i = 0; i < VLAN_PROTO_NUM; i++)
		for (j = 0; j < VLAN_GROUP_ARRAY_SPLIT_PARTS; j++)
			kfree(grp->vlan_devices_arrays[i][j]);
}

static void vlan_info_free(struct vlan_info *vlan_info)
{
	vlan_group_free(&vlan_info->grp);
	kfree(vlan_info);
}

static void vlan_info_rcu_free(struct rcu_head *rcu)
{
	vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}

static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
	struct vlan_info *vlan_info;

	vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
	if (!vlan_info)
		return NULL;

	vlan_info->real_dev = dev;
	INIT_LIST_HEAD(&vlan_info->vid_list);
	return vlan_info;
}

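/*
 * One entry per (protocol, VID) pair known on a real device.  refcount
 * counts how many vlan_vid_add() calls are outstanding for the pair; the
 * entry (and the hardware filter slot, if any) is released once the last
 * vlan_vid_del() drops it to zero.
 */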
struct vlan_vid_info {
	struct list_head list;
	__be16 proto;
	u16 vid;
	int refcount;
};

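/*
 * True if the real device can filter the given VLAN protocol in hardware
 * (NETIF_F_HW_VLAN_CTAG_FILTER for 802.1Q, NETIF_F_HW_VLAN_STAG_FILTER
 * for 802.1ad).
 */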
static bool vlan_hw_filter_capable(const struct net_device *dev, __be16 proto)
{
	if (proto == htons(ETH_P_8021Q) &&
	    dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		return true;
	if (proto == htons(ETH_P_8021AD) &&
	    dev->features & NETIF_F_HW_VLAN_STAG_FILTER)
		return true;
	return false;
}

static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
					       __be16 proto, u16 vid)
{
	struct vlan_vid_info *vid_info;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		if (vid_info->proto == proto && vid_info->vid == vid)
			return vid_info;
	}
	return NULL;
}

static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
{
	struct vlan_vid_info *vid_info;

	vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
	if (!vid_info)
		return NULL;
	vid_info->proto = proto;
	vid_info->vid = vid;

	return vid_info;
}

static int vlan_add_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
{
	if (!vlan_hw_filter_capable(dev, proto))
		return 0;

	if (netif_device_present(dev))
		return dev->netdev_ops->ndo_vlan_rx_add_vid(dev, proto, vid);
	else
		return -ENODEV;
}

static int vlan_kill_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
{
	if (!vlan_hw_filter_capable(dev, proto))
		return 0;

	if (netif_device_present(dev))
		return dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
	else
		return -ENODEV;
}

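/*
 * vlan_for_each - invoke @action for each VID tracked on @dev, passing
 * the corresponding VLAN device from the group (NULL if none) and the
 * VID itself.  Iteration stops at the first non-zero return value,
 * which is propagated to the caller.  Must be called under RTNL.
 */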
int vlan_for_each(struct net_device *dev,
		  int (*action)(struct net_device *dev, int vid, void *arg),
		  void *arg)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	struct net_device *vdev;
	int ret;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		vdev = vlan_group_get_device(&vlan_info->grp, vid_info->proto,
					     vid_info->vid);
		ret = action(vdev, vid_info->vid, arg);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(vlan_for_each);

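/*
 * vlan_filter_push_vids - program every VID of @proto tracked in
 * @vlan_info into the real device's hardware filter.  On failure the
 * VIDs programmed so far are removed again and the error is returned.
 */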
int vlan_filter_push_vids(struct vlan_info *vlan_info, __be16 proto)
{
	struct net_device *real_dev = vlan_info->real_dev;
	struct vlan_vid_info *vlan_vid_info;
	int err;

	list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list) {
		if (vlan_vid_info->proto == proto) {
			err = vlan_add_rx_filter_info(real_dev, proto,
						      vlan_vid_info->vid);
			if (err)
				goto unwind;
		}
	}

	return 0;

unwind:
	list_for_each_entry_continue_reverse(vlan_vid_info,
					     &vlan_info->vid_list, list) {
		if (vlan_vid_info->proto == proto)
			vlan_kill_rx_filter_info(real_dev, proto,
						 vlan_vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_filter_push_vids);

void vlan_filter_drop_vids(struct vlan_info *vlan_info, __be16 proto)
{
	struct vlan_vid_info *vlan_vid_info;

	list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list)
		if (vlan_vid_info->proto == proto)
			vlan_kill_rx_filter_info(vlan_info->real_dev,
						 vlan_vid_info->proto,
						 vlan_vid_info->vid);
}
EXPORT_SYMBOL(vlan_filter_drop_vids);

static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
			  struct vlan_vid_info **pvid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	struct vlan_vid_info *vid_info;
	int err;

	vid_info = vlan_vid_info_alloc(proto, vid);
	if (!vid_info)
		return -ENOMEM;

	err = vlan_add_rx_filter_info(dev, proto, vid);
	if (err) {
		kfree(vid_info);
		return err;
	}

	list_add(&vid_info->list, &vlan_info->vid_list);
	vlan_info->nr_vids++;
	*pvid_info = vid_info;
	return 0;
}

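/*
 * vlan_vid_add - take a reference on the (proto, vid) pair for @dev.
 * The vlan_info container is allocated on first use and the VID is
 * programmed into the hardware filter where supported.  Must be called
 * under RTNL and balanced by vlan_vid_del().
 */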
int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;
	bool vlan_info_created = false;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info) {
		vlan_info = vlan_info_alloc(dev);
		if (!vlan_info)
			return -ENOMEM;
		vlan_info_created = true;
	}
	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info) {
		err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
		if (err)
			goto out_free_vlan_info;
	}
	vid_info->refcount++;

	if (vlan_info_created)
		rcu_assign_pointer(dev->vlan_info, vlan_info);

	return 0;

out_free_vlan_info:
	if (vlan_info_created)
		kfree(vlan_info);
	return err;
}
EXPORT_SYMBOL(vlan_vid_add);

static void __vlan_vid_del(struct vlan_info *vlan_info,
			   struct vlan_vid_info *vid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	__be16 proto = vid_info->proto;
	u16 vid = vid_info->vid;
	int err;

	err = vlan_kill_rx_filter_info(dev, proto, vid);
	if (err && dev->reg_state != NETREG_UNREGISTERING)
		netdev_warn(dev, "failed to kill vid %04x/%d\n", proto, vid);

	list_del(&vid_info->list);
	kfree(vid_info);
	vlan_info->nr_vids--;
}

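/*
 * vlan_vid_del - drop a reference on the (proto, vid) pair for @dev.
 * When the last reference goes away the VID is removed from the
 * hardware filter, and once no VIDs remain the vlan_info container is
 * freed after an RCU grace period.  Must be called under RTNL.
 */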
void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return;

	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info)
		return;
	vid_info->refcount--;
	if (vid_info->refcount == 0) {
		__vlan_vid_del(vlan_info, vid_info);
		if (vlan_info->nr_vids == 0) {
			RCU_INIT_POINTER(dev->vlan_info, NULL);
			call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
		}
	}
}
EXPORT_SYMBOL(vlan_vid_del);

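/*
 * vlan_vids_add_by_dev - replay every VID tracked on @by_dev onto @dev,
 * e.g. when @dev becomes a lower device of @by_dev.  On failure the
 * VIDs added so far are removed again and the error is returned.
 */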
int vlan_vids_add_by_dev(struct net_device *dev,
			 const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	list_for_each_entry_continue_reverse(vid_info,
					     &vlan_info->vid_list,
					     list) {
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);

void vlan_vids_del_by_dev(struct net_device *dev,
			  const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list)
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);

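/*
 * vlan_uses_dev - true if at least one VLAN device is configured on top
 * of @dev.  Must be called under RTNL.
 */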
bool vlan_uses_dev(const struct net_device *dev)
{
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return false;
	return vlan_info->grp.nr_vlan_devs ? true : false;
}
EXPORT_SYMBOL(vlan_uses_dev);

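/*
 * GRO receive handler for VLAN-tagged frames whose tag is still in the
 * packet data (i.e. not stripped by the NIC).  Frames whose VLAN header
 * differs are marked as belonging to different flows, then the inner
 * protocol's gro_receive callback is invoked past the VLAN header.
 */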
static struct sk_buff *vlan_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	const struct packet_offload *ptype;
	unsigned int hlen, off_vlan;
	struct sk_buff *pp = NULL;
	struct vlan_hdr *vhdr;
	struct sk_buff *p;
	__be16 type;
	int flush = 1;

	off_vlan = skb_gro_offset(skb);
	hlen = off_vlan + sizeof(*vhdr);
	vhdr = skb_gro_header_fast(skb, off_vlan);
	if (skb_gro_header_hard(skb, hlen)) {
		vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
		if (unlikely(!vhdr))
			goto out;
	}

	type = vhdr->h_vlan_encapsulated_proto;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out_unlock;

	flush = 0;

	list_for_each_entry(p, head, list) {
		struct vlan_hdr *vhdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
		if (compare_vlan_header(vhdr, vhdr2))
			NAPI_GRO_CB(p)->same_flow = 0;
	}

	skb_gro_pull(skb, sizeof(*vhdr));
	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);

out_unlock:
	rcu_read_unlock();
out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}

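/*
 * GRO complete handler: finish the merged packet by delegating to the
 * completion callback of the protocol encapsulated behind the VLAN
 * header at offset @nhoff.
 */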
static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
	__be16 type = vhdr->h_vlan_encapsulated_proto;
	struct packet_offload *ptype;
	int err = -ENOENT;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr));

	rcu_read_unlock();
	return err;
}

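/*
 * GRO offload handlers for 802.1Q and 802.1ad tagged frames, registered
 * globally at boot via fs_initcall(vlan_offload_init).
 */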
static struct packet_offload vlan_packet_offloads[] __read_mostly = {
	{
		.type = cpu_to_be16(ETH_P_8021Q),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
	{
		.type = cpu_to_be16(ETH_P_8021AD),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
};

static int __init vlan_offload_init(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
		dev_add_offload(&vlan_packet_offloads[i]);

	return 0;
}

fs_initcall(vlan_offload_init);
554