/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * VLAN		An implementation of 802.1Q VLAN tagging.
 *
 * Authors:	Ben Greear <greearb@candelatech.com>
 */
#ifndef _LINUX_IF_VLAN_H_
#define _LINUX_IF_VLAN_H_

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/bug.h>
#include <uapi/linux/if_vlan.h>

#define VLAN_HLEN	4		/* The additional bytes required by VLAN
					 * (in addition to the Ethernet header)
					 */
#define VLAN_ETH_HLEN	18		/* Total octets in header.	 */
#define VLAN_ETH_ZLEN	64		/* Min. octets in frame sans FCS */

/*
 * According to 802.3ac, the packet can be 4 bytes longer. --Klika Jan
 */
#define VLAN_ETH_DATA_LEN	1500	/* Max. octets in payload	 */
#define VLAN_ETH_FRAME_LEN	1518	/* Max. octets in frame sans FCS */

#define VLAN_MAX_DEPTH	8		/* Max. number of nested VLAN tags parsed */

/**
 *	struct vlan_hdr - vlan header
 *	@h_vlan_TCI: priority and VLAN ID
 *	@h_vlan_encapsulated_proto: packet type ID or len
 */
struct vlan_hdr {
	__be16	h_vlan_TCI;
	__be16	h_vlan_encapsulated_proto;
};

/**
 *	struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr)
 *	@h_dest: destination ethernet address
 *	@h_source: source ethernet address
 *	@h_vlan_proto: ethernet protocol
 *	@h_vlan_TCI: priority and VLAN ID
 *	@h_vlan_encapsulated_proto: packet type ID or len
 */
struct vlan_ethhdr {
	unsigned char	h_dest[ETH_ALEN];
	unsigned char	h_source[ETH_ALEN];
	__be16		h_vlan_proto;
	__be16		h_vlan_TCI;
	__be16		h_vlan_encapsulated_proto;
};

#include <linux/skbuff.h>

static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
{
	return (struct vlan_ethhdr *)skb_mac_header(skb);
}

#define VLAN_PRIO_MASK		0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT		13
#define VLAN_CFI_MASK		0x1000 /* Canonical Format Indicator / Drop Eligible Indicator */
#define VLAN_VID_MASK		0x0fff /* VLAN Identifier */
#define VLAN_N_VID		4096

/* found in socket.c */
extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));

static inline bool is_vlan_dev(const struct net_device *dev)
{
	return dev->priv_flags & IFF_802_1Q_VLAN;
}

#define skb_vlan_tag_present(__skb)	((__skb)->vlan_present)
#define skb_vlan_tag_get(__skb)		((__skb)->vlan_tci)
#define skb_vlan_tag_get_id(__skb)	((__skb)->vlan_tci & VLAN_VID_MASK)
#define skb_vlan_tag_get_cfi(__skb)	(!!((__skb)->vlan_tci & VLAN_CFI_MASK))
#define skb_vlan_tag_get_prio(__skb)	(((__skb)->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT)

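/*
 * Usage sketch (illustrative only, not part of the original header): code on
 * the receive path can consult the hardware-accelerated tag through the
 * accessors above. The helper name handle_tagged_frame() is hypothetical.
 *
 *	static void handle_tagged_frame(struct sk_buff *skb)
 *	{
 *		if (skb_vlan_tag_present(skb)) {
 *			unsigned int vid  = skb_vlan_tag_get_id(skb);
 *			unsigned int prio = skb_vlan_tag_get_prio(skb);
 *
 *			pr_debug("vid %u prio %u\n", vid, prio);
 *		}
 *	}
 */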
static inline int vlan_get_rx_ctag_filter_info(struct net_device *dev)
{
	ASSERT_RTNL();
	return notifier_to_errno(call_netdevice_notifiers(NETDEV_CVLAN_FILTER_PUSH_INFO, dev));
}

static inline void vlan_drop_rx_ctag_filter_info(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_CVLAN_FILTER_DROP_INFO, dev);
}

static inline int vlan_get_rx_stag_filter_info(struct net_device *dev)
{
	ASSERT_RTNL();
	return notifier_to_errno(call_netdevice_notifiers(NETDEV_SVLAN_FILTER_PUSH_INFO, dev));
}

static inline void vlan_drop_rx_stag_filter_info(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_SVLAN_FILTER_DROP_INFO, dev);
}

/**
 *	struct vlan_pcpu_stats - VLAN percpu rx/tx stats
 *	@rx_packets: number of received packets
 *	@rx_bytes: number of received bytes
 *	@rx_multicast: number of received multicast packets
 *	@tx_packets: number of transmitted packets
 *	@tx_bytes: number of transmitted bytes
 *	@syncp: synchronization point for 64bit counters
 *	@rx_errors: number of rx errors
 *	@tx_dropped: number of tx drops
 */
struct vlan_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			rx_multicast;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			rx_errors;
	u32			tx_dropped;
};

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)

extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
					       __be16 vlan_proto, u16 vlan_id);
extern int vlan_for_each(struct net_device *dev,
			 int (*action)(struct net_device *dev, int vid,
				       void *arg), void *arg);
extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern __be16 vlan_dev_vlan_proto(const struct net_device *dev);

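/*
 * Usage sketch (illustrative only, not part of the original header): walking
 * every VLAN ID registered on top of a real device with vlan_for_each(). The
 * walk is done under RTNL; the callback name is hypothetical, and the vlan
 * device pointer may be absent for a VID that has no 8021q device.
 *
 *	static int dump_one_vid(struct net_device *vlan_dev, int vid, void *arg)
 *	{
 *		pr_info("vid %d (%s)\n", vid,
 *			vlan_dev ? vlan_dev->name : "no 8021q device");
 *		return 0;		// non-zero aborts the walk
 *	}
 *
 *	// ... with RTNL held:
 *	vlan_for_each(real_dev, dump_one_vid, NULL);
 */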
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun /**
141*4882a593Smuzhiyun  *	struct vlan_priority_tci_mapping - vlan egress priority mappings
142*4882a593Smuzhiyun  *	@priority: skb priority
143*4882a593Smuzhiyun  *	@vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
144*4882a593Smuzhiyun  *	@next: pointer to next struct
145*4882a593Smuzhiyun  */
146*4882a593Smuzhiyun struct vlan_priority_tci_mapping {
147*4882a593Smuzhiyun 	u32					priority;
148*4882a593Smuzhiyun 	u16					vlan_qos;
149*4882a593Smuzhiyun 	struct vlan_priority_tci_mapping	*next;
150*4882a593Smuzhiyun };
151*4882a593Smuzhiyun 
152*4882a593Smuzhiyun struct proc_dir_entry;
153*4882a593Smuzhiyun struct netpoll;
154*4882a593Smuzhiyun 
155*4882a593Smuzhiyun /**
156*4882a593Smuzhiyun  *	struct vlan_dev_priv - VLAN private device data
157*4882a593Smuzhiyun  *	@nr_ingress_mappings: number of ingress priority mappings
158*4882a593Smuzhiyun  *	@ingress_priority_map: ingress priority mappings
159*4882a593Smuzhiyun  *	@nr_egress_mappings: number of egress priority mappings
160*4882a593Smuzhiyun  *	@egress_priority_map: hash of egress priority mappings
161*4882a593Smuzhiyun  *	@vlan_proto: VLAN encapsulation protocol
162*4882a593Smuzhiyun  *	@vlan_id: VLAN identifier
163*4882a593Smuzhiyun  *	@flags: device flags
164*4882a593Smuzhiyun  *	@real_dev: underlying netdevice
165*4882a593Smuzhiyun  *	@real_dev_addr: address of underlying netdevice
166*4882a593Smuzhiyun  *	@dent: proc dir entry
167*4882a593Smuzhiyun  *	@vlan_pcpu_stats: ptr to percpu rx stats
168*4882a593Smuzhiyun  */
169*4882a593Smuzhiyun struct vlan_dev_priv {
170*4882a593Smuzhiyun 	unsigned int				nr_ingress_mappings;
171*4882a593Smuzhiyun 	u32					ingress_priority_map[8];
172*4882a593Smuzhiyun 	unsigned int				nr_egress_mappings;
173*4882a593Smuzhiyun 	struct vlan_priority_tci_mapping	*egress_priority_map[16];
174*4882a593Smuzhiyun 
175*4882a593Smuzhiyun 	__be16					vlan_proto;
176*4882a593Smuzhiyun 	u16					vlan_id;
177*4882a593Smuzhiyun 	u16					flags;
178*4882a593Smuzhiyun 
179*4882a593Smuzhiyun 	struct net_device			*real_dev;
180*4882a593Smuzhiyun 	unsigned char				real_dev_addr[ETH_ALEN];
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun 	struct proc_dir_entry			*dent;
183*4882a593Smuzhiyun 	struct vlan_pcpu_stats __percpu		*vlan_pcpu_stats;
184*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
185*4882a593Smuzhiyun 	struct netpoll				*netpoll;
186*4882a593Smuzhiyun #endif
187*4882a593Smuzhiyun };
188*4882a593Smuzhiyun 
vlan_dev_priv(const struct net_device * dev)189*4882a593Smuzhiyun static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
190*4882a593Smuzhiyun {
191*4882a593Smuzhiyun 	return netdev_priv(dev);
192*4882a593Smuzhiyun }
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun static inline u16
vlan_dev_get_egress_qos_mask(struct net_device * dev,u32 skprio)195*4882a593Smuzhiyun vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio)
196*4882a593Smuzhiyun {
197*4882a593Smuzhiyun 	struct vlan_priority_tci_mapping *mp;
198*4882a593Smuzhiyun 
199*4882a593Smuzhiyun 	smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */
200*4882a593Smuzhiyun 
201*4882a593Smuzhiyun 	mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)];
202*4882a593Smuzhiyun 	while (mp) {
203*4882a593Smuzhiyun 		if (mp->priority == skprio) {
204*4882a593Smuzhiyun 			return mp->vlan_qos; /* This should already be shifted
205*4882a593Smuzhiyun 					      * to mask correctly with the
206*4882a593Smuzhiyun 					      * VLAN's TCI */
207*4882a593Smuzhiyun 		}
208*4882a593Smuzhiyun 		mp = mp->next;
209*4882a593Smuzhiyun 	}
210*4882a593Smuzhiyun 	return 0;
211*4882a593Smuzhiyun }
212*4882a593Smuzhiyun 
213*4882a593Smuzhiyun extern bool vlan_do_receive(struct sk_buff **skb);
214*4882a593Smuzhiyun 
215*4882a593Smuzhiyun extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid);
216*4882a593Smuzhiyun extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid);
217*4882a593Smuzhiyun 
218*4882a593Smuzhiyun extern int vlan_vids_add_by_dev(struct net_device *dev,
219*4882a593Smuzhiyun 				const struct net_device *by_dev);
220*4882a593Smuzhiyun extern void vlan_vids_del_by_dev(struct net_device *dev,
221*4882a593Smuzhiyun 				 const struct net_device *by_dev);
222*4882a593Smuzhiyun 
223*4882a593Smuzhiyun extern bool vlan_uses_dev(const struct net_device *dev);
224*4882a593Smuzhiyun 
225*4882a593Smuzhiyun #else
226*4882a593Smuzhiyun static inline struct net_device *
__vlan_find_dev_deep_rcu(struct net_device * real_dev,__be16 vlan_proto,u16 vlan_id)227*4882a593Smuzhiyun __vlan_find_dev_deep_rcu(struct net_device *real_dev,
228*4882a593Smuzhiyun 		     __be16 vlan_proto, u16 vlan_id)
229*4882a593Smuzhiyun {
230*4882a593Smuzhiyun 	return NULL;
231*4882a593Smuzhiyun }
232*4882a593Smuzhiyun 
233*4882a593Smuzhiyun static inline int
vlan_for_each(struct net_device * dev,int (* action)(struct net_device * dev,int vid,void * arg),void * arg)234*4882a593Smuzhiyun vlan_for_each(struct net_device *dev,
235*4882a593Smuzhiyun 	      int (*action)(struct net_device *dev, int vid, void *arg),
236*4882a593Smuzhiyun 	      void *arg)
237*4882a593Smuzhiyun {
238*4882a593Smuzhiyun 	return 0;
239*4882a593Smuzhiyun }
240*4882a593Smuzhiyun 
vlan_dev_real_dev(const struct net_device * dev)241*4882a593Smuzhiyun static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
242*4882a593Smuzhiyun {
243*4882a593Smuzhiyun 	BUG();
244*4882a593Smuzhiyun 	return NULL;
245*4882a593Smuzhiyun }
246*4882a593Smuzhiyun 
vlan_dev_vlan_id(const struct net_device * dev)247*4882a593Smuzhiyun static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
248*4882a593Smuzhiyun {
249*4882a593Smuzhiyun 	BUG();
250*4882a593Smuzhiyun 	return 0;
251*4882a593Smuzhiyun }
252*4882a593Smuzhiyun 
vlan_dev_vlan_proto(const struct net_device * dev)253*4882a593Smuzhiyun static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev)
254*4882a593Smuzhiyun {
255*4882a593Smuzhiyun 	BUG();
256*4882a593Smuzhiyun 	return 0;
257*4882a593Smuzhiyun }
258*4882a593Smuzhiyun 
vlan_dev_get_egress_qos_mask(struct net_device * dev,u32 skprio)259*4882a593Smuzhiyun static inline u16 vlan_dev_get_egress_qos_mask(struct net_device *dev,
260*4882a593Smuzhiyun 					       u32 skprio)
261*4882a593Smuzhiyun {
262*4882a593Smuzhiyun 	return 0;
263*4882a593Smuzhiyun }
264*4882a593Smuzhiyun 
vlan_do_receive(struct sk_buff ** skb)265*4882a593Smuzhiyun static inline bool vlan_do_receive(struct sk_buff **skb)
266*4882a593Smuzhiyun {
267*4882a593Smuzhiyun 	return false;
268*4882a593Smuzhiyun }
269*4882a593Smuzhiyun 
vlan_vid_add(struct net_device * dev,__be16 proto,u16 vid)270*4882a593Smuzhiyun static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
271*4882a593Smuzhiyun {
272*4882a593Smuzhiyun 	return 0;
273*4882a593Smuzhiyun }
274*4882a593Smuzhiyun 
vlan_vid_del(struct net_device * dev,__be16 proto,u16 vid)275*4882a593Smuzhiyun static inline void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
276*4882a593Smuzhiyun {
277*4882a593Smuzhiyun }
278*4882a593Smuzhiyun 
vlan_vids_add_by_dev(struct net_device * dev,const struct net_device * by_dev)279*4882a593Smuzhiyun static inline int vlan_vids_add_by_dev(struct net_device *dev,
280*4882a593Smuzhiyun 				       const struct net_device *by_dev)
281*4882a593Smuzhiyun {
282*4882a593Smuzhiyun 	return 0;
283*4882a593Smuzhiyun }
284*4882a593Smuzhiyun 
vlan_vids_del_by_dev(struct net_device * dev,const struct net_device * by_dev)285*4882a593Smuzhiyun static inline void vlan_vids_del_by_dev(struct net_device *dev,
286*4882a593Smuzhiyun 					const struct net_device *by_dev)
287*4882a593Smuzhiyun {
288*4882a593Smuzhiyun }
289*4882a593Smuzhiyun 
vlan_uses_dev(const struct net_device * dev)290*4882a593Smuzhiyun static inline bool vlan_uses_dev(const struct net_device *dev)
291*4882a593Smuzhiyun {
292*4882a593Smuzhiyun 	return false;
293*4882a593Smuzhiyun }
294*4882a593Smuzhiyun #endif
295*4882a593Smuzhiyun 
296*4882a593Smuzhiyun /**
297*4882a593Smuzhiyun  * eth_type_vlan - check for valid vlan ether type.
298*4882a593Smuzhiyun  * @ethertype: ether type to check
299*4882a593Smuzhiyun  *
300*4882a593Smuzhiyun  * Returns true if the ether type is a vlan ether type.
301*4882a593Smuzhiyun  */
eth_type_vlan(__be16 ethertype)302*4882a593Smuzhiyun static inline bool eth_type_vlan(__be16 ethertype)
303*4882a593Smuzhiyun {
304*4882a593Smuzhiyun 	switch (ethertype) {
305*4882a593Smuzhiyun 	case htons(ETH_P_8021Q):
306*4882a593Smuzhiyun 	case htons(ETH_P_8021AD):
307*4882a593Smuzhiyun 		return true;
308*4882a593Smuzhiyun 	default:
309*4882a593Smuzhiyun 		return false;
310*4882a593Smuzhiyun 	}
311*4882a593Smuzhiyun }
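/*
 * Usage sketch (illustrative only, not part of the original header): when
 * parsing an Ethernet frame by hand, eth_type_vlan() tells whether a struct
 * vlan_hdr follows the outer header. This assumes the headers are linear in
 * skb->data.
 *
 *	const struct ethhdr *eth = eth_hdr(skb);
 *
 *	if (eth_type_vlan(eth->h_proto)) {
 *		const struct vlan_hdr *vhdr =
 *			(const struct vlan_hdr *)(eth + 1);
 *		u16 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
 *
 *		// vhdr->h_vlan_encapsulated_proto is the next EtherType
 *	}
 */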

static inline bool vlan_hw_offload_capable(netdev_features_t features,
					   __be16 proto)
{
	if (proto == htons(ETH_P_8021Q) && features & NETIF_F_HW_VLAN_CTAG_TX)
		return true;
	if (proto == htons(ETH_P_8021AD) && features & NETIF_F_HW_VLAN_STAG_TX)
		return true;
	return false;
}

/**
 * __vlan_insert_inner_tag - inner VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 * @mac_len: MAC header length including outer vlan headers
 *
 * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
 * Returns error if skb_cow_head fails.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
					  __be16 vlan_proto, u16 vlan_tci,
					  unsigned int mac_len)
{
	struct vlan_ethhdr *veth;

	if (skb_cow_head(skb, VLAN_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, VLAN_HLEN);

	/* Move the mac header sans proto to the beginning of the new header. */
	if (likely(mac_len > ETH_TLEN))
		memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN);
	skb->mac_header -= VLAN_HLEN;

	veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN);

	/* first, the ethernet type */
	if (likely(mac_len >= ETH_TLEN)) {
		/* h_vlan_encapsulated_proto should already be populated, and
		 * skb->data has space for h_vlan_proto
		 */
		veth->h_vlan_proto = vlan_proto;
	} else {
		/* h_vlan_encapsulated_proto should not be populated, and
		 * skb->data has no space for h_vlan_proto
		 */
		veth->h_vlan_encapsulated_proto = skb->protocol;
	}

	/* now, the TCI */
	veth->h_vlan_TCI = htons(vlan_tci);

	return 0;
}

/**
 * __vlan_insert_tag - regular VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload
 * Returns error if skb_cow_head fails.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline int __vlan_insert_tag(struct sk_buff *skb,
				    __be16 vlan_proto, u16 vlan_tci)
{
	return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
}

/**
 * vlan_insert_inner_tag - inner VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 * @mac_len: MAC header length including outer vlan headers
 *
 * Inserts the VLAN tag into @skb as part of the payload at offset mac_len
 * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
						    __be16 vlan_proto,
						    u16 vlan_tci,
						    unsigned int mac_len)
{
	int err;

	err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len);
	if (err) {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	return skb;
}

/**
 * vlan_insert_tag - regular VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload
 * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
					      __be16 vlan_proto, u16 vlan_tci)
{
	return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
}

/**
 * vlan_insert_tag_set_proto - regular VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload
 * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 */
static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
							__be16 vlan_proto,
							u16 vlan_tci)
{
	skb = vlan_insert_tag(skb, vlan_proto, vlan_tci);
	if (skb)
		skb->protocol = vlan_proto;
	return skb;
}

/**
 * __vlan_hwaccel_clear_tag - clear hardware accelerated VLAN info
 * @skb: skbuff to clear
 *
 * Clears the VLAN information from @skb
 */
static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
{
	skb->vlan_present = 0;
}

/**
 * __vlan_hwaccel_copy_tag - copy hardware accelerated VLAN info from another skb
 * @dst: skbuff to copy to
 * @src: skbuff to copy from
 *
 * Copies VLAN information from @src to @dst (for branchless code)
 */
static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src)
{
	dst->vlan_present = src->vlan_present;
	dst->vlan_proto = src->vlan_proto;
	dst->vlan_tci = src->vlan_tci;
}

/**
 * __vlan_hwaccel_push_inside - pushes vlan tag to the payload
 * @skb: skbuff to tag
 *
 * Pushes the VLAN tag from @skb->vlan_tci inside to the payload.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 */
static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
{
	skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
					skb_vlan_tag_get(skb));
	if (likely(skb))
		__vlan_hwaccel_clear_tag(skb);
	return skb;
}
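/*
 * Usage sketch (illustrative only, not part of the original header): a
 * transmit path that falls back to inserting the tag in software when the
 * device cannot offload it for the given protocol. "dev" and "skb" are the
 * usual ndo_start_xmit() arguments.
 *
 *	if (skb_vlan_tag_present(skb) &&
 *	    !vlan_hw_offload_capable(dev->features, skb->vlan_proto)) {
 *		skb = __vlan_hwaccel_push_inside(skb);
 *		if (!skb)
 *			return NETDEV_TX_OK;	// skb was freed on error
 *	}
 */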

/**
 * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
 */
static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
					  __be16 vlan_proto, u16 vlan_tci)
{
	skb->vlan_proto = vlan_proto;
	skb->vlan_tci = vlan_tci;
	skb->vlan_present = 1;
}
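/*
 * Usage sketch (illustrative only, not part of the original header): a driver
 * whose hardware strips the outer tag on receive records it in the skb before
 * handing the packet to the stack. The descriptor layout (rx_desc, its flags
 * and vlan_tci field) is hypothetical.
 *
 *	if (rx_desc->flags & RX_DESC_VLAN_STRIPPED)
 *		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 *				       le16_to_cpu(rx_desc->vlan_tci));
 *	napi_gro_receive(napi, skb);
 */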

/**
 * __vlan_get_tag - get the VLAN ID that is part of the payload
 * @skb: skbuff to query
 * @vlan_tci: buffer to store value
 *
 * Returns error if the skb is not of VLAN type
 */
static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;

	if (!eth_type_vlan(veth->h_vlan_proto))
		return -EINVAL;

	*vlan_tci = ntohs(veth->h_vlan_TCI);
	return 0;
}

/**
 * __vlan_hwaccel_get_tag - get the VLAN TCI that is in @skb->vlan_tci
 * @skb: skbuff to query
 * @vlan_tci: buffer to store value
 *
 * Returns error if @skb->vlan_tci is not set correctly
 */
static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
					 u16 *vlan_tci)
{
	if (skb_vlan_tag_present(skb)) {
		*vlan_tci = skb_vlan_tag_get(skb);
		return 0;
	} else {
		*vlan_tci = 0;
		return -EINVAL;
	}
}

/**
 * vlan_get_tag - get the VLAN ID from the skb
 * @skb: skbuff to query
 * @vlan_tci: buffer to store value
 *
 * Returns error if the skb is not VLAN tagged
 */
static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
	if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
		return __vlan_hwaccel_get_tag(skb, vlan_tci);
	} else {
		return __vlan_get_tag(skb, vlan_tci);
	}
}

/**
 * __vlan_get_protocol - get protocol EtherType.
 * @skb: skbuff to query
 * @type: first vlan protocol
 * @depth: buffer to store length of eth and vlan tags in bytes
 *
 * Returns the EtherType of the packet, regardless of whether it is
 * vlan encapsulated (normal or hardware accelerated) or not.
 */
static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
					 int *depth)
{
	unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;

	/* if type is 802.1Q/AD then the header should already be
	 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
	 * ETH_HLEN otherwise
	 */
	if (eth_type_vlan(type)) {
		if (vlan_depth) {
			if (WARN_ON(vlan_depth < VLAN_HLEN))
				return 0;
			vlan_depth -= VLAN_HLEN;
		} else {
			vlan_depth = ETH_HLEN;
		}
		do {
			struct vlan_hdr vhdr, *vh;

			vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr);
			if (unlikely(!vh || !--parse_depth))
				return 0;

			type = vh->h_vlan_encapsulated_proto;
			vlan_depth += VLAN_HLEN;
		} while (eth_type_vlan(type));
	}

	if (depth)
		*depth = vlan_depth;

	return type;
}

/**
 * vlan_get_protocol - get protocol EtherType.
 * @skb: skbuff to query
 *
 * Returns the EtherType of the packet, regardless of whether it is
 * vlan encapsulated (normal or hardware accelerated) or not.
 */
static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
{
	return __vlan_get_protocol(skb, skb->protocol, NULL);
}
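/*
 * Usage sketch (illustrative only, not part of the original header): drivers
 * often need the real L3 EtherType for checksum or TSO setup, independent of
 * any VLAN encapsulation:
 *
 *	switch (vlan_get_protocol(skb)) {
 *	case htons(ETH_P_IP):
 *		// IPv4 offload setup
 *		break;
 *	case htons(ETH_P_IPV6):
 *		// IPv6 offload setup
 *		break;
 *	default:
 *		// no offload
 *		break;
 *	}
 */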

/* A getter for the SKB protocol field which will handle VLAN tags consistently
 * whether VLAN acceleration is enabled or not.
 */
static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan)
{
	if (!skip_vlan)
		/* VLAN acceleration strips the VLAN header from the skb and
		 * moves it to skb->vlan_proto
		 */
		return skb_vlan_tag_present(skb) ? skb->vlan_proto : skb->protocol;

	return vlan_get_protocol(skb);
}

static inline void vlan_set_encap_proto(struct sk_buff *skb,
					struct vlan_hdr *vhdr)
{
	__be16 proto;
	unsigned short *rawp;

	/*
	 * Was a VLAN packet, grab the encapsulated protocol, which the layer
	 * three protocols care about.
	 */

	proto = vhdr->h_vlan_encapsulated_proto;
	if (eth_proto_is_802_3(proto)) {
		skb->protocol = proto;
		return;
	}

	rawp = (unsigned short *)(vhdr + 1);
	if (*rawp == 0xFFFF)
		/*
		 * This is a magic hack to spot IPX packets. Older Novell
		 * breaks the protocol design and runs IPX over 802.3 without
		 * an 802.2 LLC layer. We look for FFFF which isn't a used
		 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
		 * but does for the rest.
		 */
		skb->protocol = htons(ETH_P_802_3);
	else
		/*
		 * Real 802.2 LLC
		 */
		skb->protocol = htons(ETH_P_802_2);
}

/**
 * skb_vlan_tagged - check if skb is vlan tagged.
 * @skb: skbuff to query
 *
 * Returns true if the skb is tagged, regardless of whether it is hardware
 * accelerated or not.
 */
static inline bool skb_vlan_tagged(const struct sk_buff *skb)
{
	if (!skb_vlan_tag_present(skb) &&
	    likely(!eth_type_vlan(skb->protocol)))
		return false;

	return true;
}

/**
 * skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers.
 * @skb: skbuff to query
 *
 * Returns true if the skb is tagged with multiple vlan headers, regardless
 * of whether it is hardware accelerated or not.
 */
static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	if (!skb_vlan_tag_present(skb)) {
		struct vlan_ethhdr *veh;

		if (likely(!eth_type_vlan(protocol)))
			return false;

		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			return false;

		veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	}

	if (!eth_type_vlan(protocol))
		return false;

	return true;
}

/**
 * vlan_features_check - drop unsafe features for skb with multiple tags.
 * @skb: skbuff to query
 * @features: features to be checked
 *
 * Returns features without unsafe ones if the skb has multiple tags.
 */
static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
						    netdev_features_t features)
{
	if (skb_vlan_tagged_multi(skb)) {
		/* In the case of multi-tagged packets, use a direct mask
		 * instead of using netdev_intersect_features(), to make
		 * sure that only devices supporting NETIF_F_HW_CSUM will
		 * have checksum offloading support.
		 */
		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
			    NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_TX;
	}

	return features;
}
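/*
 * Usage sketch (illustrative only, not part of the original header): a driver
 * typically applies this from its .ndo_features_check callback so that
 * multi-tagged frames do not keep offloads the hardware cannot honour. The
 * function name foo_features_check() is hypothetical.
 *
 *	static netdev_features_t foo_features_check(struct sk_buff *skb,
 *						    struct net_device *dev,
 *						    netdev_features_t features)
 *	{
 *		return vlan_features_check(skb, features);
 *	}
 *
 *	// in struct net_device_ops: .ndo_features_check = foo_features_check,
 */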

/**
 * compare_vlan_header - Compare two vlan headers
 * @h1: Pointer to vlan header
 * @h2: Pointer to vlan header
 *
 * Compare two vlan headers, returns 0 if equal.
 *
 * Please note that alignment of h1 & h2 are only guaranteed to be 16 bits.
 */
static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1,
						const struct vlan_hdr *h2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return *(u32 *)h1 ^ *(u32 *)h2;
#else
	return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) |
	       ((__force u32)h1->h_vlan_encapsulated_proto ^
		(__force u32)h2->h_vlan_encapsulated_proto);
#endif
}
#endif /* !(_LINUX_IF_VLAN_H_) */