xref: /OK3568_Linux_fs/kernel/drivers/net/tun.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *    Added ethtool API.
 *    Minor cleanups
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */
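
/*
 * Illustrative userspace usage (a minimal sketch, not part of the driver;
 * the interface name "tun0" is an arbitrary example):
 *
 *	int fd = open("/dev/net/tun", O_RDWR);
 *	struct ifreq ifr = { .ifr_flags = IFF_TUN | IFF_NO_PI };
 *	strncpy(ifr.ifr_name, "tun0", IFNAMSIZ);
 *	ioctl(fd, TUNSETIFF, &ifr);
 *
 * After TUNSETIFF succeeds, read() on fd returns packets routed to the
 * interface and write() injects packets into the kernel network stack.
 */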

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/ip_tunnels.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>
#include <linux/ieee802154.h>
#include <linux/if_ltalk.h>
#include <uapi/linux/if_fddi.h>
#include <uapi/linux/if_hippi.h>
#include <uapi/linux/if_fc.h>
#include <net/ax25.h>
#include <net/rose.h>
#include <net/6lowpan.h>

#include <linux/uaccess.h>
#include <linux/proc_fs.h>

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd);

#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags,
 * overload it to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE     0x80000000
#define TUN_VNET_BE     0x40000000

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int    count;    /* Number of addrs. Zero means disabled */
	u32             mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};

/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to the max number of vCPUs in a guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)

struct tun_pcpu_stats {
	u64_stats_t rx_packets;
	u64_stats_t rx_bytes;
	u64_stats_t tx_packets;
	u64_stats_t tx_bytes;
	struct u64_stats_sync syncp;
	u32 rx_dropped;
	u32 tx_dropped;
	u32 rx_frame_errors;
};

/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and
 * tap_filter) to serve as one transmit queue for the tuntap device. The
 * sock_fprog and tap_filter are kept in tun_struct since they are used
 * for filtering on the netdevice, not on a specific queue (at least I
 * didn't see the requirement for this).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
 * other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	bool napi_frags_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct ptr_ring tx_ring;
	struct xdp_rxq_info xdp_rxq;
};

struct tun_page {
	struct page *page;
	int count;
};

struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated ____cacheline_aligned_in_smp;
};

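/* The flow table below is sized to a power of two so tun_hashfn() can
 * reduce a 32-bit rxhash with a cheap mask instead of a modulus.
 */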
#define TUN_NUM_FLOW_ENTRIES 1024
#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)

struct tun_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};

/* Since the socket was moved to tun_file, to preserve the behavior of a
 * persistent device, the socket filter, sndbuf and vnet header size are
 * restored when the file is attached to a persistent device.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int            numqueues;
	unsigned int 		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6)

	int			align;
	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
	u32			msg_enable;
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	struct tun_pcpu_stats __percpu *pcpu_stats;
	struct bpf_prog __rcu *xdp_prog;
	struct tun_prog __rcu *steering_prog;
	struct tun_prog __rcu *filter_prog;
	struct ethtool_link_ksettings link_ksettings;
	/* init args */
	struct file *file;
	struct ifreq *ifr;
};

struct veth {
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
};

static void tun_flow_init(struct tun_struct *tun);
static void tun_flow_uninit(struct tun_struct *tun);

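/* NAPI receive path: skbs queued by the write()/sendmsg() side on
 * sk_write_queue are spliced to a private list under the queue lock,
 * fed to GRO with the lock released, and any unprocessed remainder is
 * spliced back to the head of the queue for the next poll.
 */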
static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}

static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}

static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en, bool napi_frags)
{
	tfile->napi_enabled = napi_en;
	tfile->napi_frags_enabled = napi_en && napi_frags;
	if (napi_en) {
		netif_tx_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
				  NAPI_POLL_WEIGHT);
		napi_enable(&tfile->napi);
	}
}

static void tun_napi_enable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_enable(&tfile->napi);
}

static void tun_napi_disable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_file *tfile)
{
	return tfile->napi_frags_enabled;
}

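/* vnet header endianness: TUN_VNET_LE (TUNSETVNETLE) forces little-endian
 * virtio headers; with CONFIG_TUN_VNET_CROSS_LE userspace may also force
 * big-endian via TUNSETVNETBE (e.g. a big-endian guest on a little-endian
 * host), otherwise those ioctls report -EINVAL.
 */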
#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}

static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & TUN_MASK_FLOW_ENTRIES;
}

static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		netif_info(tun, tx_queued, tun->dev,
			   "create flow: hash %u index %u\n",
			   rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}

static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n",
		   e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}

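/* Timer-driven garbage collection: drop flow entries that have been idle
 * longer than ageing_time and re-arm the timer for the earliest remaining
 * expiry; the timer stays off while the table is empty.
 */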
static void tun_flow_cleanup(struct timer_list *t)
{
	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies)) {
				tun_flow_delete(tun, e);
				continue;
			}
			count++;
			if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}

static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		if (READ_ONCE(e->queue_index) != queue_index)
			WRITE_ONCE(e->queue_index, queue_index);
		if (e->updated != jiffies)
			e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

	rcu_read_unlock();
}

/* Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}

/* We try to identify a flow through its rxhash. The reason that
 * we do not check rxq no. is because some cards (e.g. 82599) choose
 * the rxq based on the txq where the last packet of the flow comes
 * from. As the userspace application moves between processors, we may
 * get a different rxq no. here.
 */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	numqueues = READ_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
	if (e) {
		tun_flow_save_rps_rxhash(e, txq);
		txq = e->queue_index;
	} else {
		/* No known flow: scale the 32-bit hash into [0, numqueues)
		 * with a multiply and shift instead of an expensive divide.
		 */
		txq = ((u64)txq * numqueues) >> 32;
	}

	return txq;
}

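/* When a steering program is attached via TUNSETSTEERINGEBPF, its return
 * value selects the tx queue; it is reduced modulo numqueues so an
 * out-of-range index still maps to a valid queue.
 */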
static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u32 numqueues;
	u16 ret = 0;

	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues)
		return 0;

	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);

	return ret % numqueues;
}

static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;

	rcu_read_lock();
	if (rcu_dereference(tun->steering_prog))
		ret = tun_ebpf_select_queue(tun, skb);
	else
		ret = tun_automq_select_queue(tun, skb);
	rcu_read_unlock();

	return ret;
}

static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

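/* Entries in tx_ring are either sk_buffs or xdp_frames; xdp_frame
 * pointers are tagged (low pointer bit) when queued, which is what
 * tun_is_xdp_frame() tests, so each type is returned to its own
 * allocator here.
 */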
void tun_ptr_free(void *ptr)
{
	if (!ptr)
		return;
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		xdp_return_frame(xdpf);
	} else {
		__skb_array_destroy_skb(ptr);
	}
}
EXPORT_SYMBOL_GPL(tun_ptr_free);

static void tun_queue_purge(struct tun_file *tfile)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
		tun_ptr_free(ptr);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}

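/* Detach a queue from the device. With clean == false the queue is only
 * disabled and parked on tun->disabled so a persistent device can
 * re-enable it later; with clean == true all references are dropped, and
 * the netdevice is unregistered once the last queue goes away unless
 * IFF_PERSIST is set.
 */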
static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		if (!tfile->detached)
			tun_napi_disable(tfile);
		tun_napi_del(tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;
		rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
				   NULL);

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else {
			tun_disable_queue(tun, tfile);
			tun_napi_disable(tfile);
		}

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
	}
}

static void tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_struct *tun;
	struct net_device *dev;

	rtnl_lock();
	tun = rtnl_dereference(tfile->tun);
	dev = tun ? tun->dev : NULL;
	__tun_detach(tfile, clean);
	if (dev)
		netdev_state_change(dev);
	rtnl_unlock();

	if (clean)
		sock_put(&tfile->sk);
}

static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_napi_del(tfile);
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}

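/* Attach an open tun file as one more queue of the device: fails with
 * -EBUSY for a second queue on a single-queue device and with -E2BIG
 * once MAX_TAP_QUEUES queues (enabled plus disabled) already exist.
 */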
static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi, bool napi_frags,
		      bool publish_tun)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to the persistent device; bail out on
	 * failure rather than on success.
	 */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (err)
			goto out;
	}

	if (!tfile->detached &&
	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
			    GFP_KERNEL, tun_ptr_free)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;

	if (tfile->detached) {
		/* Re-attach detached tfile, updating XDP queue_index */
		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));

		if (tfile->xdp_rxq.queue_index    != tfile->queue_index)
			tfile->xdp_rxq.queue_index = tfile->queue_index;
	} else {
		/* Setup XDP RX-queue info, for new tfile getting attached */
		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
				       tun->dev, tfile->queue_index);
		if (err < 0)
			goto out;
		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
			goto out;
		}
		err = 0;
	}

	if (tfile->detached) {
		tun_enable_queue(tfile);
		tun_napi_enable(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi, napi_frags);
	}

	if (rtnl_dereference(tun->xdp_prog))
		sock_set_flag(&tfile->sk, SOCK_XDP);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

	/* Publish tfile->tun and tun->tfiles only after we've fully
	 * initialized tfile; otherwise we risk using half-initialized
	 * object.
	 */
	if (publish_tun)
		rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;
	tun_set_real_num_queues(tun);
out:
	return err;
}

static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}

/* TAP filtering */
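/* Multicast addresses beyond the exact-match slots are hashed into a
 * 64-bit bitmap: the top six bits of the Ethernet CRC pick one of the
 * 64 bits spread across mask[0] and mask[1].
 */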
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	return mask[n >> 5] & (1 << (n & 31));
}

static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks, which is
	 * perfectly safe: we disable it first, so in the worst case
	 * we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;
free_addr:
	kfree(addr);
	return err;
}

/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_header() is
	 * incorrect at this point. */
955*4882a593Smuzhiyun 	struct ethhdr *eh = (struct ethhdr *) skb->data;
956*4882a593Smuzhiyun 	int i;
957*4882a593Smuzhiyun 
958*4882a593Smuzhiyun 	/* Exact match */
959*4882a593Smuzhiyun 	for (i = 0; i < filter->count; i++)
960*4882a593Smuzhiyun 		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
961*4882a593Smuzhiyun 			return 1;
962*4882a593Smuzhiyun 
963*4882a593Smuzhiyun 	/* Inexact match (multicast only) */
964*4882a593Smuzhiyun 	if (is_multicast_ether_addr(eh->h_dest))
965*4882a593Smuzhiyun 		return addr_hash_test(filter->mask, eh->h_dest);
966*4882a593Smuzhiyun 
967*4882a593Smuzhiyun 	return 0;
968*4882a593Smuzhiyun }
969*4882a593Smuzhiyun 
970*4882a593Smuzhiyun /*
971*4882a593Smuzhiyun  * Checks whether the packet is accepted or not.
972*4882a593Smuzhiyun  * Returns: 0 - drop, !=0 - accept
973*4882a593Smuzhiyun  */
check_filter(struct tap_filter * filter,const struct sk_buff * skb)974*4882a593Smuzhiyun static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
975*4882a593Smuzhiyun {
976*4882a593Smuzhiyun 	if (!filter->count)
977*4882a593Smuzhiyun 		return 1;
978*4882a593Smuzhiyun 
979*4882a593Smuzhiyun 	return run_filter(filter, skb);
980*4882a593Smuzhiyun }
981*4882a593Smuzhiyun 
982*4882a593Smuzhiyun /* Network device part of the driver */
983*4882a593Smuzhiyun 
984*4882a593Smuzhiyun static const struct ethtool_ops tun_ethtool_ops;
985*4882a593Smuzhiyun 
tun_net_init(struct net_device * dev)986*4882a593Smuzhiyun static int tun_net_init(struct net_device *dev)
987*4882a593Smuzhiyun {
988*4882a593Smuzhiyun 	struct tun_struct *tun = netdev_priv(dev);
989*4882a593Smuzhiyun 	struct ifreq *ifr = tun->ifr;
990*4882a593Smuzhiyun 	int err;
991*4882a593Smuzhiyun 
992*4882a593Smuzhiyun 	tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
993*4882a593Smuzhiyun 	if (!tun->pcpu_stats)
994*4882a593Smuzhiyun 		return -ENOMEM;
995*4882a593Smuzhiyun 
996*4882a593Smuzhiyun 	spin_lock_init(&tun->lock);
997*4882a593Smuzhiyun 
998*4882a593Smuzhiyun 	err = security_tun_dev_alloc_security(&tun->security);
999*4882a593Smuzhiyun 	if (err < 0) {
1000*4882a593Smuzhiyun 		free_percpu(tun->pcpu_stats);
1001*4882a593Smuzhiyun 		return err;
1002*4882a593Smuzhiyun 	}
1003*4882a593Smuzhiyun 
1004*4882a593Smuzhiyun 	tun_flow_init(tun);
1005*4882a593Smuzhiyun 
1006*4882a593Smuzhiyun 	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
1007*4882a593Smuzhiyun 			   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
1008*4882a593Smuzhiyun 			   NETIF_F_HW_VLAN_STAG_TX;
1009*4882a593Smuzhiyun 	dev->features = dev->hw_features | NETIF_F_LLTX;
1010*4882a593Smuzhiyun 	dev->vlan_features = dev->features &
1011*4882a593Smuzhiyun 			     ~(NETIF_F_HW_VLAN_CTAG_TX |
1012*4882a593Smuzhiyun 			       NETIF_F_HW_VLAN_STAG_TX);
1013*4882a593Smuzhiyun 
1014*4882a593Smuzhiyun 	tun->flags = (tun->flags & ~TUN_FEATURES) |
1015*4882a593Smuzhiyun 		      (ifr->ifr_flags & TUN_FEATURES);
1016*4882a593Smuzhiyun 
1017*4882a593Smuzhiyun 	INIT_LIST_HEAD(&tun->disabled);
1018*4882a593Smuzhiyun 	err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI,
1019*4882a593Smuzhiyun 			 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
1020*4882a593Smuzhiyun 	if (err < 0) {
1021*4882a593Smuzhiyun 		tun_flow_uninit(tun);
1022*4882a593Smuzhiyun 		security_tun_dev_free_security(tun->security);
1023*4882a593Smuzhiyun 		free_percpu(tun->pcpu_stats);
1024*4882a593Smuzhiyun 		return err;
1025*4882a593Smuzhiyun 	}
1026*4882a593Smuzhiyun 	return 0;
1027*4882a593Smuzhiyun }
1028*4882a593Smuzhiyun 
1029*4882a593Smuzhiyun /* Net device detach from fd. */
tun_net_uninit(struct net_device * dev)1030*4882a593Smuzhiyun static void tun_net_uninit(struct net_device *dev)
1031*4882a593Smuzhiyun {
1032*4882a593Smuzhiyun 	tun_detach_all(dev);
1033*4882a593Smuzhiyun }
1034*4882a593Smuzhiyun 
1035*4882a593Smuzhiyun /* Net device open. */
tun_net_open(struct net_device * dev)1036*4882a593Smuzhiyun static int tun_net_open(struct net_device *dev)
1037*4882a593Smuzhiyun {
1038*4882a593Smuzhiyun 	netif_tx_start_all_queues(dev);
1039*4882a593Smuzhiyun 
1040*4882a593Smuzhiyun 	return 0;
1041*4882a593Smuzhiyun }
1042*4882a593Smuzhiyun 
1043*4882a593Smuzhiyun /* Net device close. */
tun_net_close(struct net_device * dev)1044*4882a593Smuzhiyun static int tun_net_close(struct net_device *dev)
1045*4882a593Smuzhiyun {
1046*4882a593Smuzhiyun 	netif_tx_stop_all_queues(dev);
1047*4882a593Smuzhiyun 	return 0;
1048*4882a593Smuzhiyun }
1049*4882a593Smuzhiyun 
1050*4882a593Smuzhiyun /* Net device start xmit */
tun_automq_xmit(struct tun_struct * tun,struct sk_buff * skb)1051*4882a593Smuzhiyun static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
1052*4882a593Smuzhiyun {
1053*4882a593Smuzhiyun #ifdef CONFIG_RPS
1054*4882a593Smuzhiyun 	if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
1055*4882a593Smuzhiyun 		/* Select queue was not called for the skbuff, so we extract the
1056*4882a593Smuzhiyun 		 * RPS hash and save it into the flow_table here.
1057*4882a593Smuzhiyun 		 */
1058*4882a593Smuzhiyun 		struct tun_flow_entry *e;
1059*4882a593Smuzhiyun 		__u32 rxhash;
1060*4882a593Smuzhiyun 
1061*4882a593Smuzhiyun 		rxhash = __skb_get_hash_symmetric(skb);
1062*4882a593Smuzhiyun 		e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
1063*4882a593Smuzhiyun 		if (e)
1064*4882a593Smuzhiyun 			tun_flow_save_rps_rxhash(e, rxhash);
1065*4882a593Smuzhiyun 	}
1066*4882a593Smuzhiyun #endif
1067*4882a593Smuzhiyun }
1068*4882a593Smuzhiyun 
run_ebpf_filter(struct tun_struct * tun,struct sk_buff * skb,int len)1069*4882a593Smuzhiyun static unsigned int run_ebpf_filter(struct tun_struct *tun,
1070*4882a593Smuzhiyun 				    struct sk_buff *skb,
1071*4882a593Smuzhiyun 				    int len)
1072*4882a593Smuzhiyun {
1073*4882a593Smuzhiyun 	struct tun_prog *prog = rcu_dereference(tun->filter_prog);
1074*4882a593Smuzhiyun 
1075*4882a593Smuzhiyun 	if (prog)
1076*4882a593Smuzhiyun 		len = bpf_prog_run_clear_cb(prog->prog, skb);
1077*4882a593Smuzhiyun 
1078*4882a593Smuzhiyun 	return len;
1079*4882a593Smuzhiyun }
1080*4882a593Smuzhiyun 
1081*4882a593Smuzhiyun /* Net device start xmit */
tun_net_xmit(struct sk_buff * skb,struct net_device * dev)1082*4882a593Smuzhiyun static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
1083*4882a593Smuzhiyun {
1084*4882a593Smuzhiyun 	struct tun_struct *tun = netdev_priv(dev);
1085*4882a593Smuzhiyun 	int txq = skb->queue_mapping;
1086*4882a593Smuzhiyun 	struct netdev_queue *queue;
1087*4882a593Smuzhiyun 	struct tun_file *tfile;
1088*4882a593Smuzhiyun 	int len = skb->len;
1089*4882a593Smuzhiyun 
1090*4882a593Smuzhiyun 	rcu_read_lock();
1091*4882a593Smuzhiyun 	tfile = rcu_dereference(tun->tfiles[txq]);
1092*4882a593Smuzhiyun 
1093*4882a593Smuzhiyun 	/* Drop packet if interface is not attached */
1094*4882a593Smuzhiyun 	if (!tfile)
1095*4882a593Smuzhiyun 		goto drop;
1096*4882a593Smuzhiyun 
1097*4882a593Smuzhiyun 	if (!rcu_dereference(tun->steering_prog))
1098*4882a593Smuzhiyun 		tun_automq_xmit(tun, skb);
1099*4882a593Smuzhiyun 
1100*4882a593Smuzhiyun 	netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len);
1101*4882a593Smuzhiyun 
1102*4882a593Smuzhiyun 	/* Drop if the filter does not like it.
1103*4882a593Smuzhiyun 	 * This is a noop if the filter is disabled.
1104*4882a593Smuzhiyun 	 * Filter can be enabled only for the TAP devices. */
1105*4882a593Smuzhiyun 	if (!check_filter(&tun->txflt, skb))
1106*4882a593Smuzhiyun 		goto drop;
1107*4882a593Smuzhiyun 
1108*4882a593Smuzhiyun 	if (tfile->socket.sk->sk_filter &&
1109*4882a593Smuzhiyun 	    sk_filter(tfile->socket.sk, skb))
1110*4882a593Smuzhiyun 		goto drop;
1111*4882a593Smuzhiyun 
1112*4882a593Smuzhiyun 	len = run_ebpf_filter(tun, skb, len);
1113*4882a593Smuzhiyun 	if (len == 0 || pskb_trim(skb, len))
1114*4882a593Smuzhiyun 		goto drop;
1115*4882a593Smuzhiyun 
1116*4882a593Smuzhiyun 	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
1117*4882a593Smuzhiyun 		goto drop;
1118*4882a593Smuzhiyun 
1119*4882a593Smuzhiyun 	skb_tx_timestamp(skb);
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun 	/* Orphan the skb - required as we might hang on to it
1122*4882a593Smuzhiyun 	 * for indefinite time.
1123*4882a593Smuzhiyun 	 */
1124*4882a593Smuzhiyun 	skb_orphan(skb);
1125*4882a593Smuzhiyun 
1126*4882a593Smuzhiyun 	nf_reset_ct(skb);
1127*4882a593Smuzhiyun 
1128*4882a593Smuzhiyun 	if (ptr_ring_produce(&tfile->tx_ring, skb))
1129*4882a593Smuzhiyun 		goto drop;
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun 	/* NETIF_F_LLTX requires to do our own update of trans_start */
1132*4882a593Smuzhiyun 	queue = netdev_get_tx_queue(dev, txq);
1133*4882a593Smuzhiyun 	queue->trans_start = jiffies;
1134*4882a593Smuzhiyun 
1135*4882a593Smuzhiyun 	/* Notify and wake up reader process */
1136*4882a593Smuzhiyun 	if (tfile->flags & TUN_FASYNC)
1137*4882a593Smuzhiyun 		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
1138*4882a593Smuzhiyun 	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
1139*4882a593Smuzhiyun 
1140*4882a593Smuzhiyun 	rcu_read_unlock();
1141*4882a593Smuzhiyun 	return NETDEV_TX_OK;
1142*4882a593Smuzhiyun 
1143*4882a593Smuzhiyun drop:
1144*4882a593Smuzhiyun 	this_cpu_inc(tun->pcpu_stats->tx_dropped);
1145*4882a593Smuzhiyun 	skb_tx_error(skb);
1146*4882a593Smuzhiyun 	kfree_skb(skb);
1147*4882a593Smuzhiyun 	rcu_read_unlock();
1148*4882a593Smuzhiyun 	return NET_XMIT_DROP;
1149*4882a593Smuzhiyun }
1150*4882a593Smuzhiyun 
tun_net_mclist(struct net_device * dev)1151*4882a593Smuzhiyun static void tun_net_mclist(struct net_device *dev)
1152*4882a593Smuzhiyun {
1153*4882a593Smuzhiyun 	/*
1154*4882a593Smuzhiyun 	 * This callback is supposed to deal with mc filter in
1155*4882a593Smuzhiyun 	 * _rx_ path and has nothing to do with the _tx_ path.
1156*4882a593Smuzhiyun 	 * In rx path we always accept everything userspace gives us.
1157*4882a593Smuzhiyun 	 */
1158*4882a593Smuzhiyun }
1159*4882a593Smuzhiyun 
tun_net_fix_features(struct net_device * dev,netdev_features_t features)1160*4882a593Smuzhiyun static netdev_features_t tun_net_fix_features(struct net_device *dev,
1161*4882a593Smuzhiyun 	netdev_features_t features)
1162*4882a593Smuzhiyun {
1163*4882a593Smuzhiyun 	struct tun_struct *tun = netdev_priv(dev);
1164*4882a593Smuzhiyun 
1165*4882a593Smuzhiyun 	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
1166*4882a593Smuzhiyun }
1167*4882a593Smuzhiyun 
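/* Worked example (editor's illustration): if TUNSETOFFLOAD disabled TSO,
 * so tun->set_features lacks NETIF_F_TSO, then a requested feature set of
 * (NETIF_F_TSO | NETIF_F_HIGHDMA) is reduced to NETIF_F_HIGHDMA above:
 * the first term keeps only the user-controlled bits that are currently
 * enabled, the second passes through everything outside TUN_USER_FEATURES.
 */
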
1168*4882a593Smuzhiyun static void tun_set_headroom(struct net_device *dev, int new_hr)
1169*4882a593Smuzhiyun {
1170*4882a593Smuzhiyun 	struct tun_struct *tun = netdev_priv(dev);
1171*4882a593Smuzhiyun 
1172*4882a593Smuzhiyun 	if (new_hr < NET_SKB_PAD)
1173*4882a593Smuzhiyun 		new_hr = NET_SKB_PAD;
1174*4882a593Smuzhiyun 
1175*4882a593Smuzhiyun 	tun->align = new_hr;
1176*4882a593Smuzhiyun }
1177*4882a593Smuzhiyun 
1178*4882a593Smuzhiyun static void
1179*4882a593Smuzhiyun tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1180*4882a593Smuzhiyun {
1181*4882a593Smuzhiyun 	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
1182*4882a593Smuzhiyun 	struct tun_struct *tun = netdev_priv(dev);
1183*4882a593Smuzhiyun 	struct tun_pcpu_stats *p;
1184*4882a593Smuzhiyun 	int i;
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun 	for_each_possible_cpu(i) {
1187*4882a593Smuzhiyun 		u64 rxpackets, rxbytes, txpackets, txbytes;
1188*4882a593Smuzhiyun 		unsigned int start;
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun 		p = per_cpu_ptr(tun->pcpu_stats, i);
1191*4882a593Smuzhiyun 		do {
1192*4882a593Smuzhiyun 			start = u64_stats_fetch_begin(&p->syncp);
1193*4882a593Smuzhiyun 			rxpackets	= u64_stats_read(&p->rx_packets);
1194*4882a593Smuzhiyun 			rxbytes		= u64_stats_read(&p->rx_bytes);
1195*4882a593Smuzhiyun 			txpackets	= u64_stats_read(&p->tx_packets);
1196*4882a593Smuzhiyun 			txbytes		= u64_stats_read(&p->tx_bytes);
1197*4882a593Smuzhiyun 		} while (u64_stats_fetch_retry(&p->syncp, start));
1198*4882a593Smuzhiyun 
1199*4882a593Smuzhiyun 		stats->rx_packets	+= rxpackets;
1200*4882a593Smuzhiyun 		stats->rx_bytes		+= rxbytes;
1201*4882a593Smuzhiyun 		stats->tx_packets	+= txpackets;
1202*4882a593Smuzhiyun 		stats->tx_bytes		+= txbytes;
1203*4882a593Smuzhiyun 
1204*4882a593Smuzhiyun 		/* u32 counters */
1205*4882a593Smuzhiyun 		rx_dropped	+= p->rx_dropped;
1206*4882a593Smuzhiyun 		rx_frame_errors	+= p->rx_frame_errors;
1207*4882a593Smuzhiyun 		tx_dropped	+= p->tx_dropped;
1208*4882a593Smuzhiyun 	}
1209*4882a593Smuzhiyun 	stats->rx_dropped  = rx_dropped;
1210*4882a593Smuzhiyun 	stats->rx_frame_errors = rx_frame_errors;
1211*4882a593Smuzhiyun 	stats->tx_dropped = tx_dropped;
1212*4882a593Smuzhiyun }
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1215*4882a593Smuzhiyun 		       struct netlink_ext_ack *extack)
1216*4882a593Smuzhiyun {
1217*4882a593Smuzhiyun 	struct tun_struct *tun = netdev_priv(dev);
1218*4882a593Smuzhiyun 	struct tun_file *tfile;
1219*4882a593Smuzhiyun 	struct bpf_prog *old_prog;
1220*4882a593Smuzhiyun 	int i;
1221*4882a593Smuzhiyun 
1222*4882a593Smuzhiyun 	old_prog = rtnl_dereference(tun->xdp_prog);
1223*4882a593Smuzhiyun 	rcu_assign_pointer(tun->xdp_prog, prog);
1224*4882a593Smuzhiyun 	if (old_prog)
1225*4882a593Smuzhiyun 		bpf_prog_put(old_prog);
1226*4882a593Smuzhiyun 
1227*4882a593Smuzhiyun 	for (i = 0; i < tun->numqueues; i++) {
1228*4882a593Smuzhiyun 		tfile = rtnl_dereference(tun->tfiles[i]);
1229*4882a593Smuzhiyun 		if (prog)
1230*4882a593Smuzhiyun 			sock_set_flag(&tfile->sk, SOCK_XDP);
1231*4882a593Smuzhiyun 		else
1232*4882a593Smuzhiyun 			sock_reset_flag(&tfile->sk, SOCK_XDP);
1233*4882a593Smuzhiyun 	}
1234*4882a593Smuzhiyun 	list_for_each_entry(tfile, &tun->disabled, next) {
1235*4882a593Smuzhiyun 		if (prog)
1236*4882a593Smuzhiyun 			sock_set_flag(&tfile->sk, SOCK_XDP);
1237*4882a593Smuzhiyun 		else
1238*4882a593Smuzhiyun 			sock_reset_flag(&tfile->sk, SOCK_XDP);
1239*4882a593Smuzhiyun 	}
1240*4882a593Smuzhiyun 
1241*4882a593Smuzhiyun 	return 0;
1242*4882a593Smuzhiyun }
1243*4882a593Smuzhiyun 
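/* Editor's illustration, not part of the driver: tun_xdp_set() is reached
 * from userspace through the netlink XDP attach path. A hedged sketch
 * using libbpf (bpf_xdp_attach() is assumed from a recent libbpf; older
 * versions used bpf_set_link_xdp_fd() instead):
 */
#if 0	/* userspace example, kept out of the kernel build */
#include <bpf/libbpf.h>
#include <net/if.h>

static int attach_xdp_to_tap(const char *ifname, int prog_fd)
{
	int ifindex = if_nametoindex(ifname);

	if (!ifindex)
		return -1;
	/* Ends up in tun_xdp_set() via .ndo_bpf (XDP_SETUP_PROG). */
	return bpf_xdp_attach(ifindex, prog_fd, 0, NULL);
}
#endif
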
1244*4882a593Smuzhiyun static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1245*4882a593Smuzhiyun {
1246*4882a593Smuzhiyun 	switch (xdp->command) {
1247*4882a593Smuzhiyun 	case XDP_SETUP_PROG:
1248*4882a593Smuzhiyun 		return tun_xdp_set(dev, xdp->prog, xdp->extack);
1249*4882a593Smuzhiyun 	default:
1250*4882a593Smuzhiyun 		return -EINVAL;
1251*4882a593Smuzhiyun 	}
1252*4882a593Smuzhiyun }
1253*4882a593Smuzhiyun 
1254*4882a593Smuzhiyun static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
1255*4882a593Smuzhiyun {
1256*4882a593Smuzhiyun 	if (new_carrier) {
1257*4882a593Smuzhiyun 		struct tun_struct *tun = netdev_priv(dev);
1258*4882a593Smuzhiyun 
1259*4882a593Smuzhiyun 		if (!tun->numqueues)
1260*4882a593Smuzhiyun 			return -EPERM;
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun 		netif_carrier_on(dev);
1263*4882a593Smuzhiyun 	} else {
1264*4882a593Smuzhiyun 		netif_carrier_off(dev);
1265*4882a593Smuzhiyun 	}
1266*4882a593Smuzhiyun 	return 0;
1267*4882a593Smuzhiyun }
1268*4882a593Smuzhiyun 
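/* Editor's illustration, not part of the driver: this handler is also
 * reachable from the TUNSETCARRIER ioctl, which takes a pointer to an int.
 * A hedged sketch:
 */
#if 0	/* userspace example, kept out of the kernel build */
#include <sys/ioctl.h>
#include <linux/if_tun.h>

static int tun_set_carrier(int tun_fd, int up)
{
	/* Forwards to tun_net_change_carrier(); raising the carrier
	 * fails with -EPERM while no queue is attached (see above).
	 */
	return ioctl(tun_fd, TUNSETCARRIER, &up);
}
#endif
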
1269*4882a593Smuzhiyun static const struct net_device_ops tun_netdev_ops = {
1270*4882a593Smuzhiyun 	.ndo_init		= tun_net_init,
1271*4882a593Smuzhiyun 	.ndo_uninit		= tun_net_uninit,
1272*4882a593Smuzhiyun 	.ndo_open		= tun_net_open,
1273*4882a593Smuzhiyun 	.ndo_stop		= tun_net_close,
1274*4882a593Smuzhiyun 	.ndo_start_xmit		= tun_net_xmit,
1275*4882a593Smuzhiyun 	.ndo_fix_features	= tun_net_fix_features,
1276*4882a593Smuzhiyun 	.ndo_select_queue	= tun_select_queue,
1277*4882a593Smuzhiyun 	.ndo_set_rx_headroom	= tun_set_headroom,
1278*4882a593Smuzhiyun 	.ndo_get_stats64	= tun_net_get_stats64,
1279*4882a593Smuzhiyun 	.ndo_change_carrier	= tun_net_change_carrier,
1280*4882a593Smuzhiyun };
1281*4882a593Smuzhiyun 
1282*4882a593Smuzhiyun static void __tun_xdp_flush_tfile(struct tun_file *tfile)
1283*4882a593Smuzhiyun {
1284*4882a593Smuzhiyun 	/* Notify and wake up reader process */
1285*4882a593Smuzhiyun 	if (tfile->flags & TUN_FASYNC)
1286*4882a593Smuzhiyun 		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
1287*4882a593Smuzhiyun 	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
1288*4882a593Smuzhiyun }
1289*4882a593Smuzhiyun 
1290*4882a593Smuzhiyun static int tun_xdp_xmit(struct net_device *dev, int n,
1291*4882a593Smuzhiyun 			struct xdp_frame **frames, u32 flags)
1292*4882a593Smuzhiyun {
1293*4882a593Smuzhiyun 	struct tun_struct *tun = netdev_priv(dev);
1294*4882a593Smuzhiyun 	struct tun_file *tfile;
1295*4882a593Smuzhiyun 	u32 numqueues;
1296*4882a593Smuzhiyun 	int drops = 0;
1297*4882a593Smuzhiyun 	int cnt = n;
1298*4882a593Smuzhiyun 	int i;
1299*4882a593Smuzhiyun 
1300*4882a593Smuzhiyun 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1301*4882a593Smuzhiyun 		return -EINVAL;
1302*4882a593Smuzhiyun 
1303*4882a593Smuzhiyun 	rcu_read_lock();
1304*4882a593Smuzhiyun 
1305*4882a593Smuzhiyun resample:
1306*4882a593Smuzhiyun 	numqueues = READ_ONCE(tun->numqueues);
1307*4882a593Smuzhiyun 	if (!numqueues) {
1308*4882a593Smuzhiyun 		rcu_read_unlock();
1309*4882a593Smuzhiyun 		return -ENXIO; /* Caller will free/return all frames */
1310*4882a593Smuzhiyun 	}
1311*4882a593Smuzhiyun 
1312*4882a593Smuzhiyun 	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
1313*4882a593Smuzhiyun 					    numqueues]);
1314*4882a593Smuzhiyun 	if (unlikely(!tfile))
1315*4882a593Smuzhiyun 		goto resample;
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun 	spin_lock(&tfile->tx_ring.producer_lock);
1318*4882a593Smuzhiyun 	for (i = 0; i < n; i++) {
1319*4882a593Smuzhiyun 		struct xdp_frame *xdp = frames[i];
1320*4882a593Smuzhiyun 		/* Encode the XDP flag into the lowest bit so the consumer
1321*4882a593Smuzhiyun 		 * can distinguish an XDP buffer from an sk_buff.
1322*4882a593Smuzhiyun 		 */
1323*4882a593Smuzhiyun 		void *frame = tun_xdp_to_ptr(xdp);
1324*4882a593Smuzhiyun 
1325*4882a593Smuzhiyun 		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
1326*4882a593Smuzhiyun 			this_cpu_inc(tun->pcpu_stats->tx_dropped);
1327*4882a593Smuzhiyun 			xdp_return_frame_rx_napi(xdp);
1328*4882a593Smuzhiyun 			drops++;
1329*4882a593Smuzhiyun 		}
1330*4882a593Smuzhiyun 	}
1331*4882a593Smuzhiyun 	spin_unlock(&tfile->tx_ring.producer_lock);
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun 	if (flags & XDP_XMIT_FLUSH)
1334*4882a593Smuzhiyun 		__tun_xdp_flush_tfile(tfile);
1335*4882a593Smuzhiyun 
1336*4882a593Smuzhiyun 	rcu_read_unlock();
1337*4882a593Smuzhiyun 	return cnt - drops;
1338*4882a593Smuzhiyun }
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
1341*4882a593Smuzhiyun {
1342*4882a593Smuzhiyun 	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
1343*4882a593Smuzhiyun 
1344*4882a593Smuzhiyun 	if (unlikely(!frame))
1345*4882a593Smuzhiyun 		return -EOVERFLOW;
1346*4882a593Smuzhiyun 
1347*4882a593Smuzhiyun 	return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
1348*4882a593Smuzhiyun }
1349*4882a593Smuzhiyun 
1350*4882a593Smuzhiyun static const struct net_device_ops tap_netdev_ops = {
1351*4882a593Smuzhiyun 	.ndo_init		= tun_net_init,
1352*4882a593Smuzhiyun 	.ndo_uninit		= tun_net_uninit,
1353*4882a593Smuzhiyun 	.ndo_open		= tun_net_open,
1354*4882a593Smuzhiyun 	.ndo_stop		= tun_net_close,
1355*4882a593Smuzhiyun 	.ndo_start_xmit		= tun_net_xmit,
1356*4882a593Smuzhiyun 	.ndo_fix_features	= tun_net_fix_features,
1357*4882a593Smuzhiyun 	.ndo_set_rx_mode	= tun_net_mclist,
1358*4882a593Smuzhiyun 	.ndo_set_mac_address	= eth_mac_addr,
1359*4882a593Smuzhiyun 	.ndo_validate_addr	= eth_validate_addr,
1360*4882a593Smuzhiyun 	.ndo_select_queue	= tun_select_queue,
1361*4882a593Smuzhiyun 	.ndo_features_check	= passthru_features_check,
1362*4882a593Smuzhiyun 	.ndo_set_rx_headroom	= tun_set_headroom,
1363*4882a593Smuzhiyun 	.ndo_get_stats64	= tun_net_get_stats64,
1364*4882a593Smuzhiyun 	.ndo_bpf		= tun_xdp,
1365*4882a593Smuzhiyun 	.ndo_xdp_xmit		= tun_xdp_xmit,
1366*4882a593Smuzhiyun 	.ndo_change_carrier	= tun_net_change_carrier,
1367*4882a593Smuzhiyun };
1368*4882a593Smuzhiyun 
1369*4882a593Smuzhiyun static void tun_flow_init(struct tun_struct *tun)
1370*4882a593Smuzhiyun {
1371*4882a593Smuzhiyun 	int i;
1372*4882a593Smuzhiyun 
1373*4882a593Smuzhiyun 	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
1374*4882a593Smuzhiyun 		INIT_HLIST_HEAD(&tun->flows[i]);
1375*4882a593Smuzhiyun 
1376*4882a593Smuzhiyun 	tun->ageing_time = TUN_FLOW_EXPIRE;
1377*4882a593Smuzhiyun 	timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
1378*4882a593Smuzhiyun 	mod_timer(&tun->flow_gc_timer,
1379*4882a593Smuzhiyun 		  round_jiffies_up(jiffies + tun->ageing_time));
1380*4882a593Smuzhiyun }
1381*4882a593Smuzhiyun 
1382*4882a593Smuzhiyun static void tun_flow_uninit(struct tun_struct *tun)
1383*4882a593Smuzhiyun {
1384*4882a593Smuzhiyun 	del_timer_sync(&tun->flow_gc_timer);
1385*4882a593Smuzhiyun 	tun_flow_flush(tun);
1386*4882a593Smuzhiyun }
1387*4882a593Smuzhiyun 
1388*4882a593Smuzhiyun #define MIN_MTU 68
1389*4882a593Smuzhiyun #define MAX_MTU 65535
1390*4882a593Smuzhiyun 
1391*4882a593Smuzhiyun /* Initialize net device. */
1392*4882a593Smuzhiyun static void tun_net_initialize(struct net_device *dev)
1393*4882a593Smuzhiyun {
1394*4882a593Smuzhiyun 	struct tun_struct *tun = netdev_priv(dev);
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun 	switch (tun->flags & TUN_TYPE_MASK) {
1397*4882a593Smuzhiyun 	case IFF_TUN:
1398*4882a593Smuzhiyun 		dev->netdev_ops = &tun_netdev_ops;
1399*4882a593Smuzhiyun 		dev->header_ops = &ip_tunnel_header_ops;
1400*4882a593Smuzhiyun 
1401*4882a593Smuzhiyun 		/* Point-to-Point TUN Device */
1402*4882a593Smuzhiyun 		dev->hard_header_len = 0;
1403*4882a593Smuzhiyun 		dev->addr_len = 0;
1404*4882a593Smuzhiyun 		dev->mtu = 1500;
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 		/* Zero header length */
1407*4882a593Smuzhiyun 		dev->type = ARPHRD_NONE;
1408*4882a593Smuzhiyun 		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1409*4882a593Smuzhiyun 		break;
1410*4882a593Smuzhiyun 
1411*4882a593Smuzhiyun 	case IFF_TAP:
1412*4882a593Smuzhiyun 		dev->netdev_ops = &tap_netdev_ops;
1413*4882a593Smuzhiyun 		/* Ethernet TAP Device */
1414*4882a593Smuzhiyun 		ether_setup(dev);
1415*4882a593Smuzhiyun 		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1416*4882a593Smuzhiyun 		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1417*4882a593Smuzhiyun 
1418*4882a593Smuzhiyun 		eth_hw_addr_random(dev);
1419*4882a593Smuzhiyun 
1420*4882a593Smuzhiyun 		break;
1421*4882a593Smuzhiyun 	}
1422*4882a593Smuzhiyun 
1423*4882a593Smuzhiyun 	dev->min_mtu = MIN_MTU;
1424*4882a593Smuzhiyun 	dev->max_mtu = MAX_MTU - dev->hard_header_len;
1425*4882a593Smuzhiyun }
1426*4882a593Smuzhiyun 
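/* Editor's illustration, not part of the driver: the IFF_TUN vs IFF_TAP
 * split handled above is chosen by userspace at creation time via the
 * TUNSETIFF ioctl. A hedged sketch:
 */
#if 0	/* userspace example, kept out of the kernel build */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int tun_open(const char *name, short flags)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = flags;	/* IFF_TUN or IFF_TAP, optionally | IFF_NO_PI */
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif
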
1427*4882a593Smuzhiyun static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
1428*4882a593Smuzhiyun {
1429*4882a593Smuzhiyun 	struct sock *sk = tfile->socket.sk;
1430*4882a593Smuzhiyun 
1431*4882a593Smuzhiyun 	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
1432*4882a593Smuzhiyun }
1433*4882a593Smuzhiyun 
1434*4882a593Smuzhiyun /* Character device part */
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun /* Poll */
1437*4882a593Smuzhiyun static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
1438*4882a593Smuzhiyun {
1439*4882a593Smuzhiyun 	struct tun_file *tfile = file->private_data;
1440*4882a593Smuzhiyun 	struct tun_struct *tun = tun_get(tfile);
1441*4882a593Smuzhiyun 	struct sock *sk;
1442*4882a593Smuzhiyun 	__poll_t mask = 0;
1443*4882a593Smuzhiyun 
1444*4882a593Smuzhiyun 	if (!tun)
1445*4882a593Smuzhiyun 		return EPOLLERR;
1446*4882a593Smuzhiyun 
1447*4882a593Smuzhiyun 	sk = tfile->socket.sk;
1448*4882a593Smuzhiyun 
1449*4882a593Smuzhiyun 	poll_wait(file, sk_sleep(sk), wait);
1450*4882a593Smuzhiyun 
1451*4882a593Smuzhiyun 	if (!ptr_ring_empty(&tfile->tx_ring))
1452*4882a593Smuzhiyun 		mask |= EPOLLIN | EPOLLRDNORM;
1453*4882a593Smuzhiyun 
1454*4882a593Smuzhiyun 	/* Make sure SOCKWQ_ASYNC_NOSPACE is set if the socket is not
1455*4882a593Smuzhiyun 	 * writable, to guarantee that EPOLLOUT is raised either here or
1456*4882a593Smuzhiyun 	 * by tun_sock_write_space(). The process can then get a
1457*4882a593Smuzhiyun 	 * notification after it writes to a down device and meets -EIO.
1458*4882a593Smuzhiyun 	 */
1459*4882a593Smuzhiyun 	if (tun_sock_writeable(tun, tfile) ||
1460*4882a593Smuzhiyun 	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
1461*4882a593Smuzhiyun 	     tun_sock_writeable(tun, tfile)))
1462*4882a593Smuzhiyun 		mask |= EPOLLOUT | EPOLLWRNORM;
1463*4882a593Smuzhiyun 
1464*4882a593Smuzhiyun 	if (tun->dev->reg_state != NETREG_REGISTERED)
1465*4882a593Smuzhiyun 		mask = EPOLLERR;
1466*4882a593Smuzhiyun 
1467*4882a593Smuzhiyun 	tun_put(tun);
1468*4882a593Smuzhiyun 	return mask;
1469*4882a593Smuzhiyun }
1470*4882a593Smuzhiyun 
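/* Editor's illustration, not part of the driver: a hedged sketch of the
 * userspace side of tun_chr_poll(), using poll(2):
 */
#if 0	/* userspace example, kept out of the kernel build */
#include <poll.h>

static int tun_wait(int tun_fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = tun_fd, .events = POLLIN | POLLOUT };

	/* tun_chr_poll() reports EPOLLIN once tx_ring is non-empty,
	 * EPOLLOUT while the socket is writable, and EPOLLERR once the
	 * device is gone.
	 */
	return poll(&pfd, 1, timeout_ms);
}
#endif
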
1471*4882a593Smuzhiyun static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
1472*4882a593Smuzhiyun 					    size_t len,
1473*4882a593Smuzhiyun 					    const struct iov_iter *it)
1474*4882a593Smuzhiyun {
1475*4882a593Smuzhiyun 	struct sk_buff *skb;
1476*4882a593Smuzhiyun 	size_t linear;
1477*4882a593Smuzhiyun 	int err;
1478*4882a593Smuzhiyun 	int i;
1479*4882a593Smuzhiyun 
1480*4882a593Smuzhiyun 	if (it->nr_segs > MAX_SKB_FRAGS + 1 ||
1481*4882a593Smuzhiyun 	    len > (ETH_MAX_MTU - NET_SKB_PAD - NET_IP_ALIGN))
1482*4882a593Smuzhiyun 		return ERR_PTR(-EMSGSIZE);
1483*4882a593Smuzhiyun 
1484*4882a593Smuzhiyun 	local_bh_disable();
1485*4882a593Smuzhiyun 	skb = napi_get_frags(&tfile->napi);
1486*4882a593Smuzhiyun 	local_bh_enable();
1487*4882a593Smuzhiyun 	if (!skb)
1488*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
1489*4882a593Smuzhiyun 
1490*4882a593Smuzhiyun 	linear = iov_iter_single_seg_count(it);
1491*4882a593Smuzhiyun 	err = __skb_grow(skb, linear);
1492*4882a593Smuzhiyun 	if (err)
1493*4882a593Smuzhiyun 		goto free;
1494*4882a593Smuzhiyun 
1495*4882a593Smuzhiyun 	skb->len = len;
1496*4882a593Smuzhiyun 	skb->data_len = len - linear;
1497*4882a593Smuzhiyun 	skb->truesize += skb->data_len;
1498*4882a593Smuzhiyun 
1499*4882a593Smuzhiyun 	for (i = 1; i < it->nr_segs; i++) {
1500*4882a593Smuzhiyun 		size_t fragsz = it->iov[i].iov_len;
1501*4882a593Smuzhiyun 		struct page *page;
1502*4882a593Smuzhiyun 		void *frag;
1503*4882a593Smuzhiyun 
1504*4882a593Smuzhiyun 		if (fragsz == 0 || fragsz > PAGE_SIZE) {
1505*4882a593Smuzhiyun 			err = -EINVAL;
1506*4882a593Smuzhiyun 			goto free;
1507*4882a593Smuzhiyun 		}
1508*4882a593Smuzhiyun 		frag = netdev_alloc_frag(fragsz);
1509*4882a593Smuzhiyun 		if (!frag) {
1510*4882a593Smuzhiyun 			err = -ENOMEM;
1511*4882a593Smuzhiyun 			goto free;
1512*4882a593Smuzhiyun 		}
1513*4882a593Smuzhiyun 		page = virt_to_head_page(frag);
1514*4882a593Smuzhiyun 		skb_fill_page_desc(skb, i - 1, page,
1515*4882a593Smuzhiyun 				   frag - page_address(page), fragsz);
1516*4882a593Smuzhiyun 	}
1517*4882a593Smuzhiyun 
1518*4882a593Smuzhiyun 	return skb;
1519*4882a593Smuzhiyun free:
1520*4882a593Smuzhiyun 	/* frees skb and all frags allocated with napi_alloc_frag() */
1521*4882a593Smuzhiyun 	napi_free_frags(&tfile->napi);
1522*4882a593Smuzhiyun 	return ERR_PTR(err);
1523*4882a593Smuzhiyun }
1524*4882a593Smuzhiyun 
1525*4882a593Smuzhiyun /* prepad is the amount to reserve at the front.  len is the length after that.
1526*4882a593Smuzhiyun  * linear is a hint as to how much to copy (usually headers). */
1527*4882a593Smuzhiyun static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
1528*4882a593Smuzhiyun 				     size_t prepad, size_t len,
1529*4882a593Smuzhiyun 				     size_t linear, int noblock)
1530*4882a593Smuzhiyun {
1531*4882a593Smuzhiyun 	struct sock *sk = tfile->socket.sk;
1532*4882a593Smuzhiyun 	struct sk_buff *skb;
1533*4882a593Smuzhiyun 	int err;
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun 	/* Under a page?  Don't bother with paged skb. */
1536*4882a593Smuzhiyun 	if (prepad + len < PAGE_SIZE || !linear)
1537*4882a593Smuzhiyun 		linear = len;
1538*4882a593Smuzhiyun 
1539*4882a593Smuzhiyun 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
1540*4882a593Smuzhiyun 				   &err, 0);
1541*4882a593Smuzhiyun 	if (!skb)
1542*4882a593Smuzhiyun 		return ERR_PTR(err);
1543*4882a593Smuzhiyun 
1544*4882a593Smuzhiyun 	skb_reserve(skb, prepad);
1545*4882a593Smuzhiyun 	skb_put(skb, linear);
1546*4882a593Smuzhiyun 	skb->data_len = len - linear;
1547*4882a593Smuzhiyun 	skb->len += len - linear;
1548*4882a593Smuzhiyun 
1549*4882a593Smuzhiyun 	return skb;
1550*4882a593Smuzhiyun }
1551*4882a593Smuzhiyun 
1552*4882a593Smuzhiyun static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
1553*4882a593Smuzhiyun 			   struct sk_buff *skb, int more)
1554*4882a593Smuzhiyun {
1555*4882a593Smuzhiyun 	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
1556*4882a593Smuzhiyun 	struct sk_buff_head process_queue;
1557*4882a593Smuzhiyun 	u32 rx_batched = tun->rx_batched;
1558*4882a593Smuzhiyun 	bool rcv = false;
1559*4882a593Smuzhiyun 
1560*4882a593Smuzhiyun 	if (!rx_batched || (!more && skb_queue_empty(queue))) {
1561*4882a593Smuzhiyun 		local_bh_disable();
1562*4882a593Smuzhiyun 		skb_record_rx_queue(skb, tfile->queue_index);
1563*4882a593Smuzhiyun 		netif_receive_skb(skb);
1564*4882a593Smuzhiyun 		local_bh_enable();
1565*4882a593Smuzhiyun 		return;
1566*4882a593Smuzhiyun 	}
1567*4882a593Smuzhiyun 
1568*4882a593Smuzhiyun 	spin_lock(&queue->lock);
1569*4882a593Smuzhiyun 	if (!more || skb_queue_len(queue) == rx_batched) {
1570*4882a593Smuzhiyun 		__skb_queue_head_init(&process_queue);
1571*4882a593Smuzhiyun 		skb_queue_splice_tail_init(queue, &process_queue);
1572*4882a593Smuzhiyun 		rcv = true;
1573*4882a593Smuzhiyun 	} else {
1574*4882a593Smuzhiyun 		__skb_queue_tail(queue, skb);
1575*4882a593Smuzhiyun 	}
1576*4882a593Smuzhiyun 	spin_unlock(&queue->lock);
1577*4882a593Smuzhiyun 
1578*4882a593Smuzhiyun 	if (rcv) {
1579*4882a593Smuzhiyun 		struct sk_buff *nskb;
1580*4882a593Smuzhiyun 
1581*4882a593Smuzhiyun 		local_bh_disable();
1582*4882a593Smuzhiyun 		while ((nskb = __skb_dequeue(&process_queue))) {
1583*4882a593Smuzhiyun 			skb_record_rx_queue(nskb, tfile->queue_index);
1584*4882a593Smuzhiyun 			netif_receive_skb(nskb);
1585*4882a593Smuzhiyun 		}
1586*4882a593Smuzhiyun 		skb_record_rx_queue(skb, tfile->queue_index);
1587*4882a593Smuzhiyun 		netif_receive_skb(skb);
1588*4882a593Smuzhiyun 		local_bh_enable();
1589*4882a593Smuzhiyun 	}
1590*4882a593Smuzhiyun }
1591*4882a593Smuzhiyun 
1592*4882a593Smuzhiyun static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
1593*4882a593Smuzhiyun 			      int len, int noblock, bool zerocopy)
1594*4882a593Smuzhiyun {
1595*4882a593Smuzhiyun 	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
1596*4882a593Smuzhiyun 		return false;
1597*4882a593Smuzhiyun 
1598*4882a593Smuzhiyun 	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
1599*4882a593Smuzhiyun 		return false;
1600*4882a593Smuzhiyun 
1601*4882a593Smuzhiyun 	if (!noblock)
1602*4882a593Smuzhiyun 		return false;
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun 	if (zerocopy)
1605*4882a593Smuzhiyun 		return false;
1606*4882a593Smuzhiyun 
1607*4882a593Smuzhiyun 	if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
1608*4882a593Smuzhiyun 	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
1609*4882a593Smuzhiyun 		return false;
1610*4882a593Smuzhiyun 
1611*4882a593Smuzhiyun 	return true;
1612*4882a593Smuzhiyun }
1613*4882a593Smuzhiyun 
1614*4882a593Smuzhiyun static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
1615*4882a593Smuzhiyun 				       struct page_frag *alloc_frag, char *buf,
1616*4882a593Smuzhiyun 				       int buflen, int len, int pad)
1617*4882a593Smuzhiyun {
1618*4882a593Smuzhiyun 	struct sk_buff *skb = build_skb(buf, buflen);
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun 	if (!skb)
1621*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
1622*4882a593Smuzhiyun 
1623*4882a593Smuzhiyun 	skb_reserve(skb, pad);
1624*4882a593Smuzhiyun 	skb_put(skb, len);
1625*4882a593Smuzhiyun 	skb_set_owner_w(skb, tfile->socket.sk);
1626*4882a593Smuzhiyun 
1627*4882a593Smuzhiyun 	get_page(alloc_frag->page);
1628*4882a593Smuzhiyun 	alloc_frag->offset += buflen;
1629*4882a593Smuzhiyun 
1630*4882a593Smuzhiyun 	return skb;
1631*4882a593Smuzhiyun }
1632*4882a593Smuzhiyun 
1633*4882a593Smuzhiyun static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
1634*4882a593Smuzhiyun 		       struct xdp_buff *xdp, u32 act)
1635*4882a593Smuzhiyun {
1636*4882a593Smuzhiyun 	int err;
1637*4882a593Smuzhiyun 
1638*4882a593Smuzhiyun 	switch (act) {
1639*4882a593Smuzhiyun 	case XDP_REDIRECT:
1640*4882a593Smuzhiyun 		err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
1641*4882a593Smuzhiyun 		if (err)
1642*4882a593Smuzhiyun 			return err;
1643*4882a593Smuzhiyun 		break;
1644*4882a593Smuzhiyun 	case XDP_TX:
1645*4882a593Smuzhiyun 		err = tun_xdp_tx(tun->dev, xdp);
1646*4882a593Smuzhiyun 		if (err < 0)
1647*4882a593Smuzhiyun 			return err;
1648*4882a593Smuzhiyun 		break;
1649*4882a593Smuzhiyun 	case XDP_PASS:
1650*4882a593Smuzhiyun 		break;
1651*4882a593Smuzhiyun 	default:
1652*4882a593Smuzhiyun 		bpf_warn_invalid_xdp_action(act);
1653*4882a593Smuzhiyun 		fallthrough;
1654*4882a593Smuzhiyun 	case XDP_ABORTED:
1655*4882a593Smuzhiyun 		trace_xdp_exception(tun->dev, xdp_prog, act);
1656*4882a593Smuzhiyun 		fallthrough;
1657*4882a593Smuzhiyun 	case XDP_DROP:
1658*4882a593Smuzhiyun 		this_cpu_inc(tun->pcpu_stats->rx_dropped);
1659*4882a593Smuzhiyun 		break;
1660*4882a593Smuzhiyun 	}
1661*4882a593Smuzhiyun 
1662*4882a593Smuzhiyun 	return act;
1663*4882a593Smuzhiyun }
1664*4882a593Smuzhiyun 
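/* Editor's illustration, not part of the driver: a hedged sketch of a
 * minimal XDP program whose return code is dispatched by tun_xdp_act()
 * above; it would be built separately with clang -target bpf.
 */
#if 0	/* BPF example, kept out of the kernel build */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_drop_all(struct xdp_md *ctx)
{
	return XDP_DROP;	/* tun_xdp_act() bumps rx_dropped for this */
}

char _license[] SEC("license") = "GPL";
#endif
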
1665*4882a593Smuzhiyun static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1666*4882a593Smuzhiyun 				     struct tun_file *tfile,
1667*4882a593Smuzhiyun 				     struct iov_iter *from,
1668*4882a593Smuzhiyun 				     struct virtio_net_hdr *hdr,
1669*4882a593Smuzhiyun 				     int len, int *skb_xdp)
1670*4882a593Smuzhiyun {
1671*4882a593Smuzhiyun 	struct page_frag *alloc_frag = &current->task_frag;
1672*4882a593Smuzhiyun 	struct bpf_prog *xdp_prog;
1673*4882a593Smuzhiyun 	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1674*4882a593Smuzhiyun 	char *buf;
1675*4882a593Smuzhiyun 	size_t copied;
1676*4882a593Smuzhiyun 	int pad = TUN_RX_PAD;
1677*4882a593Smuzhiyun 	int err = 0;
1678*4882a593Smuzhiyun 
1679*4882a593Smuzhiyun 	rcu_read_lock();
1680*4882a593Smuzhiyun 	xdp_prog = rcu_dereference(tun->xdp_prog);
1681*4882a593Smuzhiyun 	if (xdp_prog)
1682*4882a593Smuzhiyun 		pad += XDP_PACKET_HEADROOM;
1683*4882a593Smuzhiyun 	buflen += SKB_DATA_ALIGN(len + pad);
1684*4882a593Smuzhiyun 	rcu_read_unlock();
1685*4882a593Smuzhiyun 
1686*4882a593Smuzhiyun 	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
1687*4882a593Smuzhiyun 	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
1688*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
1689*4882a593Smuzhiyun 
1690*4882a593Smuzhiyun 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
1691*4882a593Smuzhiyun 	copied = copy_page_from_iter(alloc_frag->page,
1692*4882a593Smuzhiyun 				     alloc_frag->offset + pad,
1693*4882a593Smuzhiyun 				     len, from);
1694*4882a593Smuzhiyun 	if (copied != len)
1695*4882a593Smuzhiyun 		return ERR_PTR(-EFAULT);
1696*4882a593Smuzhiyun 
1697*4882a593Smuzhiyun 	/* There's a small window in which an XDP program may be attached
1698*4882a593Smuzhiyun 	 * after the xdp_prog check above. This should be rare, so for
1699*4882a593Smuzhiyun 	 * simplicity we run generic XDP on the skb when the headroom is
1700*4882a593Smuzhiyun 	 * not enough.
1700*4882a593Smuzhiyun 	 */
1701*4882a593Smuzhiyun 	if (hdr->gso_type || !xdp_prog) {
1702*4882a593Smuzhiyun 		*skb_xdp = 1;
1703*4882a593Smuzhiyun 		return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
1704*4882a593Smuzhiyun 				       pad);
1705*4882a593Smuzhiyun 	}
1706*4882a593Smuzhiyun 
1707*4882a593Smuzhiyun 	*skb_xdp = 0;
1708*4882a593Smuzhiyun 
1709*4882a593Smuzhiyun 	local_bh_disable();
1710*4882a593Smuzhiyun 	rcu_read_lock();
1711*4882a593Smuzhiyun 	xdp_prog = rcu_dereference(tun->xdp_prog);
1712*4882a593Smuzhiyun 	if (xdp_prog) {
1713*4882a593Smuzhiyun 		struct xdp_buff xdp;
1714*4882a593Smuzhiyun 		u32 act;
1715*4882a593Smuzhiyun 
1716*4882a593Smuzhiyun 		xdp.data_hard_start = buf;
1717*4882a593Smuzhiyun 		xdp.data = buf + pad;
1718*4882a593Smuzhiyun 		xdp_set_data_meta_invalid(&xdp);
1719*4882a593Smuzhiyun 		xdp.data_end = xdp.data + len;
1720*4882a593Smuzhiyun 		xdp.rxq = &tfile->xdp_rxq;
1721*4882a593Smuzhiyun 		xdp.frame_sz = buflen;
1722*4882a593Smuzhiyun 
1723*4882a593Smuzhiyun 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
1724*4882a593Smuzhiyun 		if (act == XDP_REDIRECT || act == XDP_TX) {
1725*4882a593Smuzhiyun 			get_page(alloc_frag->page);
1726*4882a593Smuzhiyun 			alloc_frag->offset += buflen;
1727*4882a593Smuzhiyun 		}
1728*4882a593Smuzhiyun 		err = tun_xdp_act(tun, xdp_prog, &xdp, act);
1729*4882a593Smuzhiyun 		if (err < 0) {
1730*4882a593Smuzhiyun 			if (act == XDP_REDIRECT || act == XDP_TX)
1731*4882a593Smuzhiyun 				put_page(alloc_frag->page);
1732*4882a593Smuzhiyun 			goto out;
1733*4882a593Smuzhiyun 		}
1734*4882a593Smuzhiyun 
1735*4882a593Smuzhiyun 		if (err == XDP_REDIRECT)
1736*4882a593Smuzhiyun 			xdp_do_flush();
1737*4882a593Smuzhiyun 		if (err != XDP_PASS)
1738*4882a593Smuzhiyun 			goto out;
1739*4882a593Smuzhiyun 
1740*4882a593Smuzhiyun 		pad = xdp.data - xdp.data_hard_start;
1741*4882a593Smuzhiyun 		len = xdp.data_end - xdp.data;
1742*4882a593Smuzhiyun 	}
1743*4882a593Smuzhiyun 	rcu_read_unlock();
1744*4882a593Smuzhiyun 	local_bh_enable();
1745*4882a593Smuzhiyun 
1746*4882a593Smuzhiyun 	return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
1747*4882a593Smuzhiyun 
1748*4882a593Smuzhiyun out:
1749*4882a593Smuzhiyun 	rcu_read_unlock();
1750*4882a593Smuzhiyun 	local_bh_enable();
1751*4882a593Smuzhiyun 	return NULL;
1752*4882a593Smuzhiyun }
1753*4882a593Smuzhiyun 
1754*4882a593Smuzhiyun /* Get a packet from the user space buffer */
1755*4882a593Smuzhiyun static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1756*4882a593Smuzhiyun 			    void *msg_control, struct iov_iter *from,
1757*4882a593Smuzhiyun 			    int noblock, bool more)
1758*4882a593Smuzhiyun {
1759*4882a593Smuzhiyun 	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
1760*4882a593Smuzhiyun 	struct sk_buff *skb;
1761*4882a593Smuzhiyun 	size_t total_len = iov_iter_count(from);
1762*4882a593Smuzhiyun 	size_t len = total_len, align = tun->align, linear;
1763*4882a593Smuzhiyun 	struct virtio_net_hdr gso = { 0 };
1764*4882a593Smuzhiyun 	struct tun_pcpu_stats *stats;
1765*4882a593Smuzhiyun 	int good_linear;
1766*4882a593Smuzhiyun 	int copylen;
1767*4882a593Smuzhiyun 	bool zerocopy = false;
1768*4882a593Smuzhiyun 	int err;
1769*4882a593Smuzhiyun 	u32 rxhash = 0;
1770*4882a593Smuzhiyun 	int skb_xdp = 1;
1771*4882a593Smuzhiyun 	bool frags = tun_napi_frags_enabled(tfile);
1772*4882a593Smuzhiyun 
1773*4882a593Smuzhiyun 	if (!(tun->flags & IFF_NO_PI)) {
1774*4882a593Smuzhiyun 		if (len < sizeof(pi))
1775*4882a593Smuzhiyun 			return -EINVAL;
1776*4882a593Smuzhiyun 		len -= sizeof(pi);
1777*4882a593Smuzhiyun 
1778*4882a593Smuzhiyun 		if (!copy_from_iter_full(&pi, sizeof(pi), from))
1779*4882a593Smuzhiyun 			return -EFAULT;
1780*4882a593Smuzhiyun 	}
1781*4882a593Smuzhiyun 
1782*4882a593Smuzhiyun 	if (tun->flags & IFF_VNET_HDR) {
1783*4882a593Smuzhiyun 		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1784*4882a593Smuzhiyun 
1785*4882a593Smuzhiyun 		if (len < vnet_hdr_sz)
1786*4882a593Smuzhiyun 			return -EINVAL;
1787*4882a593Smuzhiyun 		len -= vnet_hdr_sz;
1788*4882a593Smuzhiyun 
1789*4882a593Smuzhiyun 		if (!copy_from_iter_full(&gso, sizeof(gso), from))
1790*4882a593Smuzhiyun 			return -EFAULT;
1791*4882a593Smuzhiyun 
1792*4882a593Smuzhiyun 		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
1793*4882a593Smuzhiyun 		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
1794*4882a593Smuzhiyun 			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
1795*4882a593Smuzhiyun 
1796*4882a593Smuzhiyun 		if (tun16_to_cpu(tun, gso.hdr_len) > len)
1797*4882a593Smuzhiyun 			return -EINVAL;
1798*4882a593Smuzhiyun 		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
1799*4882a593Smuzhiyun 	}
1800*4882a593Smuzhiyun 
1801*4882a593Smuzhiyun 	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
1802*4882a593Smuzhiyun 		align += NET_IP_ALIGN;
1803*4882a593Smuzhiyun 		if (unlikely(len < ETH_HLEN ||
1804*4882a593Smuzhiyun 			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
1805*4882a593Smuzhiyun 			return -EINVAL;
1806*4882a593Smuzhiyun 	}
1807*4882a593Smuzhiyun 
1808*4882a593Smuzhiyun 	good_linear = SKB_MAX_HEAD(align);
1809*4882a593Smuzhiyun 
1810*4882a593Smuzhiyun 	if (msg_control) {
1811*4882a593Smuzhiyun 		struct iov_iter i = *from;
1812*4882a593Smuzhiyun 
1813*4882a593Smuzhiyun 		/* There are 256 bytes to be copied into the skb, so there is
1814*4882a593Smuzhiyun 		 * enough room to expand the skb head in case that is needed.
1815*4882a593Smuzhiyun 		 * The rest of the buffer is mapped from userspace.
1816*4882a593Smuzhiyun 		 */
1817*4882a593Smuzhiyun 		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
1818*4882a593Smuzhiyun 		if (copylen > good_linear)
1819*4882a593Smuzhiyun 			copylen = good_linear;
1820*4882a593Smuzhiyun 		linear = copylen;
1821*4882a593Smuzhiyun 		iov_iter_advance(&i, copylen);
1822*4882a593Smuzhiyun 		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
1823*4882a593Smuzhiyun 			zerocopy = true;
1824*4882a593Smuzhiyun 	}
1825*4882a593Smuzhiyun 
1826*4882a593Smuzhiyun 	if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
1827*4882a593Smuzhiyun 		/* For packets that are not easy to process here
1828*4882a593Smuzhiyun 		 * (e.g. GSO or jumbo packets), XDP is run by the generic
1829*4882a593Smuzhiyun 		 * XDP routine after the skb has been created.
1830*4882a593Smuzhiyun 		 */
1831*4882a593Smuzhiyun 		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
1832*4882a593Smuzhiyun 		if (IS_ERR(skb)) {
1833*4882a593Smuzhiyun 			this_cpu_inc(tun->pcpu_stats->rx_dropped);
1834*4882a593Smuzhiyun 			return PTR_ERR(skb);
1835*4882a593Smuzhiyun 		}
1836*4882a593Smuzhiyun 		if (!skb)
1837*4882a593Smuzhiyun 			return total_len;
1838*4882a593Smuzhiyun 	} else {
1839*4882a593Smuzhiyun 		if (!zerocopy) {
1840*4882a593Smuzhiyun 			copylen = len;
1841*4882a593Smuzhiyun 			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
1842*4882a593Smuzhiyun 				linear = good_linear;
1843*4882a593Smuzhiyun 			else
1844*4882a593Smuzhiyun 				linear = tun16_to_cpu(tun, gso.hdr_len);
1845*4882a593Smuzhiyun 		}
1846*4882a593Smuzhiyun 
1847*4882a593Smuzhiyun 		if (frags) {
1848*4882a593Smuzhiyun 			mutex_lock(&tfile->napi_mutex);
1849*4882a593Smuzhiyun 			skb = tun_napi_alloc_frags(tfile, copylen, from);
1850*4882a593Smuzhiyun 			/* tun_napi_alloc_frags() enforces a layout for the skb.
1851*4882a593Smuzhiyun 			 * If zerocopy is enabled, then this layout will be
1852*4882a593Smuzhiyun 			 * overwritten by zerocopy_sg_from_iter().
1853*4882a593Smuzhiyun 			 */
1854*4882a593Smuzhiyun 			zerocopy = false;
1855*4882a593Smuzhiyun 		} else {
1856*4882a593Smuzhiyun 			skb = tun_alloc_skb(tfile, align, copylen, linear,
1857*4882a593Smuzhiyun 					    noblock);
1858*4882a593Smuzhiyun 		}
1859*4882a593Smuzhiyun 
1860*4882a593Smuzhiyun 		if (IS_ERR(skb)) {
1861*4882a593Smuzhiyun 			if (PTR_ERR(skb) != -EAGAIN)
1862*4882a593Smuzhiyun 				this_cpu_inc(tun->pcpu_stats->rx_dropped);
1863*4882a593Smuzhiyun 			if (frags)
1864*4882a593Smuzhiyun 				mutex_unlock(&tfile->napi_mutex);
1865*4882a593Smuzhiyun 			return PTR_ERR(skb);
1866*4882a593Smuzhiyun 		}
1867*4882a593Smuzhiyun 
1868*4882a593Smuzhiyun 		if (zerocopy)
1869*4882a593Smuzhiyun 			err = zerocopy_sg_from_iter(skb, from);
1870*4882a593Smuzhiyun 		else
1871*4882a593Smuzhiyun 			err = skb_copy_datagram_from_iter(skb, 0, from, len);
1872*4882a593Smuzhiyun 
1873*4882a593Smuzhiyun 		if (err) {
1874*4882a593Smuzhiyun 			err = -EFAULT;
1875*4882a593Smuzhiyun drop:
1876*4882a593Smuzhiyun 			this_cpu_inc(tun->pcpu_stats->rx_dropped);
1877*4882a593Smuzhiyun 			kfree_skb(skb);
1878*4882a593Smuzhiyun 			if (frags) {
1879*4882a593Smuzhiyun 				tfile->napi.skb = NULL;
1880*4882a593Smuzhiyun 				mutex_unlock(&tfile->napi_mutex);
1881*4882a593Smuzhiyun 			}
1882*4882a593Smuzhiyun 
1883*4882a593Smuzhiyun 			return err;
1884*4882a593Smuzhiyun 		}
1885*4882a593Smuzhiyun 	}
1886*4882a593Smuzhiyun 
1887*4882a593Smuzhiyun 	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
1888*4882a593Smuzhiyun 		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
1889*4882a593Smuzhiyun 		kfree_skb(skb);
1890*4882a593Smuzhiyun 		if (frags) {
1891*4882a593Smuzhiyun 			tfile->napi.skb = NULL;
1892*4882a593Smuzhiyun 			mutex_unlock(&tfile->napi_mutex);
1893*4882a593Smuzhiyun 		}
1894*4882a593Smuzhiyun 
1895*4882a593Smuzhiyun 		return -EINVAL;
1896*4882a593Smuzhiyun 	}
1897*4882a593Smuzhiyun 
1898*4882a593Smuzhiyun 	switch (tun->flags & TUN_TYPE_MASK) {
1899*4882a593Smuzhiyun 	case IFF_TUN:
1900*4882a593Smuzhiyun 		if (tun->flags & IFF_NO_PI) {
1901*4882a593Smuzhiyun 			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
1902*4882a593Smuzhiyun 
1903*4882a593Smuzhiyun 			switch (ip_version) {
1904*4882a593Smuzhiyun 			case 4:
1905*4882a593Smuzhiyun 				pi.proto = htons(ETH_P_IP);
1906*4882a593Smuzhiyun 				break;
1907*4882a593Smuzhiyun 			case 6:
1908*4882a593Smuzhiyun 				pi.proto = htons(ETH_P_IPV6);
1909*4882a593Smuzhiyun 				break;
1910*4882a593Smuzhiyun 			default:
1911*4882a593Smuzhiyun 				this_cpu_inc(tun->pcpu_stats->rx_dropped);
1912*4882a593Smuzhiyun 				kfree_skb(skb);
1913*4882a593Smuzhiyun 				return -EINVAL;
1914*4882a593Smuzhiyun 			}
1915*4882a593Smuzhiyun 		}
1916*4882a593Smuzhiyun 
1917*4882a593Smuzhiyun 		skb_reset_mac_header(skb);
1918*4882a593Smuzhiyun 		skb->protocol = pi.proto;
1919*4882a593Smuzhiyun 		skb->dev = tun->dev;
1920*4882a593Smuzhiyun 		break;
1921*4882a593Smuzhiyun 	case IFF_TAP:
1922*4882a593Smuzhiyun 		if (frags && !pskb_may_pull(skb, ETH_HLEN)) {
1923*4882a593Smuzhiyun 			err = -ENOMEM;
1924*4882a593Smuzhiyun 			goto drop;
1925*4882a593Smuzhiyun 		}
1926*4882a593Smuzhiyun 		skb->protocol = eth_type_trans(skb, tun->dev);
1927*4882a593Smuzhiyun 		break;
1928*4882a593Smuzhiyun 	}
1929*4882a593Smuzhiyun 
1930*4882a593Smuzhiyun 	/* copy skb_ubuf_info for callback when skb has no error */
1931*4882a593Smuzhiyun 	if (zerocopy) {
1932*4882a593Smuzhiyun 		skb_shinfo(skb)->destructor_arg = msg_control;
1933*4882a593Smuzhiyun 		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1934*4882a593Smuzhiyun 		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1935*4882a593Smuzhiyun 	} else if (msg_control) {
1936*4882a593Smuzhiyun 		struct ubuf_info *uarg = msg_control;
1937*4882a593Smuzhiyun 		uarg->callback(uarg, false);
1938*4882a593Smuzhiyun 	}
1939*4882a593Smuzhiyun 
1940*4882a593Smuzhiyun 	skb_reset_network_header(skb);
1941*4882a593Smuzhiyun 	skb_probe_transport_header(skb);
1942*4882a593Smuzhiyun 	skb_record_rx_queue(skb, tfile->queue_index);
1943*4882a593Smuzhiyun 
1944*4882a593Smuzhiyun 	if (skb_xdp) {
1945*4882a593Smuzhiyun 		struct bpf_prog *xdp_prog;
1946*4882a593Smuzhiyun 		int ret;
1947*4882a593Smuzhiyun 
1948*4882a593Smuzhiyun 		local_bh_disable();
1949*4882a593Smuzhiyun 		rcu_read_lock();
1950*4882a593Smuzhiyun 		xdp_prog = rcu_dereference(tun->xdp_prog);
1951*4882a593Smuzhiyun 		if (xdp_prog) {
1952*4882a593Smuzhiyun 			ret = do_xdp_generic(xdp_prog, skb);
1953*4882a593Smuzhiyun 			if (ret != XDP_PASS) {
1954*4882a593Smuzhiyun 				rcu_read_unlock();
1955*4882a593Smuzhiyun 				local_bh_enable();
1956*4882a593Smuzhiyun 				if (frags) {
1957*4882a593Smuzhiyun 					tfile->napi.skb = NULL;
1958*4882a593Smuzhiyun 					mutex_unlock(&tfile->napi_mutex);
1959*4882a593Smuzhiyun 				}
1960*4882a593Smuzhiyun 				return total_len;
1961*4882a593Smuzhiyun 			}
1962*4882a593Smuzhiyun 		}
1963*4882a593Smuzhiyun 		rcu_read_unlock();
1964*4882a593Smuzhiyun 		local_bh_enable();
1965*4882a593Smuzhiyun 	}
1966*4882a593Smuzhiyun 
1967*4882a593Smuzhiyun 	/* Compute the costly rx hash only if needed for flow updates.
1968*4882a593Smuzhiyun 	 * There is a small possibility of out-of-order delivery while a
1969*4882a593Smuzhiyun 	 * flow switches queues, but it is not worth optimizing for.
1970*4882a593Smuzhiyun 	 */
1971*4882a593Smuzhiyun 	if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
1972*4882a593Smuzhiyun 	    !tfile->detached)
1973*4882a593Smuzhiyun 		rxhash = __skb_get_hash_symmetric(skb);
1974*4882a593Smuzhiyun 
1975*4882a593Smuzhiyun 	rcu_read_lock();
1976*4882a593Smuzhiyun 	if (unlikely(!(tun->dev->flags & IFF_UP))) {
1977*4882a593Smuzhiyun 		err = -EIO;
1978*4882a593Smuzhiyun 		rcu_read_unlock();
1979*4882a593Smuzhiyun 		goto drop;
1980*4882a593Smuzhiyun 	}
1981*4882a593Smuzhiyun 
1982*4882a593Smuzhiyun 	if (frags) {
1983*4882a593Smuzhiyun 		u32 headlen;
1984*4882a593Smuzhiyun 
1985*4882a593Smuzhiyun 		/* Exercise flow dissector code path. */
1986*4882a593Smuzhiyun 		skb_push(skb, ETH_HLEN);
1987*4882a593Smuzhiyun 		headlen = eth_get_headlen(tun->dev, skb->data,
1988*4882a593Smuzhiyun 					  skb_headlen(skb));
1989*4882a593Smuzhiyun 
1990*4882a593Smuzhiyun 		if (unlikely(headlen > skb_headlen(skb))) {
1991*4882a593Smuzhiyun 			WARN_ON_ONCE(1);
1992*4882a593Smuzhiyun 			err = -ENOMEM;
1993*4882a593Smuzhiyun 			this_cpu_inc(tun->pcpu_stats->rx_dropped);
1994*4882a593Smuzhiyun napi_busy:
1995*4882a593Smuzhiyun 			napi_free_frags(&tfile->napi);
1996*4882a593Smuzhiyun 			rcu_read_unlock();
1997*4882a593Smuzhiyun 			mutex_unlock(&tfile->napi_mutex);
1998*4882a593Smuzhiyun 			return err;
1999*4882a593Smuzhiyun 		}
2000*4882a593Smuzhiyun 
2001*4882a593Smuzhiyun 		if (likely(napi_schedule_prep(&tfile->napi))) {
2002*4882a593Smuzhiyun 			local_bh_disable();
2003*4882a593Smuzhiyun 			napi_gro_frags(&tfile->napi);
2004*4882a593Smuzhiyun 			napi_complete(&tfile->napi);
2005*4882a593Smuzhiyun 			local_bh_enable();
2006*4882a593Smuzhiyun 		} else {
2007*4882a593Smuzhiyun 			err = -EBUSY;
2008*4882a593Smuzhiyun 			goto napi_busy;
2009*4882a593Smuzhiyun 		}
2010*4882a593Smuzhiyun 		mutex_unlock(&tfile->napi_mutex);
2011*4882a593Smuzhiyun 	} else if (tfile->napi_enabled) {
2012*4882a593Smuzhiyun 		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
2013*4882a593Smuzhiyun 		int queue_len;
2014*4882a593Smuzhiyun 
2015*4882a593Smuzhiyun 		spin_lock_bh(&queue->lock);
2016*4882a593Smuzhiyun 		__skb_queue_tail(queue, skb);
2017*4882a593Smuzhiyun 		queue_len = skb_queue_len(queue);
2018*4882a593Smuzhiyun 		spin_unlock(&queue->lock);
2019*4882a593Smuzhiyun 
2020*4882a593Smuzhiyun 		if (!more || queue_len > NAPI_POLL_WEIGHT)
2021*4882a593Smuzhiyun 			napi_schedule(&tfile->napi);
2022*4882a593Smuzhiyun 
2023*4882a593Smuzhiyun 		local_bh_enable();
2024*4882a593Smuzhiyun 	} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
2025*4882a593Smuzhiyun 		tun_rx_batched(tun, tfile, skb, more);
2026*4882a593Smuzhiyun 	} else {
2027*4882a593Smuzhiyun 		netif_rx_ni(skb);
2028*4882a593Smuzhiyun 	}
2029*4882a593Smuzhiyun 	rcu_read_unlock();
2030*4882a593Smuzhiyun 
2031*4882a593Smuzhiyun 	stats = get_cpu_ptr(tun->pcpu_stats);
2032*4882a593Smuzhiyun 	u64_stats_update_begin(&stats->syncp);
2033*4882a593Smuzhiyun 	u64_stats_inc(&stats->rx_packets);
2034*4882a593Smuzhiyun 	u64_stats_add(&stats->rx_bytes, len);
2035*4882a593Smuzhiyun 	u64_stats_update_end(&stats->syncp);
2036*4882a593Smuzhiyun 	put_cpu_ptr(stats);
2037*4882a593Smuzhiyun 
2038*4882a593Smuzhiyun 	if (rxhash)
2039*4882a593Smuzhiyun 		tun_flow_update(tun, rxhash, tfile);
2040*4882a593Smuzhiyun 
2041*4882a593Smuzhiyun 	return total_len;
2042*4882a593Smuzhiyun }
2043*4882a593Smuzhiyun 
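/* Editor's illustration, not part of the driver: the writer-side layout
 * parsed by tun_get_user() above. With IFF_VNET_HDR each write must be
 * prefixed by a virtio_net_hdr; this hedged sketch assumes the default
 * header size of sizeof(struct virtio_net_hdr) (see TUNSETVNETHDRSZ).
 */
#if 0	/* userspace example, kept out of the kernel build */
#include <string.h>
#include <sys/uio.h>
#include <linux/virtio_net.h>

static ssize_t tun_send(int tun_fd, const void *pkt, size_t len)
{
	struct virtio_net_hdr gso;
	struct iovec iov[2] = {
		{ .iov_base = &gso, .iov_len = sizeof(gso) },
		{ .iov_base = (void *)pkt, .iov_len = len },
	};

	memset(&gso, 0, sizeof(gso));	/* no checksum/GSO offload requested */
	return writev(tun_fd, iov, 2);
}
#endif
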
2044*4882a593Smuzhiyun static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
2045*4882a593Smuzhiyun {
2046*4882a593Smuzhiyun 	struct file *file = iocb->ki_filp;
2047*4882a593Smuzhiyun 	struct tun_file *tfile = file->private_data;
2048*4882a593Smuzhiyun 	struct tun_struct *tun = tun_get(tfile);
2049*4882a593Smuzhiyun 	ssize_t result;
2050*4882a593Smuzhiyun 	int noblock = 0;
2051*4882a593Smuzhiyun 
2052*4882a593Smuzhiyun 	if (!tun)
2053*4882a593Smuzhiyun 		return -EBADFD;
2054*4882a593Smuzhiyun 
2055*4882a593Smuzhiyun 	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
2056*4882a593Smuzhiyun 		noblock = 1;
2057*4882a593Smuzhiyun 
2058*4882a593Smuzhiyun 	result = tun_get_user(tun, tfile, NULL, from, noblock, false);
2059*4882a593Smuzhiyun 
2060*4882a593Smuzhiyun 	tun_put(tun);
2061*4882a593Smuzhiyun 	return result;
2062*4882a593Smuzhiyun }
2063*4882a593Smuzhiyun 
2064*4882a593Smuzhiyun static ssize_t tun_put_user_xdp(struct tun_struct *tun,
2065*4882a593Smuzhiyun 				struct tun_file *tfile,
2066*4882a593Smuzhiyun 				struct xdp_frame *xdp_frame,
2067*4882a593Smuzhiyun 				struct iov_iter *iter)
2068*4882a593Smuzhiyun {
2069*4882a593Smuzhiyun 	int vnet_hdr_sz = 0;
2070*4882a593Smuzhiyun 	size_t size = xdp_frame->len;
2071*4882a593Smuzhiyun 	struct tun_pcpu_stats *stats;
2072*4882a593Smuzhiyun 	size_t ret;
2073*4882a593Smuzhiyun 
2074*4882a593Smuzhiyun 	if (tun->flags & IFF_VNET_HDR) {
2075*4882a593Smuzhiyun 		struct virtio_net_hdr gso = { 0 };
2076*4882a593Smuzhiyun 
2077*4882a593Smuzhiyun 		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2078*4882a593Smuzhiyun 		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
2079*4882a593Smuzhiyun 			return -EINVAL;
2080*4882a593Smuzhiyun 		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
2081*4882a593Smuzhiyun 			     sizeof(gso)))
2082*4882a593Smuzhiyun 			return -EFAULT;
2083*4882a593Smuzhiyun 		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2084*4882a593Smuzhiyun 	}
2085*4882a593Smuzhiyun 
2086*4882a593Smuzhiyun 	ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
2087*4882a593Smuzhiyun 
2088*4882a593Smuzhiyun 	stats = get_cpu_ptr(tun->pcpu_stats);
2089*4882a593Smuzhiyun 	u64_stats_update_begin(&stats->syncp);
2090*4882a593Smuzhiyun 	u64_stats_inc(&stats->tx_packets);
2091*4882a593Smuzhiyun 	u64_stats_add(&stats->tx_bytes, ret);
2092*4882a593Smuzhiyun 	u64_stats_update_end(&stats->syncp);
2093*4882a593Smuzhiyun 	put_cpu_ptr(tun->pcpu_stats);
2094*4882a593Smuzhiyun 
2095*4882a593Smuzhiyun 	return ret;
2096*4882a593Smuzhiyun }
2097*4882a593Smuzhiyun 
2098*4882a593Smuzhiyun /* Put a packet into the user space buffer */
2099*4882a593Smuzhiyun static ssize_t tun_put_user(struct tun_struct *tun,
2100*4882a593Smuzhiyun 			    struct tun_file *tfile,
2101*4882a593Smuzhiyun 			    struct sk_buff *skb,
2102*4882a593Smuzhiyun 			    struct iov_iter *iter)
2103*4882a593Smuzhiyun {
2104*4882a593Smuzhiyun 	struct tun_pi pi = { 0, skb->protocol };
2105*4882a593Smuzhiyun 	struct tun_pcpu_stats *stats;
2106*4882a593Smuzhiyun 	ssize_t total;
2107*4882a593Smuzhiyun 	int vlan_offset = 0;
2108*4882a593Smuzhiyun 	int vlan_hlen = 0;
2109*4882a593Smuzhiyun 	int vnet_hdr_sz = 0;
2110*4882a593Smuzhiyun 
2111*4882a593Smuzhiyun 	if (skb_vlan_tag_present(skb))
2112*4882a593Smuzhiyun 		vlan_hlen = VLAN_HLEN;
2113*4882a593Smuzhiyun 
2114*4882a593Smuzhiyun 	if (tun->flags & IFF_VNET_HDR)
2115*4882a593Smuzhiyun 		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2116*4882a593Smuzhiyun 
2117*4882a593Smuzhiyun 	total = skb->len + vlan_hlen + vnet_hdr_sz;
2118*4882a593Smuzhiyun 
2119*4882a593Smuzhiyun 	if (!(tun->flags & IFF_NO_PI)) {
2120*4882a593Smuzhiyun 		if (iov_iter_count(iter) < sizeof(pi))
2121*4882a593Smuzhiyun 			return -EINVAL;
2122*4882a593Smuzhiyun 
2123*4882a593Smuzhiyun 		total += sizeof(pi);
2124*4882a593Smuzhiyun 		if (iov_iter_count(iter) < total) {
2125*4882a593Smuzhiyun 			/* Packet will be striped */
2126*4882a593Smuzhiyun 			/* Packet will be stripped */
2127*4882a593Smuzhiyun 		}
2128*4882a593Smuzhiyun 
2129*4882a593Smuzhiyun 		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
2130*4882a593Smuzhiyun 			return -EFAULT;
2131*4882a593Smuzhiyun 	}
2132*4882a593Smuzhiyun 
2133*4882a593Smuzhiyun 	if (vnet_hdr_sz) {
2134*4882a593Smuzhiyun 		struct virtio_net_hdr gso;
2135*4882a593Smuzhiyun 
2136*4882a593Smuzhiyun 		if (iov_iter_count(iter) < vnet_hdr_sz)
2137*4882a593Smuzhiyun 			return -EINVAL;
2138*4882a593Smuzhiyun 
2139*4882a593Smuzhiyun 		if (virtio_net_hdr_from_skb(skb, &gso,
2140*4882a593Smuzhiyun 					    tun_is_little_endian(tun), true,
2141*4882a593Smuzhiyun 					    vlan_hlen)) {
2142*4882a593Smuzhiyun 			struct skb_shared_info *sinfo = skb_shinfo(skb);
2143*4882a593Smuzhiyun 			pr_err("unexpected GSO type: "
2144*4882a593Smuzhiyun 			       "0x%x, gso_size %d, hdr_len %d\n",
2145*4882a593Smuzhiyun 			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
2146*4882a593Smuzhiyun 			       tun16_to_cpu(tun, gso.hdr_len));
2147*4882a593Smuzhiyun 			print_hex_dump(KERN_ERR, "tun: ",
2148*4882a593Smuzhiyun 				       DUMP_PREFIX_NONE,
2149*4882a593Smuzhiyun 				       16, 1, skb->head,
2150*4882a593Smuzhiyun 				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
2151*4882a593Smuzhiyun 			WARN_ON_ONCE(1);
2152*4882a593Smuzhiyun 			return -EINVAL;
2153*4882a593Smuzhiyun 		}
2154*4882a593Smuzhiyun 
2155*4882a593Smuzhiyun 		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
2156*4882a593Smuzhiyun 			return -EFAULT;
2157*4882a593Smuzhiyun 
2158*4882a593Smuzhiyun 		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2159*4882a593Smuzhiyun 	}
2160*4882a593Smuzhiyun 
2161*4882a593Smuzhiyun 	if (vlan_hlen) {
2162*4882a593Smuzhiyun 		int ret;
2163*4882a593Smuzhiyun 		struct veth veth;
2164*4882a593Smuzhiyun 
2165*4882a593Smuzhiyun 		veth.h_vlan_proto = skb->vlan_proto;
2166*4882a593Smuzhiyun 		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
2167*4882a593Smuzhiyun 
2168*4882a593Smuzhiyun 		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
2169*4882a593Smuzhiyun 
2170*4882a593Smuzhiyun 		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
2171*4882a593Smuzhiyun 		if (ret || !iov_iter_count(iter))
2172*4882a593Smuzhiyun 			goto done;
2173*4882a593Smuzhiyun 
2174*4882a593Smuzhiyun 		ret = copy_to_iter(&veth, sizeof(veth), iter);
2175*4882a593Smuzhiyun 		if (ret != sizeof(veth) || !iov_iter_count(iter))
2176*4882a593Smuzhiyun 			goto done;
2177*4882a593Smuzhiyun 	}
2178*4882a593Smuzhiyun 
2179*4882a593Smuzhiyun 	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
2180*4882a593Smuzhiyun 
2181*4882a593Smuzhiyun done:
2182*4882a593Smuzhiyun 	/* caller is in process context */
2183*4882a593Smuzhiyun 	stats = get_cpu_ptr(tun->pcpu_stats);
2184*4882a593Smuzhiyun 	u64_stats_update_begin(&stats->syncp);
2185*4882a593Smuzhiyun 	u64_stats_inc(&stats->tx_packets);
2186*4882a593Smuzhiyun 	u64_stats_add(&stats->tx_bytes, skb->len + vlan_hlen);
2187*4882a593Smuzhiyun 	u64_stats_update_end(&stats->syncp);
2188*4882a593Smuzhiyun 	put_cpu_ptr(tun->pcpu_stats);
2189*4882a593Smuzhiyun 
2190*4882a593Smuzhiyun 	return total;
2191*4882a593Smuzhiyun }
2192*4882a593Smuzhiyun 
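/* Editor's illustration, not part of the driver: the reader-side layout
 * produced by tun_put_user() above. Without IFF_NO_PI each packet is
 * preceded by struct tun_pi, so userspace can recover the protocol and
 * the TUN_PKT_STRIP truncation flag. A hedged sketch:
 */
#if 0	/* userspace example, kept out of the kernel build */
#include <string.h>
#include <unistd.h>
#include <linux/if_tun.h>

static ssize_t tun_recv(int tun_fd, void *buf, size_t len,
			unsigned short *proto)
{
	char tmp[sizeof(struct tun_pi) + 2048];
	struct tun_pi pi;
	ssize_t n = read(tun_fd, tmp, sizeof(tmp));

	if (n < (ssize_t)sizeof(pi))
		return -1;
	memcpy(&pi, tmp, sizeof(pi));
	*proto = pi.proto;	/* e.g. htons(ETH_P_IP) */
	n -= sizeof(pi);
	if ((size_t)n > len)
		n = len;
	memcpy(buf, tmp + sizeof(pi), n);
	return n;
}
#endif
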
2193*4882a593Smuzhiyun static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
2194*4882a593Smuzhiyun {
2195*4882a593Smuzhiyun 	DECLARE_WAITQUEUE(wait, current);
2196*4882a593Smuzhiyun 	void *ptr = NULL;
2197*4882a593Smuzhiyun 	int error = 0;
2198*4882a593Smuzhiyun 
2199*4882a593Smuzhiyun 	ptr = ptr_ring_consume(&tfile->tx_ring);
2200*4882a593Smuzhiyun 	if (ptr)
2201*4882a593Smuzhiyun 		goto out;
2202*4882a593Smuzhiyun 	if (noblock) {
2203*4882a593Smuzhiyun 		error = -EAGAIN;
2204*4882a593Smuzhiyun 		goto out;
2205*4882a593Smuzhiyun 	}
2206*4882a593Smuzhiyun 
2207*4882a593Smuzhiyun 	add_wait_queue(&tfile->socket.wq.wait, &wait);
2208*4882a593Smuzhiyun 
2209*4882a593Smuzhiyun 	while (1) {
2210*4882a593Smuzhiyun 		set_current_state(TASK_INTERRUPTIBLE);
2211*4882a593Smuzhiyun 		ptr = ptr_ring_consume(&tfile->tx_ring);
2212*4882a593Smuzhiyun 		if (ptr)
2213*4882a593Smuzhiyun 			break;
2214*4882a593Smuzhiyun 		if (signal_pending(current)) {
2215*4882a593Smuzhiyun 			error = -ERESTARTSYS;
2216*4882a593Smuzhiyun 			break;
2217*4882a593Smuzhiyun 		}
2218*4882a593Smuzhiyun 		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
2219*4882a593Smuzhiyun 			error = -EFAULT;
2220*4882a593Smuzhiyun 			break;
2221*4882a593Smuzhiyun 		}
2222*4882a593Smuzhiyun 
2223*4882a593Smuzhiyun 		schedule();
2224*4882a593Smuzhiyun 	}
2225*4882a593Smuzhiyun 
2226*4882a593Smuzhiyun 	__set_current_state(TASK_RUNNING);
2227*4882a593Smuzhiyun 	remove_wait_queue(&tfile->socket.wq.wait, &wait);
2228*4882a593Smuzhiyun 
2229*4882a593Smuzhiyun out:
2230*4882a593Smuzhiyun 	*err = error;
2231*4882a593Smuzhiyun 	return ptr;
2232*4882a593Smuzhiyun }
2233*4882a593Smuzhiyun 
2234*4882a593Smuzhiyun static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
2235*4882a593Smuzhiyun 			   struct iov_iter *to,
2236*4882a593Smuzhiyun 			   int noblock, void *ptr)
2237*4882a593Smuzhiyun {
2238*4882a593Smuzhiyun 	ssize_t ret;
2239*4882a593Smuzhiyun 	int err;
2240*4882a593Smuzhiyun 
2241*4882a593Smuzhiyun 	if (!iov_iter_count(to)) {
2242*4882a593Smuzhiyun 		tun_ptr_free(ptr);
2243*4882a593Smuzhiyun 		return 0;
2244*4882a593Smuzhiyun 	}
2245*4882a593Smuzhiyun 
2246*4882a593Smuzhiyun 	if (!ptr) {
2247*4882a593Smuzhiyun 		/* Read frames from ring */
2248*4882a593Smuzhiyun 		ptr = tun_ring_recv(tfile, noblock, &err);
2249*4882a593Smuzhiyun 		if (!ptr)
2250*4882a593Smuzhiyun 			return err;
2251*4882a593Smuzhiyun 	}
2252*4882a593Smuzhiyun 
2253*4882a593Smuzhiyun 	if (tun_is_xdp_frame(ptr)) {
2254*4882a593Smuzhiyun 		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2255*4882a593Smuzhiyun 
2256*4882a593Smuzhiyun 		ret = tun_put_user_xdp(tun, tfile, xdpf, to);
2257*4882a593Smuzhiyun 		xdp_return_frame(xdpf);
2258*4882a593Smuzhiyun 	} else {
2259*4882a593Smuzhiyun 		struct sk_buff *skb = ptr;
2260*4882a593Smuzhiyun 
2261*4882a593Smuzhiyun 		ret = tun_put_user(tun, tfile, skb, to);
2262*4882a593Smuzhiyun 		if (unlikely(ret < 0))
2263*4882a593Smuzhiyun 			kfree_skb(skb);
2264*4882a593Smuzhiyun 		else
2265*4882a593Smuzhiyun 			consume_skb(skb);
2266*4882a593Smuzhiyun 	}
2267*4882a593Smuzhiyun 
2268*4882a593Smuzhiyun 	return ret;
2269*4882a593Smuzhiyun }
2270*4882a593Smuzhiyun 
2271*4882a593Smuzhiyun static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
2272*4882a593Smuzhiyun {
2273*4882a593Smuzhiyun 	struct file *file = iocb->ki_filp;
2274*4882a593Smuzhiyun 	struct tun_file *tfile = file->private_data;
2275*4882a593Smuzhiyun 	struct tun_struct *tun = tun_get(tfile);
2276*4882a593Smuzhiyun 	ssize_t len = iov_iter_count(to), ret;
2277*4882a593Smuzhiyun 	int noblock = 0;
2278*4882a593Smuzhiyun 
2279*4882a593Smuzhiyun 	if (!tun)
2280*4882a593Smuzhiyun 		return -EBADFD;
2281*4882a593Smuzhiyun 
2282*4882a593Smuzhiyun 	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
2283*4882a593Smuzhiyun 		noblock = 1;
2284*4882a593Smuzhiyun 
2285*4882a593Smuzhiyun 	ret = tun_do_read(tun, tfile, to, noblock, NULL);
2286*4882a593Smuzhiyun 	ret = min_t(ssize_t, ret, len);
2287*4882a593Smuzhiyun 	if (ret > 0)
2288*4882a593Smuzhiyun 		iocb->ki_pos = ret;
2289*4882a593Smuzhiyun 	tun_put(tun);
2290*4882a593Smuzhiyun 	return ret;
2291*4882a593Smuzhiyun }
2292*4882a593Smuzhiyun 
2293*4882a593Smuzhiyun static void tun_prog_free(struct rcu_head *rcu)
2294*4882a593Smuzhiyun {
2295*4882a593Smuzhiyun 	struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);
2296*4882a593Smuzhiyun 
2297*4882a593Smuzhiyun 	bpf_prog_destroy(prog->prog);
2298*4882a593Smuzhiyun 	kfree(prog);
2299*4882a593Smuzhiyun }
2300*4882a593Smuzhiyun 
2301*4882a593Smuzhiyun static int __tun_set_ebpf(struct tun_struct *tun,
2302*4882a593Smuzhiyun 			  struct tun_prog __rcu **prog_p,
2303*4882a593Smuzhiyun 			  struct bpf_prog *prog)
2304*4882a593Smuzhiyun {
2305*4882a593Smuzhiyun 	struct tun_prog *old, *new = NULL;
2306*4882a593Smuzhiyun 
2307*4882a593Smuzhiyun 	if (prog) {
2308*4882a593Smuzhiyun 		new = kmalloc(sizeof(*new), GFP_KERNEL);
2309*4882a593Smuzhiyun 		if (!new)
2310*4882a593Smuzhiyun 			return -ENOMEM;
2311*4882a593Smuzhiyun 		new->prog = prog;
2312*4882a593Smuzhiyun 	}
2313*4882a593Smuzhiyun 
2314*4882a593Smuzhiyun 	spin_lock_bh(&tun->lock);
2315*4882a593Smuzhiyun 	old = rcu_dereference_protected(*prog_p,
2316*4882a593Smuzhiyun 					lockdep_is_held(&tun->lock));
2317*4882a593Smuzhiyun 	rcu_assign_pointer(*prog_p, new);
2318*4882a593Smuzhiyun 	spin_unlock_bh(&tun->lock);
2319*4882a593Smuzhiyun 
2320*4882a593Smuzhiyun 	if (old)
2321*4882a593Smuzhiyun 		call_rcu(&old->rcu, tun_prog_free);
2322*4882a593Smuzhiyun 
2323*4882a593Smuzhiyun 	return 0;
2324*4882a593Smuzhiyun }
2325*4882a593Smuzhiyun 
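/* Editor's illustration, not part of the driver: __tun_set_ebpf() is
 * reached from the TUNSETSTEERINGEBPF and TUNSETFILTEREBPF ioctls, which
 * take a pointer to a BPF program fd. A hedged sketch:
 */
#if 0	/* userspace example, kept out of the kernel build */
#include <sys/ioctl.h>
#include <linux/if_tun.h>

static int tun_set_steering(int tun_fd, int prog_fd)
{
	/* The program is expected to be BPF_PROG_TYPE_SOCKET_FILTER;
	 * passing -1 detaches and frees the old program via call_rcu().
	 */
	return ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd);
}
#endif
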
2326*4882a593Smuzhiyun static void tun_free_netdev(struct net_device *dev)
2327*4882a593Smuzhiyun {
2328*4882a593Smuzhiyun 	struct tun_struct *tun = netdev_priv(dev);
2329*4882a593Smuzhiyun 
2330*4882a593Smuzhiyun 	BUG_ON(!(list_empty(&tun->disabled)));
2331*4882a593Smuzhiyun 
2332*4882a593Smuzhiyun 	free_percpu(tun->pcpu_stats);
2333*4882a593Smuzhiyun 
2334*4882a593Smuzhiyun 	tun_flow_uninit(tun);
2335*4882a593Smuzhiyun 	security_tun_dev_free_security(tun->security);
2336*4882a593Smuzhiyun 	__tun_set_ebpf(tun, &tun->steering_prog, NULL);
2337*4882a593Smuzhiyun 	__tun_set_ebpf(tun, &tun->filter_prog, NULL);
2338*4882a593Smuzhiyun }
2339*4882a593Smuzhiyun 
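/* netdev setup callback: default to no owner/group restriction, nominal
 * link settings and our preferred queue length, and hook tun_free_netdev()
 * in as the destructor that runs when the device is released.
 */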
2340*4882a593Smuzhiyun static void tun_setup(struct net_device *dev)
2341*4882a593Smuzhiyun {
2342*4882a593Smuzhiyun 	struct tun_struct *tun = netdev_priv(dev);
2343*4882a593Smuzhiyun 
2344*4882a593Smuzhiyun 	tun->owner = INVALID_UID;
2345*4882a593Smuzhiyun 	tun->group = INVALID_GID;
2346*4882a593Smuzhiyun 	tun_default_link_ksettings(dev, &tun->link_ksettings);
2347*4882a593Smuzhiyun 
2348*4882a593Smuzhiyun 	dev->ethtool_ops = &tun_ethtool_ops;
2349*4882a593Smuzhiyun 	dev->needs_free_netdev = true;
2350*4882a593Smuzhiyun 	dev->priv_destructor = tun_free_netdev;
2351*4882a593Smuzhiyun 	/* We prefer our own queue length */
2352*4882a593Smuzhiyun 	dev->tx_queue_len = TUN_READQ_SIZE;
2353*4882a593Smuzhiyun }
2354*4882a593Smuzhiyun 
2355*4882a593Smuzhiyun /* Trivial set of netlink ops to allow deleting a tun or tap
2356*4882a593Smuzhiyun  * device with netlink.
2357*4882a593Smuzhiyun  */
2358*4882a593Smuzhiyun static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
2359*4882a593Smuzhiyun 			struct netlink_ext_ack *extack)
2360*4882a593Smuzhiyun {
2361*4882a593Smuzhiyun 	NL_SET_ERR_MSG(extack,
2362*4882a593Smuzhiyun 		       "tun/tap creation via rtnetlink is not supported.");
2363*4882a593Smuzhiyun 	return -EOPNOTSUPP;
2364*4882a593Smuzhiyun }
2365*4882a593Smuzhiyun 
2366*4882a593Smuzhiyun static size_t tun_get_size(const struct net_device *dev)
2367*4882a593Smuzhiyun {
2368*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
2369*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));
2370*4882a593Smuzhiyun 
2371*4882a593Smuzhiyun 	return nla_total_size(sizeof(uid_t)) + /* OWNER */
2372*4882a593Smuzhiyun 	       nla_total_size(sizeof(gid_t)) + /* GROUP */
2373*4882a593Smuzhiyun 	       nla_total_size(sizeof(u8)) + /* TYPE */
2374*4882a593Smuzhiyun 	       nla_total_size(sizeof(u8)) + /* PI */
2375*4882a593Smuzhiyun 	       nla_total_size(sizeof(u8)) + /* VNET_HDR */
2376*4882a593Smuzhiyun 	       nla_total_size(sizeof(u8)) + /* PERSIST */
2377*4882a593Smuzhiyun 	       nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */
2378*4882a593Smuzhiyun 	       nla_total_size(sizeof(u32)) + /* NUM_QUEUES */
2379*4882a593Smuzhiyun 	       nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */
2380*4882a593Smuzhiyun 	       0;
2381*4882a593Smuzhiyun }
2382*4882a593Smuzhiyun 
2383*4882a593Smuzhiyun static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
2384*4882a593Smuzhiyun {
2385*4882a593Smuzhiyun 	struct tun_struct *tun = netdev_priv(dev);
2386*4882a593Smuzhiyun 
2387*4882a593Smuzhiyun 	if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
2388*4882a593Smuzhiyun 		goto nla_put_failure;
2389*4882a593Smuzhiyun 	if (uid_valid(tun->owner) &&
2390*4882a593Smuzhiyun 	    nla_put_u32(skb, IFLA_TUN_OWNER,
2391*4882a593Smuzhiyun 			from_kuid_munged(current_user_ns(), tun->owner)))
2392*4882a593Smuzhiyun 		goto nla_put_failure;
2393*4882a593Smuzhiyun 	if (gid_valid(tun->group) &&
2394*4882a593Smuzhiyun 	    nla_put_u32(skb, IFLA_TUN_GROUP,
2395*4882a593Smuzhiyun 			from_kgid_munged(current_user_ns(), tun->group)))
2396*4882a593Smuzhiyun 		goto nla_put_failure;
2397*4882a593Smuzhiyun 	if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
2398*4882a593Smuzhiyun 		goto nla_put_failure;
2399*4882a593Smuzhiyun 	if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
2400*4882a593Smuzhiyun 		goto nla_put_failure;
2401*4882a593Smuzhiyun 	if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
2402*4882a593Smuzhiyun 		goto nla_put_failure;
2403*4882a593Smuzhiyun 	if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
2404*4882a593Smuzhiyun 		       !!(tun->flags & IFF_MULTI_QUEUE)))
2405*4882a593Smuzhiyun 		goto nla_put_failure;
2406*4882a593Smuzhiyun 	if (tun->flags & IFF_MULTI_QUEUE) {
2407*4882a593Smuzhiyun 		if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
2408*4882a593Smuzhiyun 			goto nla_put_failure;
2409*4882a593Smuzhiyun 		if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
2410*4882a593Smuzhiyun 				tun->numdisabled))
2411*4882a593Smuzhiyun 			goto nla_put_failure;
2412*4882a593Smuzhiyun 	}
2413*4882a593Smuzhiyun 
2414*4882a593Smuzhiyun 	return 0;
2415*4882a593Smuzhiyun 
2416*4882a593Smuzhiyun nla_put_failure:
2417*4882a593Smuzhiyun 	return -EMSGSIZE;
2418*4882a593Smuzhiyun }
2419*4882a593Smuzhiyun 
2420*4882a593Smuzhiyun static struct rtnl_link_ops tun_link_ops __read_mostly = {
2421*4882a593Smuzhiyun 	.kind		= DRV_NAME,
2422*4882a593Smuzhiyun 	.priv_size	= sizeof(struct tun_struct),
2423*4882a593Smuzhiyun 	.setup		= tun_setup,
2424*4882a593Smuzhiyun 	.validate	= tun_validate,
2425*4882a593Smuzhiyun 	.get_size       = tun_get_size,
2426*4882a593Smuzhiyun 	.fill_info      = tun_fill_info,
2427*4882a593Smuzhiyun };
2428*4882a593Smuzhiyun 
2429*4882a593Smuzhiyun static void tun_sock_write_space(struct sock *sk)
2430*4882a593Smuzhiyun {
2431*4882a593Smuzhiyun 	struct tun_file *tfile;
2432*4882a593Smuzhiyun 	wait_queue_head_t *wqueue;
2433*4882a593Smuzhiyun 
2434*4882a593Smuzhiyun 	if (!sock_writeable(sk))
2435*4882a593Smuzhiyun 		return;
2436*4882a593Smuzhiyun 
2437*4882a593Smuzhiyun 	if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
2438*4882a593Smuzhiyun 		return;
2439*4882a593Smuzhiyun 
2440*4882a593Smuzhiyun 	wqueue = sk_sleep(sk);
2441*4882a593Smuzhiyun 	if (wqueue && waitqueue_active(wqueue))
2442*4882a593Smuzhiyun 		wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
2443*4882a593Smuzhiyun 						EPOLLWRNORM | EPOLLWRBAND);
2444*4882a593Smuzhiyun 
2445*4882a593Smuzhiyun 	tfile = container_of(sk, struct tun_file, sk);
2446*4882a593Smuzhiyun 	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
2447*4882a593Smuzhiyun }
2448*4882a593Smuzhiyun 
2449*4882a593Smuzhiyun static void tun_put_page(struct tun_page *tpage)
2450*4882a593Smuzhiyun {
2451*4882a593Smuzhiyun 	if (tpage->page)
2452*4882a593Smuzhiyun 		__page_frag_cache_drain(tpage->page, tpage->count);
2453*4882a593Smuzhiyun }
2454*4882a593Smuzhiyun 
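/* Handle one xdp_buff from a batched sendmsg: run the attached XDP program,
 * if any (XDP_REDIRECT defers the flush to the caller through *flush), and
 * on XDP_PASS build an skb around the buffer and hand it to the stack.
 * Pages of dropped buffers are accumulated in *tpage so they can be drained
 * with a single __page_frag_cache_drain() call.
 */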
2455*4882a593Smuzhiyun static int tun_xdp_one(struct tun_struct *tun,
2456*4882a593Smuzhiyun 		       struct tun_file *tfile,
2457*4882a593Smuzhiyun 		       struct xdp_buff *xdp, int *flush,
2458*4882a593Smuzhiyun 		       struct tun_page *tpage)
2459*4882a593Smuzhiyun {
2460*4882a593Smuzhiyun 	unsigned int datasize = xdp->data_end - xdp->data;
2461*4882a593Smuzhiyun 	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
2462*4882a593Smuzhiyun 	struct virtio_net_hdr *gso = &hdr->gso;
2463*4882a593Smuzhiyun 	struct tun_pcpu_stats *stats;
2464*4882a593Smuzhiyun 	struct bpf_prog *xdp_prog;
2465*4882a593Smuzhiyun 	struct sk_buff *skb = NULL;
2466*4882a593Smuzhiyun 	u32 rxhash = 0, act;
2467*4882a593Smuzhiyun 	int buflen = hdr->buflen;
2468*4882a593Smuzhiyun 	int err = 0;
2469*4882a593Smuzhiyun 	bool skb_xdp = false;
2470*4882a593Smuzhiyun 	struct page *page;
2471*4882a593Smuzhiyun 
2472*4882a593Smuzhiyun 	xdp_prog = rcu_dereference(tun->xdp_prog);
2473*4882a593Smuzhiyun 	if (xdp_prog) {
2474*4882a593Smuzhiyun 		if (gso->gso_type) {
2475*4882a593Smuzhiyun 			skb_xdp = true;
2476*4882a593Smuzhiyun 			goto build;
2477*4882a593Smuzhiyun 		}
2478*4882a593Smuzhiyun 		xdp_set_data_meta_invalid(xdp);
2479*4882a593Smuzhiyun 		xdp->rxq = &tfile->xdp_rxq;
2480*4882a593Smuzhiyun 		xdp->frame_sz = buflen;
2481*4882a593Smuzhiyun 
2482*4882a593Smuzhiyun 		act = bpf_prog_run_xdp(xdp_prog, xdp);
2483*4882a593Smuzhiyun 		err = tun_xdp_act(tun, xdp_prog, xdp, act);
2484*4882a593Smuzhiyun 		if (err < 0) {
2485*4882a593Smuzhiyun 			put_page(virt_to_head_page(xdp->data));
2486*4882a593Smuzhiyun 			return err;
2487*4882a593Smuzhiyun 		}
2488*4882a593Smuzhiyun 
2489*4882a593Smuzhiyun 		switch (err) {
2490*4882a593Smuzhiyun 		case XDP_REDIRECT:
2491*4882a593Smuzhiyun 			*flush = true;
2492*4882a593Smuzhiyun 			fallthrough;
2493*4882a593Smuzhiyun 		case XDP_TX:
2494*4882a593Smuzhiyun 			return 0;
2495*4882a593Smuzhiyun 		case XDP_PASS:
2496*4882a593Smuzhiyun 			break;
2497*4882a593Smuzhiyun 		default:
2498*4882a593Smuzhiyun 			page = virt_to_head_page(xdp->data);
2499*4882a593Smuzhiyun 			if (tpage->page == page) {
2500*4882a593Smuzhiyun 				++tpage->count;
2501*4882a593Smuzhiyun 			} else {
2502*4882a593Smuzhiyun 				tun_put_page(tpage);
2503*4882a593Smuzhiyun 				tpage->page = page;
2504*4882a593Smuzhiyun 				tpage->count = 1;
2505*4882a593Smuzhiyun 			}
2506*4882a593Smuzhiyun 			return 0;
2507*4882a593Smuzhiyun 		}
2508*4882a593Smuzhiyun 	}
2509*4882a593Smuzhiyun 
2510*4882a593Smuzhiyun build:
2511*4882a593Smuzhiyun 	skb = build_skb(xdp->data_hard_start, buflen);
2512*4882a593Smuzhiyun 	if (!skb) {
2513*4882a593Smuzhiyun 		err = -ENOMEM;
2514*4882a593Smuzhiyun 		goto out;
2515*4882a593Smuzhiyun 	}
2516*4882a593Smuzhiyun 
2517*4882a593Smuzhiyun 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
2518*4882a593Smuzhiyun 	skb_put(skb, xdp->data_end - xdp->data);
2519*4882a593Smuzhiyun 
2520*4882a593Smuzhiyun 	if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
2521*4882a593Smuzhiyun 		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
2522*4882a593Smuzhiyun 		kfree_skb(skb);
2523*4882a593Smuzhiyun 		err = -EINVAL;
2524*4882a593Smuzhiyun 		goto out;
2525*4882a593Smuzhiyun 	}
2526*4882a593Smuzhiyun 
2527*4882a593Smuzhiyun 	skb->protocol = eth_type_trans(skb, tun->dev);
2528*4882a593Smuzhiyun 	skb_reset_network_header(skb);
2529*4882a593Smuzhiyun 	skb_probe_transport_header(skb);
2530*4882a593Smuzhiyun 	skb_record_rx_queue(skb, tfile->queue_index);
2531*4882a593Smuzhiyun 
2532*4882a593Smuzhiyun 	if (skb_xdp) {
2533*4882a593Smuzhiyun 		err = do_xdp_generic(xdp_prog, skb);
2534*4882a593Smuzhiyun 		if (err != XDP_PASS)
2535*4882a593Smuzhiyun 			goto out;
2536*4882a593Smuzhiyun 	}
2537*4882a593Smuzhiyun 
2538*4882a593Smuzhiyun 	if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
2539*4882a593Smuzhiyun 	    !tfile->detached)
2540*4882a593Smuzhiyun 		rxhash = __skb_get_hash_symmetric(skb);
2541*4882a593Smuzhiyun 
2542*4882a593Smuzhiyun 	netif_receive_skb(skb);
2543*4882a593Smuzhiyun 
2544*4882a593Smuzhiyun 	/* No need for get_cpu_ptr() here since this function is
2545*4882a593Smuzhiyun 	 * always called with bh disabled
2546*4882a593Smuzhiyun 	 */
2547*4882a593Smuzhiyun 	stats = this_cpu_ptr(tun->pcpu_stats);
2548*4882a593Smuzhiyun 	u64_stats_update_begin(&stats->syncp);
2549*4882a593Smuzhiyun 	u64_stats_inc(&stats->rx_packets);
2550*4882a593Smuzhiyun 	u64_stats_add(&stats->rx_bytes, datasize);
2551*4882a593Smuzhiyun 	u64_stats_update_end(&stats->syncp);
2552*4882a593Smuzhiyun 
2553*4882a593Smuzhiyun 	if (rxhash)
2554*4882a593Smuzhiyun 		tun_flow_update(tun, rxhash, tfile);
2555*4882a593Smuzhiyun 
2556*4882a593Smuzhiyun out:
2557*4882a593Smuzhiyun 	return err;
2558*4882a593Smuzhiyun }
2559*4882a593Smuzhiyun 
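/* sendmsg() on the tun socket, used e.g. by vhost-net.  A TUN_MSG_PTR
 * control block carries an array of pre-built xdp_buffs that is processed
 * as a batch with bottom halves disabled; everything else goes through the
 * ordinary tun_get_user() copy path.
 */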
2560*4882a593Smuzhiyun static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
2561*4882a593Smuzhiyun {
2562*4882a593Smuzhiyun 	int ret, i;
2563*4882a593Smuzhiyun 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2564*4882a593Smuzhiyun 	struct tun_struct *tun = tun_get(tfile);
2565*4882a593Smuzhiyun 	struct tun_msg_ctl *ctl = m->msg_control;
2566*4882a593Smuzhiyun 	struct xdp_buff *xdp;
2567*4882a593Smuzhiyun 
2568*4882a593Smuzhiyun 	if (!tun)
2569*4882a593Smuzhiyun 		return -EBADFD;
2570*4882a593Smuzhiyun 
2571*4882a593Smuzhiyun 	if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
2572*4882a593Smuzhiyun 	    ctl && ctl->type == TUN_MSG_PTR) {
2573*4882a593Smuzhiyun 		struct tun_page tpage;
2574*4882a593Smuzhiyun 		int n = ctl->num;
2575*4882a593Smuzhiyun 		int flush = 0;
2576*4882a593Smuzhiyun 
2577*4882a593Smuzhiyun 		memset(&tpage, 0, sizeof(tpage));
2578*4882a593Smuzhiyun 
2579*4882a593Smuzhiyun 		local_bh_disable();
2580*4882a593Smuzhiyun 		rcu_read_lock();
2581*4882a593Smuzhiyun 
2582*4882a593Smuzhiyun 		for (i = 0; i < n; i++) {
2583*4882a593Smuzhiyun 			xdp = &((struct xdp_buff *)ctl->ptr)[i];
2584*4882a593Smuzhiyun 			tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
2585*4882a593Smuzhiyun 		}
2586*4882a593Smuzhiyun 
2587*4882a593Smuzhiyun 		if (flush)
2588*4882a593Smuzhiyun 			xdp_do_flush();
2589*4882a593Smuzhiyun 
2590*4882a593Smuzhiyun 		rcu_read_unlock();
2591*4882a593Smuzhiyun 		local_bh_enable();
2592*4882a593Smuzhiyun 
2593*4882a593Smuzhiyun 		tun_put_page(&tpage);
2594*4882a593Smuzhiyun 
2595*4882a593Smuzhiyun 		ret = total_len;
2596*4882a593Smuzhiyun 		goto out;
2597*4882a593Smuzhiyun 	}
2598*4882a593Smuzhiyun 
2599*4882a593Smuzhiyun 	ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter,
2600*4882a593Smuzhiyun 			   m->msg_flags & MSG_DONTWAIT,
2601*4882a593Smuzhiyun 			   m->msg_flags & MSG_MORE);
2602*4882a593Smuzhiyun out:
2603*4882a593Smuzhiyun 	tun_put(tun);
2604*4882a593Smuzhiyun 	return ret;
2605*4882a593Smuzhiyun }
2606*4882a593Smuzhiyun 
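/* recvmsg() counterpart: reads one packet, honouring MSG_DONTWAIT and
 * MSG_ERRQUEUE (tx timestamps).  If the packet exceeds the buffer,
 * MSG_TRUNC is set and, when the caller asked for it, the full packet
 * length is returned instead of the copied length.
 */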
2607*4882a593Smuzhiyun static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
2608*4882a593Smuzhiyun 		       int flags)
2609*4882a593Smuzhiyun {
2610*4882a593Smuzhiyun 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2611*4882a593Smuzhiyun 	struct tun_struct *tun = tun_get(tfile);
2612*4882a593Smuzhiyun 	void *ptr = m->msg_control;
2613*4882a593Smuzhiyun 	int ret;
2614*4882a593Smuzhiyun 
2615*4882a593Smuzhiyun 	if (!tun) {
2616*4882a593Smuzhiyun 		ret = -EBADFD;
2617*4882a593Smuzhiyun 		goto out_free;
2618*4882a593Smuzhiyun 	}
2619*4882a593Smuzhiyun 
2620*4882a593Smuzhiyun 	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
2621*4882a593Smuzhiyun 		ret = -EINVAL;
2622*4882a593Smuzhiyun 		goto out_put_tun;
2623*4882a593Smuzhiyun 	}
2624*4882a593Smuzhiyun 	if (flags & MSG_ERRQUEUE) {
2625*4882a593Smuzhiyun 		ret = sock_recv_errqueue(sock->sk, m, total_len,
2626*4882a593Smuzhiyun 					 SOL_PACKET, TUN_TX_TIMESTAMP);
2627*4882a593Smuzhiyun 		goto out;
2628*4882a593Smuzhiyun 	}
2629*4882a593Smuzhiyun 	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
2630*4882a593Smuzhiyun 	if (ret > (ssize_t)total_len) {
2631*4882a593Smuzhiyun 		m->msg_flags |= MSG_TRUNC;
2632*4882a593Smuzhiyun 		ret = flags & MSG_TRUNC ? ret : total_len;
2633*4882a593Smuzhiyun 	}
2634*4882a593Smuzhiyun out:
2635*4882a593Smuzhiyun 	tun_put(tun);
2636*4882a593Smuzhiyun 	return ret;
2637*4882a593Smuzhiyun 
2638*4882a593Smuzhiyun out_put_tun:
2639*4882a593Smuzhiyun 	tun_put(tun);
2640*4882a593Smuzhiyun out_free:
2641*4882a593Smuzhiyun 	tun_ptr_free(ptr);
2642*4882a593Smuzhiyun 	return ret;
2643*4882a593Smuzhiyun }
2644*4882a593Smuzhiyun 
2645*4882a593Smuzhiyun static int tun_ptr_peek_len(void *ptr)
2646*4882a593Smuzhiyun {
2647*4882a593Smuzhiyun 	if (likely(ptr)) {
2648*4882a593Smuzhiyun 		if (tun_is_xdp_frame(ptr)) {
2649*4882a593Smuzhiyun 			struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2650*4882a593Smuzhiyun 
2651*4882a593Smuzhiyun 			return xdpf->len;
2652*4882a593Smuzhiyun 		}
2653*4882a593Smuzhiyun 		return __skb_array_len_with_tag(ptr);
2654*4882a593Smuzhiyun 	} else {
2655*4882a593Smuzhiyun 		return 0;
2656*4882a593Smuzhiyun 	}
2657*4882a593Smuzhiyun }
2658*4882a593Smuzhiyun 
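/* Peek the length of the next queued packet without dequeueing it; callers
 * such as vhost-net use this to learn the next packet's size.
 */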
2659*4882a593Smuzhiyun static int tun_peek_len(struct socket *sock)
2660*4882a593Smuzhiyun {
2661*4882a593Smuzhiyun 	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2662*4882a593Smuzhiyun 	struct tun_struct *tun;
2663*4882a593Smuzhiyun 	int ret = 0;
2664*4882a593Smuzhiyun 
2665*4882a593Smuzhiyun 	tun = tun_get(tfile);
2666*4882a593Smuzhiyun 	if (!tun)
2667*4882a593Smuzhiyun 		return 0;
2668*4882a593Smuzhiyun 
2669*4882a593Smuzhiyun 	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
2670*4882a593Smuzhiyun 	tun_put(tun);
2671*4882a593Smuzhiyun 
2672*4882a593Smuzhiyun 	return ret;
2673*4882a593Smuzhiyun }
2674*4882a593Smuzhiyun 
2675*4882a593Smuzhiyun /* Ops structure to mimic raw sockets with tun */
2676*4882a593Smuzhiyun static const struct proto_ops tun_socket_ops = {
2677*4882a593Smuzhiyun 	.peek_len = tun_peek_len,
2678*4882a593Smuzhiyun 	.sendmsg = tun_sendmsg,
2679*4882a593Smuzhiyun 	.recvmsg = tun_recvmsg,
2680*4882a593Smuzhiyun };
2681*4882a593Smuzhiyun 
2682*4882a593Smuzhiyun static struct proto tun_proto = {
2683*4882a593Smuzhiyun 	.name		= "tun",
2684*4882a593Smuzhiyun 	.owner		= THIS_MODULE,
2685*4882a593Smuzhiyun 	.obj_size	= sizeof(struct tun_file),
2686*4882a593Smuzhiyun };
2687*4882a593Smuzhiyun 
2688*4882a593Smuzhiyun static int tun_flags(struct tun_struct *tun)
2689*4882a593Smuzhiyun {
2690*4882a593Smuzhiyun 	return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
2691*4882a593Smuzhiyun }
2692*4882a593Smuzhiyun 
2693*4882a593Smuzhiyun static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
2694*4882a593Smuzhiyun 			      char *buf)
2695*4882a593Smuzhiyun {
2696*4882a593Smuzhiyun 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2697*4882a593Smuzhiyun 	return sprintf(buf, "0x%x\n", tun_flags(tun));
2698*4882a593Smuzhiyun }
2699*4882a593Smuzhiyun 
2700*4882a593Smuzhiyun static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
2701*4882a593Smuzhiyun 			      char *buf)
2702*4882a593Smuzhiyun {
2703*4882a593Smuzhiyun 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2704*4882a593Smuzhiyun 	return uid_valid(tun->owner) ?
2705*4882a593Smuzhiyun 		sprintf(buf, "%u\n",
2706*4882a593Smuzhiyun 			from_kuid_munged(current_user_ns(), tun->owner)):
2707*4882a593Smuzhiyun 		sprintf(buf, "-1\n");
2708*4882a593Smuzhiyun }
2709*4882a593Smuzhiyun 
2710*4882a593Smuzhiyun static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
2711*4882a593Smuzhiyun 			      char *buf)
2712*4882a593Smuzhiyun {
2713*4882a593Smuzhiyun 	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2714*4882a593Smuzhiyun 	return gid_valid(tun->group) ?
2715*4882a593Smuzhiyun 		sprintf(buf, "%u\n",
2716*4882a593Smuzhiyun 			from_kgid_munged(current_user_ns(), tun->group)):
2717*4882a593Smuzhiyun 		sprintf(buf, "-1\n");
2718*4882a593Smuzhiyun }
2719*4882a593Smuzhiyun 
2720*4882a593Smuzhiyun static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
2721*4882a593Smuzhiyun static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
2722*4882a593Smuzhiyun static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
2723*4882a593Smuzhiyun 
2724*4882a593Smuzhiyun static struct attribute *tun_dev_attrs[] = {
2725*4882a593Smuzhiyun 	&dev_attr_tun_flags.attr,
2726*4882a593Smuzhiyun 	&dev_attr_owner.attr,
2727*4882a593Smuzhiyun 	&dev_attr_group.attr,
2728*4882a593Smuzhiyun 	NULL
2729*4882a593Smuzhiyun };
2730*4882a593Smuzhiyun 
2731*4882a593Smuzhiyun static const struct attribute_group tun_attr_group = {
2732*4882a593Smuzhiyun 	.attrs = tun_dev_attrs
2733*4882a593Smuzhiyun };
2734*4882a593Smuzhiyun 
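/* TUNSETIFF backend: attach the caller to an existing tun/tap device found
 * by name (after permission and flag checks), or allocate and register a
 * fresh netdev when no such device exists.
 */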
2735*4882a593Smuzhiyun static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
2736*4882a593Smuzhiyun {
2737*4882a593Smuzhiyun 	struct tun_struct *tun;
2738*4882a593Smuzhiyun 	struct tun_file *tfile = file->private_data;
2739*4882a593Smuzhiyun 	struct net_device *dev;
2740*4882a593Smuzhiyun 	int err;
2741*4882a593Smuzhiyun 
2742*4882a593Smuzhiyun 	if (tfile->detached)
2743*4882a593Smuzhiyun 		return -EINVAL;
2744*4882a593Smuzhiyun 
2745*4882a593Smuzhiyun 	if (ifr->ifr_flags & IFF_NAPI_FRAGS) {
2746*4882a593Smuzhiyun 		if (!capable(CAP_NET_ADMIN))
2747*4882a593Smuzhiyun 			return -EPERM;
2748*4882a593Smuzhiyun 
2749*4882a593Smuzhiyun 		if (!(ifr->ifr_flags & IFF_NAPI) ||
2750*4882a593Smuzhiyun 		    (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
2751*4882a593Smuzhiyun 			return -EINVAL;
2752*4882a593Smuzhiyun 	}
2753*4882a593Smuzhiyun 
2754*4882a593Smuzhiyun 	dev = __dev_get_by_name(net, ifr->ifr_name);
2755*4882a593Smuzhiyun 	if (dev) {
2756*4882a593Smuzhiyun 		if (ifr->ifr_flags & IFF_TUN_EXCL)
2757*4882a593Smuzhiyun 			return -EBUSY;
2758*4882a593Smuzhiyun 		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
2759*4882a593Smuzhiyun 			tun = netdev_priv(dev);
2760*4882a593Smuzhiyun 		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
2761*4882a593Smuzhiyun 			tun = netdev_priv(dev);
2762*4882a593Smuzhiyun 		else
2763*4882a593Smuzhiyun 			return -EINVAL;
2764*4882a593Smuzhiyun 
2765*4882a593Smuzhiyun 		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
2766*4882a593Smuzhiyun 		    !!(tun->flags & IFF_MULTI_QUEUE))
2767*4882a593Smuzhiyun 			return -EINVAL;
2768*4882a593Smuzhiyun 
2769*4882a593Smuzhiyun 		if (tun_not_capable(tun))
2770*4882a593Smuzhiyun 			return -EPERM;
2771*4882a593Smuzhiyun 		err = security_tun_dev_open(tun->security);
2772*4882a593Smuzhiyun 		if (err < 0)
2773*4882a593Smuzhiyun 			return err;
2774*4882a593Smuzhiyun 
2775*4882a593Smuzhiyun 		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
2776*4882a593Smuzhiyun 				 ifr->ifr_flags & IFF_NAPI,
2777*4882a593Smuzhiyun 				 ifr->ifr_flags & IFF_NAPI_FRAGS, true);
2778*4882a593Smuzhiyun 		if (err < 0)
2779*4882a593Smuzhiyun 			return err;
2780*4882a593Smuzhiyun 
2781*4882a593Smuzhiyun 		if (tun->flags & IFF_MULTI_QUEUE &&
2782*4882a593Smuzhiyun 		    (tun->numqueues + tun->numdisabled > 1)) {
2783*4882a593Smuzhiyun 			/* One or more queues have already been attached; no need
2784*4882a593Smuzhiyun 			 * to initialize the device again.
2785*4882a593Smuzhiyun 			 */
2786*4882a593Smuzhiyun 			netdev_state_change(dev);
2787*4882a593Smuzhiyun 			return 0;
2788*4882a593Smuzhiyun 		}
2789*4882a593Smuzhiyun 
2790*4882a593Smuzhiyun 		tun->flags = (tun->flags & ~TUN_FEATURES) |
2791*4882a593Smuzhiyun 			      (ifr->ifr_flags & TUN_FEATURES);
2792*4882a593Smuzhiyun 
2793*4882a593Smuzhiyun 		netdev_state_change(dev);
2794*4882a593Smuzhiyun 	} else {
2795*4882a593Smuzhiyun 		char *name;
2796*4882a593Smuzhiyun 		unsigned long flags = 0;
2797*4882a593Smuzhiyun 		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
2798*4882a593Smuzhiyun 			     MAX_TAP_QUEUES : 1;
2799*4882a593Smuzhiyun 
2800*4882a593Smuzhiyun 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2801*4882a593Smuzhiyun 			return -EPERM;
2802*4882a593Smuzhiyun 		err = security_tun_dev_create();
2803*4882a593Smuzhiyun 		if (err < 0)
2804*4882a593Smuzhiyun 			return err;
2805*4882a593Smuzhiyun 
2806*4882a593Smuzhiyun 		/* Set dev type */
2807*4882a593Smuzhiyun 		if (ifr->ifr_flags & IFF_TUN) {
2808*4882a593Smuzhiyun 			/* TUN device */
2809*4882a593Smuzhiyun 			flags |= IFF_TUN;
2810*4882a593Smuzhiyun 			name = "tun%d";
2811*4882a593Smuzhiyun 		} else if (ifr->ifr_flags & IFF_TAP) {
2812*4882a593Smuzhiyun 			/* TAP device */
2813*4882a593Smuzhiyun 			flags |= IFF_TAP;
2814*4882a593Smuzhiyun 			name = "tap%d";
2815*4882a593Smuzhiyun 		} else
2816*4882a593Smuzhiyun 			return -EINVAL;
2817*4882a593Smuzhiyun 
2818*4882a593Smuzhiyun 		if (*ifr->ifr_name)
2819*4882a593Smuzhiyun 			name = ifr->ifr_name;
2820*4882a593Smuzhiyun 
2821*4882a593Smuzhiyun 		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
2822*4882a593Smuzhiyun 				       NET_NAME_UNKNOWN, tun_setup, queues,
2823*4882a593Smuzhiyun 				       queues);
2824*4882a593Smuzhiyun 
2825*4882a593Smuzhiyun 		if (!dev)
2826*4882a593Smuzhiyun 			return -ENOMEM;
2827*4882a593Smuzhiyun 
2828*4882a593Smuzhiyun 		dev_net_set(dev, net);
2829*4882a593Smuzhiyun 		dev->rtnl_link_ops = &tun_link_ops;
2830*4882a593Smuzhiyun 		dev->ifindex = tfile->ifindex;
2831*4882a593Smuzhiyun 		dev->sysfs_groups[0] = &tun_attr_group;
2832*4882a593Smuzhiyun 
2833*4882a593Smuzhiyun 		tun = netdev_priv(dev);
2834*4882a593Smuzhiyun 		tun->dev = dev;
2835*4882a593Smuzhiyun 		tun->flags = flags;
2836*4882a593Smuzhiyun 		tun->txflt.count = 0;
2837*4882a593Smuzhiyun 		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
2838*4882a593Smuzhiyun 
2839*4882a593Smuzhiyun 		tun->align = NET_SKB_PAD;
2840*4882a593Smuzhiyun 		tun->filter_attached = false;
2841*4882a593Smuzhiyun 		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
2842*4882a593Smuzhiyun 		tun->rx_batched = 0;
2843*4882a593Smuzhiyun 		RCU_INIT_POINTER(tun->steering_prog, NULL);
2844*4882a593Smuzhiyun 
2845*4882a593Smuzhiyun 		tun->ifr = ifr;
2846*4882a593Smuzhiyun 		tun->file = file;
2847*4882a593Smuzhiyun 
2848*4882a593Smuzhiyun 		tun_net_initialize(dev);
2849*4882a593Smuzhiyun 
2850*4882a593Smuzhiyun 		err = register_netdevice(tun->dev);
2851*4882a593Smuzhiyun 		if (err < 0) {
2852*4882a593Smuzhiyun 			free_netdev(dev);
2853*4882a593Smuzhiyun 			return err;
2854*4882a593Smuzhiyun 		}
2855*4882a593Smuzhiyun 		/* free_netdev() won't check refcnt; to avoid a race
2856*4882a593Smuzhiyun 		 * with dev_put() we must publish tun only after registration.
2857*4882a593Smuzhiyun 		 */
2858*4882a593Smuzhiyun 		rcu_assign_pointer(tfile->tun, tun);
2859*4882a593Smuzhiyun 	}
2860*4882a593Smuzhiyun 
2861*4882a593Smuzhiyun 	netif_carrier_on(tun->dev);
2862*4882a593Smuzhiyun 
2863*4882a593Smuzhiyun 	/* Make sure persistent devices do not get stuck in
2864*4882a593Smuzhiyun 	 * xoff state.
2865*4882a593Smuzhiyun 	 */
2866*4882a593Smuzhiyun 	if (netif_running(tun->dev))
2867*4882a593Smuzhiyun 		netif_tx_wake_all_queues(tun->dev);
2868*4882a593Smuzhiyun 
2869*4882a593Smuzhiyun 	strcpy(ifr->ifr_name, tun->dev->name);
2870*4882a593Smuzhiyun 	return 0;
2871*4882a593Smuzhiyun }
2872*4882a593Smuzhiyun 
2873*4882a593Smuzhiyun static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
2874*4882a593Smuzhiyun {
2875*4882a593Smuzhiyun 	strcpy(ifr->ifr_name, tun->dev->name);
2876*4882a593Smuzhiyun 
2877*4882a593Smuzhiyun 	ifr->ifr_flags = tun_flags(tun);
2878*4882a593Smuzhiyun 
2879*4882a593Smuzhiyun }
2880*4882a593Smuzhiyun 
2881*4882a593Smuzhiyun /* This is like a cut-down set of ethtool ops, except it is done via the
2882*4882a593Smuzhiyun  * tun fd, so no privileges are required. */
2883*4882a593Smuzhiyun static int set_offload(struct tun_struct *tun, unsigned long arg)
2884*4882a593Smuzhiyun {
2885*4882a593Smuzhiyun 	netdev_features_t features = 0;
2886*4882a593Smuzhiyun 
2887*4882a593Smuzhiyun 	if (arg & TUN_F_CSUM) {
2888*4882a593Smuzhiyun 		features |= NETIF_F_HW_CSUM;
2889*4882a593Smuzhiyun 		arg &= ~TUN_F_CSUM;
2890*4882a593Smuzhiyun 
2891*4882a593Smuzhiyun 		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
2892*4882a593Smuzhiyun 			if (arg & TUN_F_TSO_ECN) {
2893*4882a593Smuzhiyun 				features |= NETIF_F_TSO_ECN;
2894*4882a593Smuzhiyun 				arg &= ~TUN_F_TSO_ECN;
2895*4882a593Smuzhiyun 			}
2896*4882a593Smuzhiyun 			if (arg & TUN_F_TSO4)
2897*4882a593Smuzhiyun 				features |= NETIF_F_TSO;
2898*4882a593Smuzhiyun 			if (arg & TUN_F_TSO6)
2899*4882a593Smuzhiyun 				features |= NETIF_F_TSO6;
2900*4882a593Smuzhiyun 			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
2901*4882a593Smuzhiyun 		}
2902*4882a593Smuzhiyun 
2903*4882a593Smuzhiyun 		arg &= ~TUN_F_UFO;
2904*4882a593Smuzhiyun 	}
2905*4882a593Smuzhiyun 
2906*4882a593Smuzhiyun 	/* This gives the user a way to test for new features in the future by
2907*4882a593Smuzhiyun 	 * trying to set them. */
2908*4882a593Smuzhiyun 	if (arg)
2909*4882a593Smuzhiyun 		return -EINVAL;
2910*4882a593Smuzhiyun 
2911*4882a593Smuzhiyun 	tun->set_features = features;
2912*4882a593Smuzhiyun 	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
2913*4882a593Smuzhiyun 	tun->dev->wanted_features |= features;
2914*4882a593Smuzhiyun 	netdev_update_features(tun->dev);
2915*4882a593Smuzhiyun 
2916*4882a593Smuzhiyun 	return 0;
2917*4882a593Smuzhiyun }
2918*4882a593Smuzhiyun 
2919*4882a593Smuzhiyun static void tun_detach_filter(struct tun_struct *tun, int n)
2920*4882a593Smuzhiyun {
2921*4882a593Smuzhiyun 	int i;
2922*4882a593Smuzhiyun 	struct tun_file *tfile;
2923*4882a593Smuzhiyun 
2924*4882a593Smuzhiyun 	for (i = 0; i < n; i++) {
2925*4882a593Smuzhiyun 		tfile = rtnl_dereference(tun->tfiles[i]);
2926*4882a593Smuzhiyun 		lock_sock(tfile->socket.sk);
2927*4882a593Smuzhiyun 		sk_detach_filter(tfile->socket.sk);
2928*4882a593Smuzhiyun 		release_sock(tfile->socket.sk);
2929*4882a593Smuzhiyun 	}
2930*4882a593Smuzhiyun 
2931*4882a593Smuzhiyun 	tun->filter_attached = false;
2932*4882a593Smuzhiyun }
2933*4882a593Smuzhiyun 
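/* Attach tun->fprog to every queue's socket.  On failure the filter is
 * detached from the queues already processed, keeping the attachment
 * all-or-nothing.
 */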
2934*4882a593Smuzhiyun static int tun_attach_filter(struct tun_struct *tun)
2935*4882a593Smuzhiyun {
2936*4882a593Smuzhiyun 	int i, ret = 0;
2937*4882a593Smuzhiyun 	struct tun_file *tfile;
2938*4882a593Smuzhiyun 
2939*4882a593Smuzhiyun 	for (i = 0; i < tun->numqueues; i++) {
2940*4882a593Smuzhiyun 		tfile = rtnl_dereference(tun->tfiles[i]);
2941*4882a593Smuzhiyun 		lock_sock(tfile->socket.sk);
2942*4882a593Smuzhiyun 		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
2943*4882a593Smuzhiyun 		release_sock(tfile->socket.sk);
2944*4882a593Smuzhiyun 		if (ret) {
2945*4882a593Smuzhiyun 			tun_detach_filter(tun, i);
2946*4882a593Smuzhiyun 			return ret;
2947*4882a593Smuzhiyun 		}
2948*4882a593Smuzhiyun 	}
2949*4882a593Smuzhiyun 
2950*4882a593Smuzhiyun 	tun->filter_attached = true;
2951*4882a593Smuzhiyun 	return ret;
2952*4882a593Smuzhiyun }
2953*4882a593Smuzhiyun 
2954*4882a593Smuzhiyun static void tun_set_sndbuf(struct tun_struct *tun)
2955*4882a593Smuzhiyun {
2956*4882a593Smuzhiyun 	struct tun_file *tfile;
2957*4882a593Smuzhiyun 	int i;
2958*4882a593Smuzhiyun 
2959*4882a593Smuzhiyun 	for (i = 0; i < tun->numqueues; i++) {
2960*4882a593Smuzhiyun 		tfile = rtnl_dereference(tun->tfiles[i]);
2961*4882a593Smuzhiyun 		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
2962*4882a593Smuzhiyun 	}
2963*4882a593Smuzhiyun }
2964*4882a593Smuzhiyun 
2965*4882a593Smuzhiyun static int tun_set_queue(struct file *file, struct ifreq *ifr)
2966*4882a593Smuzhiyun {
2967*4882a593Smuzhiyun 	struct tun_file *tfile = file->private_data;
2968*4882a593Smuzhiyun 	struct tun_struct *tun;
2969*4882a593Smuzhiyun 	int ret = 0;
2970*4882a593Smuzhiyun 
2971*4882a593Smuzhiyun 	rtnl_lock();
2972*4882a593Smuzhiyun 
2973*4882a593Smuzhiyun 	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
2974*4882a593Smuzhiyun 		tun = tfile->detached;
2975*4882a593Smuzhiyun 		if (!tun) {
2976*4882a593Smuzhiyun 			ret = -EINVAL;
2977*4882a593Smuzhiyun 			goto unlock;
2978*4882a593Smuzhiyun 		}
2979*4882a593Smuzhiyun 		ret = security_tun_dev_attach_queue(tun->security);
2980*4882a593Smuzhiyun 		if (ret < 0)
2981*4882a593Smuzhiyun 			goto unlock;
2982*4882a593Smuzhiyun 		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
2983*4882a593Smuzhiyun 				 tun->flags & IFF_NAPI_FRAGS, true);
2984*4882a593Smuzhiyun 	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
2985*4882a593Smuzhiyun 		tun = rtnl_dereference(tfile->tun);
2986*4882a593Smuzhiyun 		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
2987*4882a593Smuzhiyun 			ret = -EINVAL;
2988*4882a593Smuzhiyun 		else
2989*4882a593Smuzhiyun 			__tun_detach(tfile, false);
2990*4882a593Smuzhiyun 	} else
2991*4882a593Smuzhiyun 		ret = -EINVAL;
2992*4882a593Smuzhiyun 
2993*4882a593Smuzhiyun 	if (ret >= 0)
2994*4882a593Smuzhiyun 		netdev_state_change(tun->dev);
2995*4882a593Smuzhiyun 
2996*4882a593Smuzhiyun unlock:
2997*4882a593Smuzhiyun 	rtnl_unlock();
2998*4882a593Smuzhiyun 	return ret;
2999*4882a593Smuzhiyun }
3000*4882a593Smuzhiyun 
3001*4882a593Smuzhiyun static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
3002*4882a593Smuzhiyun 			void __user *data)
3003*4882a593Smuzhiyun {
3004*4882a593Smuzhiyun 	struct bpf_prog *prog;
3005*4882a593Smuzhiyun 	int fd;
3006*4882a593Smuzhiyun 
3007*4882a593Smuzhiyun 	if (copy_from_user(&fd, data, sizeof(fd)))
3008*4882a593Smuzhiyun 		return -EFAULT;
3009*4882a593Smuzhiyun 
3010*4882a593Smuzhiyun 	if (fd == -1) {
3011*4882a593Smuzhiyun 		prog = NULL;
3012*4882a593Smuzhiyun 	} else {
3013*4882a593Smuzhiyun 		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
3014*4882a593Smuzhiyun 		if (IS_ERR(prog))
3015*4882a593Smuzhiyun 			return PTR_ERR(prog);
3016*4882a593Smuzhiyun 	}
3017*4882a593Smuzhiyun 
3018*4882a593Smuzhiyun 	return __tun_set_ebpf(tun, prog_p, prog);
3019*4882a593Smuzhiyun }
3020*4882a593Smuzhiyun 
3021*4882a593Smuzhiyun /* Return correct value for tun->dev->addr_len based on tun->dev->type. */
3022*4882a593Smuzhiyun static unsigned char tun_get_addr_len(unsigned short type)
3023*4882a593Smuzhiyun {
3024*4882a593Smuzhiyun 	switch (type) {
3025*4882a593Smuzhiyun 	case ARPHRD_IP6GRE:
3026*4882a593Smuzhiyun 	case ARPHRD_TUNNEL6:
3027*4882a593Smuzhiyun 		return sizeof(struct in6_addr);
3028*4882a593Smuzhiyun 	case ARPHRD_IPGRE:
3029*4882a593Smuzhiyun 	case ARPHRD_TUNNEL:
3030*4882a593Smuzhiyun 	case ARPHRD_SIT:
3031*4882a593Smuzhiyun 		return 4;
3032*4882a593Smuzhiyun 	case ARPHRD_ETHER:
3033*4882a593Smuzhiyun 		return ETH_ALEN;
3034*4882a593Smuzhiyun 	case ARPHRD_IEEE802154:
3035*4882a593Smuzhiyun 	case ARPHRD_IEEE802154_MONITOR:
3036*4882a593Smuzhiyun 		return IEEE802154_EXTENDED_ADDR_LEN;
3037*4882a593Smuzhiyun 	case ARPHRD_PHONET_PIPE:
3038*4882a593Smuzhiyun 	case ARPHRD_PPP:
3039*4882a593Smuzhiyun 	case ARPHRD_NONE:
3040*4882a593Smuzhiyun 		return 0;
3041*4882a593Smuzhiyun 	case ARPHRD_6LOWPAN:
3042*4882a593Smuzhiyun 		return EUI64_ADDR_LEN;
3043*4882a593Smuzhiyun 	case ARPHRD_FDDI:
3044*4882a593Smuzhiyun 		return FDDI_K_ALEN;
3045*4882a593Smuzhiyun 	case ARPHRD_HIPPI:
3046*4882a593Smuzhiyun 		return HIPPI_ALEN;
3047*4882a593Smuzhiyun 	case ARPHRD_IEEE802:
3048*4882a593Smuzhiyun 		return FC_ALEN;
3049*4882a593Smuzhiyun 	case ARPHRD_ROSE:
3050*4882a593Smuzhiyun 		return ROSE_ADDR_LEN;
3051*4882a593Smuzhiyun 	case ARPHRD_NETROM:
3052*4882a593Smuzhiyun 		return AX25_ADDR_LEN;
3053*4882a593Smuzhiyun 	case ARPHRD_LOCALTLK:
3054*4882a593Smuzhiyun 		return LTALK_ALEN;
3055*4882a593Smuzhiyun 	default:
3056*4882a593Smuzhiyun 		return 0;
3057*4882a593Smuzhiyun 	}
3058*4882a593Smuzhiyun }
3059*4882a593Smuzhiyun 
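/* ioctl() backend shared by the native and compat entry points; ifreq_len
 * says how much of the user's ifreq to copy.  For orientation, a minimal
 * (purely illustrative) userspace sequence that exercises TUNSETIFF:
 *
 *	int fd = open("/dev/net/tun", O_RDWR);
 *	struct ifreq ifr = { .ifr_flags = IFF_TUN | IFF_NO_PI };
 *	strncpy(ifr.ifr_name, "tun0", IFNAMSIZ);
 *	ioctl(fd, TUNSETIFF, &ifr);
 *	... read()/write() on fd now carry the device's packets ...
 */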
3060*4882a593Smuzhiyun static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
3061*4882a593Smuzhiyun 			    unsigned long arg, int ifreq_len)
3062*4882a593Smuzhiyun {
3063*4882a593Smuzhiyun 	struct tun_file *tfile = file->private_data;
3064*4882a593Smuzhiyun 	struct net *net = sock_net(&tfile->sk);
3065*4882a593Smuzhiyun 	struct tun_struct *tun;
3066*4882a593Smuzhiyun 	void __user *argp = (void __user *)arg;
3067*4882a593Smuzhiyun 	unsigned int ifindex, carrier;
3068*4882a593Smuzhiyun 	struct ifreq ifr;
3069*4882a593Smuzhiyun 	kuid_t owner;
3070*4882a593Smuzhiyun 	kgid_t group;
3071*4882a593Smuzhiyun 	int sndbuf;
3072*4882a593Smuzhiyun 	int vnet_hdr_sz;
3073*4882a593Smuzhiyun 	int le;
3074*4882a593Smuzhiyun 	int ret;
3075*4882a593Smuzhiyun 	bool do_notify = false;
3076*4882a593Smuzhiyun 
3077*4882a593Smuzhiyun 	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
3078*4882a593Smuzhiyun 	    (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
3079*4882a593Smuzhiyun 		if (copy_from_user(&ifr, argp, ifreq_len))
3080*4882a593Smuzhiyun 			return -EFAULT;
3081*4882a593Smuzhiyun 	} else {
3082*4882a593Smuzhiyun 		memset(&ifr, 0, sizeof(ifr));
3083*4882a593Smuzhiyun 	}
3084*4882a593Smuzhiyun 	if (cmd == TUNGETFEATURES) {
3085*4882a593Smuzhiyun 		/* Currently this just means: "what IFF flags are valid?".
3086*4882a593Smuzhiyun 		 * This is needed because we never checked for invalid flags on
3087*4882a593Smuzhiyun 		 * TUNSETIFF.
3088*4882a593Smuzhiyun 		 */
3089*4882a593Smuzhiyun 		return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
3090*4882a593Smuzhiyun 				(unsigned int __user*)argp);
3091*4882a593Smuzhiyun 	} else if (cmd == TUNSETQUEUE) {
3092*4882a593Smuzhiyun 		return tun_set_queue(file, &ifr);
3093*4882a593Smuzhiyun 	} else if (cmd == SIOCGSKNS) {
3094*4882a593Smuzhiyun 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3095*4882a593Smuzhiyun 			return -EPERM;
3096*4882a593Smuzhiyun 		return open_related_ns(&net->ns, get_net_ns);
3097*4882a593Smuzhiyun 	}
3098*4882a593Smuzhiyun 
3099*4882a593Smuzhiyun 	ret = 0;
3100*4882a593Smuzhiyun 	rtnl_lock();
3101*4882a593Smuzhiyun 
3102*4882a593Smuzhiyun 	tun = tun_get(tfile);
3103*4882a593Smuzhiyun 	if (cmd == TUNSETIFF) {
3104*4882a593Smuzhiyun 		ret = -EEXIST;
3105*4882a593Smuzhiyun 		if (tun)
3106*4882a593Smuzhiyun 			goto unlock;
3107*4882a593Smuzhiyun 
3108*4882a593Smuzhiyun 		ifr.ifr_name[IFNAMSIZ-1] = '\0';
3109*4882a593Smuzhiyun 
3110*4882a593Smuzhiyun 		ret = tun_set_iff(net, file, &ifr);
3111*4882a593Smuzhiyun 
3112*4882a593Smuzhiyun 		if (ret)
3113*4882a593Smuzhiyun 			goto unlock;
3114*4882a593Smuzhiyun 
3115*4882a593Smuzhiyun 		if (copy_to_user(argp, &ifr, ifreq_len))
3116*4882a593Smuzhiyun 			ret = -EFAULT;
3117*4882a593Smuzhiyun 		goto unlock;
3118*4882a593Smuzhiyun 	}
3119*4882a593Smuzhiyun 	if (cmd == TUNSETIFINDEX) {
3120*4882a593Smuzhiyun 		ret = -EPERM;
3121*4882a593Smuzhiyun 		if (tun)
3122*4882a593Smuzhiyun 			goto unlock;
3123*4882a593Smuzhiyun 
3124*4882a593Smuzhiyun 		ret = -EFAULT;
3125*4882a593Smuzhiyun 		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
3126*4882a593Smuzhiyun 			goto unlock;
3127*4882a593Smuzhiyun 
3128*4882a593Smuzhiyun 		ret = 0;
3129*4882a593Smuzhiyun 		tfile->ifindex = ifindex;
3130*4882a593Smuzhiyun 		goto unlock;
3131*4882a593Smuzhiyun 	}
3132*4882a593Smuzhiyun 
3133*4882a593Smuzhiyun 	ret = -EBADFD;
3134*4882a593Smuzhiyun 	if (!tun)
3135*4882a593Smuzhiyun 		goto unlock;
3136*4882a593Smuzhiyun 
3137*4882a593Smuzhiyun 	netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd);
3138*4882a593Smuzhiyun 
3139*4882a593Smuzhiyun 	net = dev_net(tun->dev);
3140*4882a593Smuzhiyun 	ret = 0;
3141*4882a593Smuzhiyun 	switch (cmd) {
3142*4882a593Smuzhiyun 	case TUNGETIFF:
3143*4882a593Smuzhiyun 		tun_get_iff(tun, &ifr);
3144*4882a593Smuzhiyun 
3145*4882a593Smuzhiyun 		if (tfile->detached)
3146*4882a593Smuzhiyun 			ifr.ifr_flags |= IFF_DETACH_QUEUE;
3147*4882a593Smuzhiyun 		if (!tfile->socket.sk->sk_filter)
3148*4882a593Smuzhiyun 			ifr.ifr_flags |= IFF_NOFILTER;
3149*4882a593Smuzhiyun 
3150*4882a593Smuzhiyun 		if (copy_to_user(argp, &ifr, ifreq_len))
3151*4882a593Smuzhiyun 			ret = -EFAULT;
3152*4882a593Smuzhiyun 		break;
3153*4882a593Smuzhiyun 
3154*4882a593Smuzhiyun 	case TUNSETNOCSUM:
3155*4882a593Smuzhiyun 		/* Disable/Enable checksum */
3156*4882a593Smuzhiyun 
3157*4882a593Smuzhiyun 		/* [unimplemented] */
3158*4882a593Smuzhiyun 		netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n",
3159*4882a593Smuzhiyun 			   arg ? "disabled" : "enabled");
3160*4882a593Smuzhiyun 		break;
3161*4882a593Smuzhiyun 
3162*4882a593Smuzhiyun 	case TUNSETPERSIST:
3163*4882a593Smuzhiyun 		/* Disable/Enable persist mode. Keep an extra reference to the
3164*4882a593Smuzhiyun 		 * module to prevent it from being unloaded.
3165*4882a593Smuzhiyun 		 */
3166*4882a593Smuzhiyun 		if (arg && !(tun->flags & IFF_PERSIST)) {
3167*4882a593Smuzhiyun 			tun->flags |= IFF_PERSIST;
3168*4882a593Smuzhiyun 			__module_get(THIS_MODULE);
3169*4882a593Smuzhiyun 			do_notify = true;
3170*4882a593Smuzhiyun 		}
3171*4882a593Smuzhiyun 		if (!arg && (tun->flags & IFF_PERSIST)) {
3172*4882a593Smuzhiyun 			tun->flags &= ~IFF_PERSIST;
3173*4882a593Smuzhiyun 			module_put(THIS_MODULE);
3174*4882a593Smuzhiyun 			do_notify = true;
3175*4882a593Smuzhiyun 		}
3176*4882a593Smuzhiyun 
3177*4882a593Smuzhiyun 		netif_info(tun, drv, tun->dev, "persist %s\n",
3178*4882a593Smuzhiyun 			   arg ? "enabled" : "disabled");
3179*4882a593Smuzhiyun 		break;
3180*4882a593Smuzhiyun 
3181*4882a593Smuzhiyun 	case TUNSETOWNER:
3182*4882a593Smuzhiyun 		/* Set owner of the device */
3183*4882a593Smuzhiyun 		owner = make_kuid(current_user_ns(), arg);
3184*4882a593Smuzhiyun 		if (!uid_valid(owner)) {
3185*4882a593Smuzhiyun 			ret = -EINVAL;
3186*4882a593Smuzhiyun 			break;
3187*4882a593Smuzhiyun 		}
3188*4882a593Smuzhiyun 		tun->owner = owner;
3189*4882a593Smuzhiyun 		do_notify = true;
3190*4882a593Smuzhiyun 		netif_info(tun, drv, tun->dev, "owner set to %u\n",
3191*4882a593Smuzhiyun 			   from_kuid(&init_user_ns, tun->owner));
3192*4882a593Smuzhiyun 		break;
3193*4882a593Smuzhiyun 
3194*4882a593Smuzhiyun 	case TUNSETGROUP:
3195*4882a593Smuzhiyun 		/* Set group of the device */
3196*4882a593Smuzhiyun 		group = make_kgid(current_user_ns(), arg);
3197*4882a593Smuzhiyun 		if (!gid_valid(group)) {
3198*4882a593Smuzhiyun 			ret = -EINVAL;
3199*4882a593Smuzhiyun 			break;
3200*4882a593Smuzhiyun 		}
3201*4882a593Smuzhiyun 		tun->group = group;
3202*4882a593Smuzhiyun 		do_notify = true;
3203*4882a593Smuzhiyun 		netif_info(tun, drv, tun->dev, "group set to %u\n",
3204*4882a593Smuzhiyun 			   from_kgid(&init_user_ns, tun->group));
3205*4882a593Smuzhiyun 		break;
3206*4882a593Smuzhiyun 
3207*4882a593Smuzhiyun 	case TUNSETLINK:
3208*4882a593Smuzhiyun 		/* Only allow setting the type when the interface is down */
3209*4882a593Smuzhiyun 		if (tun->dev->flags & IFF_UP) {
3210*4882a593Smuzhiyun 			netif_info(tun, drv, tun->dev,
3211*4882a593Smuzhiyun 				   "Linktype set failed because interface is up\n");
3212*4882a593Smuzhiyun 			ret = -EBUSY;
3213*4882a593Smuzhiyun 		} else {
3214*4882a593Smuzhiyun 			tun->dev->type = (int) arg;
3215*4882a593Smuzhiyun 			tun->dev->addr_len = tun_get_addr_len(tun->dev->type);
3216*4882a593Smuzhiyun 			netif_info(tun, drv, tun->dev, "linktype set to %d\n",
3217*4882a593Smuzhiyun 				   tun->dev->type);
3218*4882a593Smuzhiyun 			ret = 0;
3219*4882a593Smuzhiyun 		}
3220*4882a593Smuzhiyun 		break;
3221*4882a593Smuzhiyun 
3222*4882a593Smuzhiyun 	case TUNSETDEBUG:
3223*4882a593Smuzhiyun 		tun->msg_enable = (u32)arg;
3224*4882a593Smuzhiyun 		break;
3225*4882a593Smuzhiyun 
3226*4882a593Smuzhiyun 	case TUNSETOFFLOAD:
3227*4882a593Smuzhiyun 		ret = set_offload(tun, arg);
3228*4882a593Smuzhiyun 		break;
3229*4882a593Smuzhiyun 
3230*4882a593Smuzhiyun 	case TUNSETTXFILTER:
3231*4882a593Smuzhiyun 		/* Can be set only for TAPs */
3232*4882a593Smuzhiyun 		ret = -EINVAL;
3233*4882a593Smuzhiyun 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3234*4882a593Smuzhiyun 			break;
3235*4882a593Smuzhiyun 		ret = update_filter(&tun->txflt, (void __user *)arg);
3236*4882a593Smuzhiyun 		break;
3237*4882a593Smuzhiyun 
3238*4882a593Smuzhiyun 	case SIOCGIFHWADDR:
3239*4882a593Smuzhiyun 		/* Get hw address */
3240*4882a593Smuzhiyun 		dev_get_mac_address(&ifr.ifr_hwaddr, net, tun->dev->name);
3241*4882a593Smuzhiyun 		if (copy_to_user(argp, &ifr, ifreq_len))
3242*4882a593Smuzhiyun 			ret = -EFAULT;
3243*4882a593Smuzhiyun 		break;
3244*4882a593Smuzhiyun 
3245*4882a593Smuzhiyun 	case SIOCSIFHWADDR:
3246*4882a593Smuzhiyun 		/* Set hw address */
3247*4882a593Smuzhiyun 		ret = dev_set_mac_address_user(tun->dev, &ifr.ifr_hwaddr, NULL);
3248*4882a593Smuzhiyun 		break;
3249*4882a593Smuzhiyun 
3250*4882a593Smuzhiyun 	case TUNGETSNDBUF:
3251*4882a593Smuzhiyun 		sndbuf = tfile->socket.sk->sk_sndbuf;
3252*4882a593Smuzhiyun 		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
3253*4882a593Smuzhiyun 			ret = -EFAULT;
3254*4882a593Smuzhiyun 		break;
3255*4882a593Smuzhiyun 
3256*4882a593Smuzhiyun 	case TUNSETSNDBUF:
3257*4882a593Smuzhiyun 		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
3258*4882a593Smuzhiyun 			ret = -EFAULT;
3259*4882a593Smuzhiyun 			break;
3260*4882a593Smuzhiyun 		}
3261*4882a593Smuzhiyun 		if (sndbuf <= 0) {
3262*4882a593Smuzhiyun 			ret = -EINVAL;
3263*4882a593Smuzhiyun 			break;
3264*4882a593Smuzhiyun 		}
3265*4882a593Smuzhiyun 
3266*4882a593Smuzhiyun 		tun->sndbuf = sndbuf;
3267*4882a593Smuzhiyun 		tun_set_sndbuf(tun);
3268*4882a593Smuzhiyun 		break;
3269*4882a593Smuzhiyun 
3270*4882a593Smuzhiyun 	case TUNGETVNETHDRSZ:
3271*4882a593Smuzhiyun 		vnet_hdr_sz = tun->vnet_hdr_sz;
3272*4882a593Smuzhiyun 		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
3273*4882a593Smuzhiyun 			ret = -EFAULT;
3274*4882a593Smuzhiyun 		break;
3275*4882a593Smuzhiyun 
3276*4882a593Smuzhiyun 	case TUNSETVNETHDRSZ:
3277*4882a593Smuzhiyun 		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
3278*4882a593Smuzhiyun 			ret = -EFAULT;
3279*4882a593Smuzhiyun 			break;
3280*4882a593Smuzhiyun 		}
3281*4882a593Smuzhiyun 		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
3282*4882a593Smuzhiyun 			ret = -EINVAL;
3283*4882a593Smuzhiyun 			break;
3284*4882a593Smuzhiyun 		}
3285*4882a593Smuzhiyun 
3286*4882a593Smuzhiyun 		tun->vnet_hdr_sz = vnet_hdr_sz;
3287*4882a593Smuzhiyun 		break;
3288*4882a593Smuzhiyun 
3289*4882a593Smuzhiyun 	case TUNGETVNETLE:
3290*4882a593Smuzhiyun 		le = !!(tun->flags & TUN_VNET_LE);
3291*4882a593Smuzhiyun 		if (put_user(le, (int __user *)argp))
3292*4882a593Smuzhiyun 			ret = -EFAULT;
3293*4882a593Smuzhiyun 		break;
3294*4882a593Smuzhiyun 
3295*4882a593Smuzhiyun 	case TUNSETVNETLE:
3296*4882a593Smuzhiyun 		if (get_user(le, (int __user *)argp)) {
3297*4882a593Smuzhiyun 			ret = -EFAULT;
3298*4882a593Smuzhiyun 			break;
3299*4882a593Smuzhiyun 		}
3300*4882a593Smuzhiyun 		if (le)
3301*4882a593Smuzhiyun 			tun->flags |= TUN_VNET_LE;
3302*4882a593Smuzhiyun 		else
3303*4882a593Smuzhiyun 			tun->flags &= ~TUN_VNET_LE;
3304*4882a593Smuzhiyun 		break;
3305*4882a593Smuzhiyun 
3306*4882a593Smuzhiyun 	case TUNGETVNETBE:
3307*4882a593Smuzhiyun 		ret = tun_get_vnet_be(tun, argp);
3308*4882a593Smuzhiyun 		break;
3309*4882a593Smuzhiyun 
3310*4882a593Smuzhiyun 	case TUNSETVNETBE:
3311*4882a593Smuzhiyun 		ret = tun_set_vnet_be(tun, argp);
3312*4882a593Smuzhiyun 		break;
3313*4882a593Smuzhiyun 
3314*4882a593Smuzhiyun 	case TUNATTACHFILTER:
3315*4882a593Smuzhiyun 		/* Can be set only for TAPs */
3316*4882a593Smuzhiyun 		ret = -EINVAL;
3317*4882a593Smuzhiyun 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3318*4882a593Smuzhiyun 			break;
3319*4882a593Smuzhiyun 		ret = -EFAULT;
3320*4882a593Smuzhiyun 		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
3321*4882a593Smuzhiyun 			break;
3322*4882a593Smuzhiyun 
3323*4882a593Smuzhiyun 		ret = tun_attach_filter(tun);
3324*4882a593Smuzhiyun 		break;
3325*4882a593Smuzhiyun 
3326*4882a593Smuzhiyun 	case TUNDETACHFILTER:
3327*4882a593Smuzhiyun 		/* Can be set only for TAPs */
3328*4882a593Smuzhiyun 		ret = -EINVAL;
3329*4882a593Smuzhiyun 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3330*4882a593Smuzhiyun 			break;
3331*4882a593Smuzhiyun 		ret = 0;
3332*4882a593Smuzhiyun 		tun_detach_filter(tun, tun->numqueues);
3333*4882a593Smuzhiyun 		break;
3334*4882a593Smuzhiyun 
3335*4882a593Smuzhiyun 	case TUNGETFILTER:
3336*4882a593Smuzhiyun 		ret = -EINVAL;
3337*4882a593Smuzhiyun 		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3338*4882a593Smuzhiyun 			break;
3339*4882a593Smuzhiyun 		ret = -EFAULT;
3340*4882a593Smuzhiyun 		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
3341*4882a593Smuzhiyun 			break;
3342*4882a593Smuzhiyun 		ret = 0;
3343*4882a593Smuzhiyun 		break;
3344*4882a593Smuzhiyun 
3345*4882a593Smuzhiyun 	case TUNSETSTEERINGEBPF:
3346*4882a593Smuzhiyun 		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
3347*4882a593Smuzhiyun 		break;
3348*4882a593Smuzhiyun 
3349*4882a593Smuzhiyun 	case TUNSETFILTEREBPF:
3350*4882a593Smuzhiyun 		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
3351*4882a593Smuzhiyun 		break;
3352*4882a593Smuzhiyun 
3353*4882a593Smuzhiyun 	case TUNSETCARRIER:
3354*4882a593Smuzhiyun 		ret = -EFAULT;
3355*4882a593Smuzhiyun 		if (copy_from_user(&carrier, argp, sizeof(carrier)))
3356*4882a593Smuzhiyun 			goto unlock;
3357*4882a593Smuzhiyun 
3358*4882a593Smuzhiyun 		ret = tun_net_change_carrier(tun->dev, (bool)carrier);
3359*4882a593Smuzhiyun 		break;
3360*4882a593Smuzhiyun 
3361*4882a593Smuzhiyun 	case TUNGETDEVNETNS:
3362*4882a593Smuzhiyun 		ret = -EPERM;
3363*4882a593Smuzhiyun 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3364*4882a593Smuzhiyun 			goto unlock;
3365*4882a593Smuzhiyun 		ret = open_related_ns(&net->ns, get_net_ns);
3366*4882a593Smuzhiyun 		break;
3367*4882a593Smuzhiyun 
3368*4882a593Smuzhiyun 	default:
3369*4882a593Smuzhiyun 		ret = -EINVAL;
3370*4882a593Smuzhiyun 		break;
3371*4882a593Smuzhiyun 	}
3372*4882a593Smuzhiyun 
3373*4882a593Smuzhiyun 	if (do_notify)
3374*4882a593Smuzhiyun 		netdev_state_change(tun->dev);
3375*4882a593Smuzhiyun 
3376*4882a593Smuzhiyun unlock:
3377*4882a593Smuzhiyun 	rtnl_unlock();
3378*4882a593Smuzhiyun 	if (tun)
3379*4882a593Smuzhiyun 		tun_put(tun);
3380*4882a593Smuzhiyun 	return ret;
3381*4882a593Smuzhiyun }
3382*4882a593Smuzhiyun 
3383*4882a593Smuzhiyun static long tun_chr_ioctl(struct file *file,
3384*4882a593Smuzhiyun 			  unsigned int cmd, unsigned long arg)
3385*4882a593Smuzhiyun {
3386*4882a593Smuzhiyun 	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct ifreq));
3387*4882a593Smuzhiyun }
3388*4882a593Smuzhiyun 
3389*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
3390*4882a593Smuzhiyun static long tun_chr_compat_ioctl(struct file *file,
3391*4882a593Smuzhiyun 			 unsigned int cmd, unsigned long arg)
3392*4882a593Smuzhiyun {
3393*4882a593Smuzhiyun 	switch (cmd) {
3394*4882a593Smuzhiyun 	case TUNSETIFF:
3395*4882a593Smuzhiyun 	case TUNGETIFF:
3396*4882a593Smuzhiyun 	case TUNSETTXFILTER:
3397*4882a593Smuzhiyun 	case TUNGETSNDBUF:
3398*4882a593Smuzhiyun 	case TUNSETSNDBUF:
3399*4882a593Smuzhiyun 	case SIOCGIFHWADDR:
3400*4882a593Smuzhiyun 	case SIOCSIFHWADDR:
3401*4882a593Smuzhiyun 		arg = (unsigned long)compat_ptr(arg);
3402*4882a593Smuzhiyun 		break;
3403*4882a593Smuzhiyun 	default:
3404*4882a593Smuzhiyun 		arg = (compat_ulong_t)arg;
3405*4882a593Smuzhiyun 		break;
3406*4882a593Smuzhiyun 	}
3407*4882a593Smuzhiyun 
3408*4882a593Smuzhiyun 	/*
3409*4882a593Smuzhiyun 	 * compat_ifreq is shorter than ifreq, so we must not access beyond
3410*4882a593Smuzhiyun 	 * the end of that structure. All fields that are used in this
3411*4882a593Smuzhiyun 	 * driver are compatible though, we don't need to convert the
3412*4882a593Smuzhiyun 	 * contents.
3413*4882a593Smuzhiyun 	 */
3414*4882a593Smuzhiyun 	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
3415*4882a593Smuzhiyun }
3416*4882a593Smuzhiyun #endif /* CONFIG_COMPAT */
3417*4882a593Smuzhiyun 
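/* FASYNC support: register or unregister the fd for SIGIO delivery.
 * tun_sock_write_space() above signals tfile->fasync when the socket
 * becomes writeable again.
 */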
3418*4882a593Smuzhiyun static int tun_chr_fasync(int fd, struct file *file, int on)
3419*4882a593Smuzhiyun {
3420*4882a593Smuzhiyun 	struct tun_file *tfile = file->private_data;
3421*4882a593Smuzhiyun 	int ret;
3422*4882a593Smuzhiyun 
3423*4882a593Smuzhiyun 	if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
3424*4882a593Smuzhiyun 		goto out;
3425*4882a593Smuzhiyun 
3426*4882a593Smuzhiyun 	if (on) {
3427*4882a593Smuzhiyun 		__f_setown(file, task_pid(current), PIDTYPE_TGID, 0);
3428*4882a593Smuzhiyun 		tfile->flags |= TUN_FASYNC;
3429*4882a593Smuzhiyun 	} else
3430*4882a593Smuzhiyun 		tfile->flags &= ~TUN_FASYNC;
3431*4882a593Smuzhiyun 	ret = 0;
3432*4882a593Smuzhiyun out:
3433*4882a593Smuzhiyun 	return ret;
3434*4882a593Smuzhiyun }
3435*4882a593Smuzhiyun 
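/* open() on /dev/net/tun: allocate the per-fd tun_file (which embeds a
 * socket) and its tx ptr_ring.  The fd is not bound to any device until
 * TUNSETIFF or TUNSETQUEUE attaches it.
 */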
3436*4882a593Smuzhiyun static int tun_chr_open(struct inode *inode, struct file *file)
3437*4882a593Smuzhiyun {
3438*4882a593Smuzhiyun 	struct net *net = current->nsproxy->net_ns;
3439*4882a593Smuzhiyun 	struct tun_file *tfile;
3440*4882a593Smuzhiyun 
3441*4882a593Smuzhiyun 	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
3442*4882a593Smuzhiyun 					    &tun_proto, 0);
3443*4882a593Smuzhiyun 	if (!tfile)
3444*4882a593Smuzhiyun 		return -ENOMEM;
3445*4882a593Smuzhiyun 	if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
3446*4882a593Smuzhiyun 		sk_free(&tfile->sk);
3447*4882a593Smuzhiyun 		return -ENOMEM;
3448*4882a593Smuzhiyun 	}
3449*4882a593Smuzhiyun 
3450*4882a593Smuzhiyun 	mutex_init(&tfile->napi_mutex);
3451*4882a593Smuzhiyun 	RCU_INIT_POINTER(tfile->tun, NULL);
3452*4882a593Smuzhiyun 	tfile->flags = 0;
3453*4882a593Smuzhiyun 	tfile->ifindex = 0;
3454*4882a593Smuzhiyun 
3455*4882a593Smuzhiyun 	init_waitqueue_head(&tfile->socket.wq.wait);
3456*4882a593Smuzhiyun 
3457*4882a593Smuzhiyun 	tfile->socket.file = file;
3458*4882a593Smuzhiyun 	tfile->socket.ops = &tun_socket_ops;
3459*4882a593Smuzhiyun 
3460*4882a593Smuzhiyun 	sock_init_data(&tfile->socket, &tfile->sk);
3461*4882a593Smuzhiyun 
3462*4882a593Smuzhiyun 	tfile->sk.sk_write_space = tun_sock_write_space;
3463*4882a593Smuzhiyun 	tfile->sk.sk_sndbuf = INT_MAX;
3464*4882a593Smuzhiyun 
3465*4882a593Smuzhiyun 	file->private_data = tfile;
3466*4882a593Smuzhiyun 	INIT_LIST_HEAD(&tfile->next);
3467*4882a593Smuzhiyun 
3468*4882a593Smuzhiyun 	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
3469*4882a593Smuzhiyun 
3470*4882a593Smuzhiyun 	return 0;
3471*4882a593Smuzhiyun }
3472*4882a593Smuzhiyun 
3473*4882a593Smuzhiyun static int tun_chr_close(struct inode *inode, struct file *file)
3474*4882a593Smuzhiyun {
3475*4882a593Smuzhiyun 	struct tun_file *tfile = file->private_data;
3476*4882a593Smuzhiyun 
3477*4882a593Smuzhiyun 	tun_detach(tfile, true);
3478*4882a593Smuzhiyun 
3479*4882a593Smuzhiyun 	return 0;
3480*4882a593Smuzhiyun }
3481*4882a593Smuzhiyun 
3482*4882a593Smuzhiyun #ifdef CONFIG_PROC_FS
3483*4882a593Smuzhiyun static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
3484*4882a593Smuzhiyun {
3485*4882a593Smuzhiyun 	struct tun_file *tfile = file->private_data;
3486*4882a593Smuzhiyun 	struct tun_struct *tun;
3487*4882a593Smuzhiyun 	struct ifreq ifr;
3488*4882a593Smuzhiyun 
3489*4882a593Smuzhiyun 	memset(&ifr, 0, sizeof(ifr));
3490*4882a593Smuzhiyun 
3491*4882a593Smuzhiyun 	rtnl_lock();
3492*4882a593Smuzhiyun 	tun = tun_get(tfile);
3493*4882a593Smuzhiyun 	if (tun)
3494*4882a593Smuzhiyun 		tun_get_iff(tun, &ifr);
3495*4882a593Smuzhiyun 	rtnl_unlock();
3496*4882a593Smuzhiyun 
3497*4882a593Smuzhiyun 	if (tun)
3498*4882a593Smuzhiyun 		tun_put(tun);
3499*4882a593Smuzhiyun 
3500*4882a593Smuzhiyun 	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
3501*4882a593Smuzhiyun }
3502*4882a593Smuzhiyun #endif
3503*4882a593Smuzhiyun 
3504*4882a593Smuzhiyun static const struct file_operations tun_fops = {
3505*4882a593Smuzhiyun 	.owner	= THIS_MODULE,
3506*4882a593Smuzhiyun 	.llseek = no_llseek,
3507*4882a593Smuzhiyun 	.read_iter  = tun_chr_read_iter,
3508*4882a593Smuzhiyun 	.write_iter = tun_chr_write_iter,
3509*4882a593Smuzhiyun 	.poll	= tun_chr_poll,
3510*4882a593Smuzhiyun 	.unlocked_ioctl	= tun_chr_ioctl,
3511*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
3512*4882a593Smuzhiyun 	.compat_ioctl = tun_chr_compat_ioctl,
3513*4882a593Smuzhiyun #endif
3514*4882a593Smuzhiyun 	.open	= tun_chr_open,
3515*4882a593Smuzhiyun 	.release = tun_chr_close,
3516*4882a593Smuzhiyun 	.fasync = tun_chr_fasync,
3517*4882a593Smuzhiyun #ifdef CONFIG_PROC_FS
3518*4882a593Smuzhiyun 	.show_fdinfo = tun_chr_show_fdinfo,
3519*4882a593Smuzhiyun #endif
3520*4882a593Smuzhiyun };
3521*4882a593Smuzhiyun 
3522*4882a593Smuzhiyun static struct miscdevice tun_miscdev = {
3523*4882a593Smuzhiyun 	.minor = TUN_MINOR,
3524*4882a593Smuzhiyun 	.name = "tun",
3525*4882a593Smuzhiyun 	.nodename = "net/tun",
3526*4882a593Smuzhiyun 	.fops = &tun_fops,
3527*4882a593Smuzhiyun };
3528*4882a593Smuzhiyun 
3529*4882a593Smuzhiyun /* ethtool interface */
3530*4882a593Smuzhiyun 
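/* There is no real PHY behind a tun/tap device, so report a fixed, purely
 * nominal 10 Mb/s full-duplex TP link unless userspace overrides it via
 * tun_set_link_ksettings().
 */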
3531*4882a593Smuzhiyun static void tun_default_link_ksettings(struct net_device *dev,
3532*4882a593Smuzhiyun 				       struct ethtool_link_ksettings *cmd)
3533*4882a593Smuzhiyun {
3534*4882a593Smuzhiyun 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
3535*4882a593Smuzhiyun 	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
3536*4882a593Smuzhiyun 	cmd->base.speed		= SPEED_10;
3537*4882a593Smuzhiyun 	cmd->base.duplex	= DUPLEX_FULL;
3538*4882a593Smuzhiyun 	cmd->base.port		= PORT_TP;
3539*4882a593Smuzhiyun 	cmd->base.phy_address	= 0;
3540*4882a593Smuzhiyun 	cmd->base.autoneg	= AUTONEG_DISABLE;
3541*4882a593Smuzhiyun }
3542*4882a593Smuzhiyun 
3543*4882a593Smuzhiyun static int tun_get_link_ksettings(struct net_device *dev,
3544*4882a593Smuzhiyun 				  struct ethtool_link_ksettings *cmd)
3545*4882a593Smuzhiyun {
3546*4882a593Smuzhiyun 	struct tun_struct *tun = netdev_priv(dev);
3547*4882a593Smuzhiyun 
3548*4882a593Smuzhiyun 	memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
3549*4882a593Smuzhiyun 	return 0;
3550*4882a593Smuzhiyun }
3551*4882a593Smuzhiyun 
3552*4882a593Smuzhiyun static int tun_set_link_ksettings(struct net_device *dev,
3553*4882a593Smuzhiyun 				  const struct ethtool_link_ksettings *cmd)
3554*4882a593Smuzhiyun {
3555*4882a593Smuzhiyun 	struct tun_struct *tun = netdev_priv(dev);
3556*4882a593Smuzhiyun 
3557*4882a593Smuzhiyun 	memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
3558*4882a593Smuzhiyun 	return 0;
3559*4882a593Smuzhiyun }
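
/* Hedged note on the two handlers above: they only copy a shadow
 * ethtool_link_ksettings held in tun_struct; there is no hardware to
 * program.  A change made via ethtool (interface name hypothetical)
 * therefore alters what is reported, not actual throughput:
 *
 *	# ethtool -s tun0 speed 1000 duplex full
 *	# ethtool tun0		-> now reports Speed: 1000Mb/s
 */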
3560*4882a593Smuzhiyun 
3561*4882a593Smuzhiyun static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3562*4882a593Smuzhiyun {
3563*4882a593Smuzhiyun 	struct tun_struct *tun = netdev_priv(dev);
3564*4882a593Smuzhiyun 
3565*4882a593Smuzhiyun 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
3566*4882a593Smuzhiyun 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
3567*4882a593Smuzhiyun 
3568*4882a593Smuzhiyun 	switch (tun->flags & TUN_TYPE_MASK) {
3569*4882a593Smuzhiyun 	case IFF_TUN:
3570*4882a593Smuzhiyun 		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
3571*4882a593Smuzhiyun 		break;
3572*4882a593Smuzhiyun 	case IFF_TAP:
3573*4882a593Smuzhiyun 		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
3574*4882a593Smuzhiyun 		break;
3575*4882a593Smuzhiyun 	}
3576*4882a593Smuzhiyun }
3577*4882a593Smuzhiyun 
3578*4882a593Smuzhiyun static u32 tun_get_msglevel(struct net_device *dev)
3579*4882a593Smuzhiyun {
3580*4882a593Smuzhiyun 	struct tun_struct *tun = netdev_priv(dev);
3581*4882a593Smuzhiyun 
3582*4882a593Smuzhiyun 	return tun->msg_enable;
3583*4882a593Smuzhiyun }
3584*4882a593Smuzhiyun 
3585*4882a593Smuzhiyun static void tun_set_msglevel(struct net_device *dev, u32 value)
3586*4882a593Smuzhiyun {
3587*4882a593Smuzhiyun 	struct tun_struct *tun = netdev_priv(dev);
3588*4882a593Smuzhiyun 
3589*4882a593Smuzhiyun 	tun->msg_enable = value;
3590*4882a593Smuzhiyun }
3591*4882a593Smuzhiyun 
3592*4882a593Smuzhiyun static int tun_get_coalesce(struct net_device *dev,
3593*4882a593Smuzhiyun 			    struct ethtool_coalesce *ec)
3594*4882a593Smuzhiyun {
3595*4882a593Smuzhiyun 	struct tun_struct *tun = netdev_priv(dev);
3596*4882a593Smuzhiyun 
3597*4882a593Smuzhiyun 	ec->rx_max_coalesced_frames = tun->rx_batched;
3598*4882a593Smuzhiyun 
3599*4882a593Smuzhiyun 	return 0;
3600*4882a593Smuzhiyun }
3601*4882a593Smuzhiyun 
3602*4882a593Smuzhiyun static int tun_set_coalesce(struct net_device *dev,
3603*4882a593Smuzhiyun 			    struct ethtool_coalesce *ec)
3604*4882a593Smuzhiyun {
3605*4882a593Smuzhiyun 	struct tun_struct *tun = netdev_priv(dev);
3606*4882a593Smuzhiyun 
3607*4882a593Smuzhiyun 	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
3608*4882a593Smuzhiyun 		tun->rx_batched = NAPI_POLL_WEIGHT;
3609*4882a593Smuzhiyun 	else
3610*4882a593Smuzhiyun 		tun->rx_batched = ec->rx_max_coalesced_frames;
3611*4882a593Smuzhiyun 
3612*4882a593Smuzhiyun 	return 0;
3613*4882a593Smuzhiyun }
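
/* Sketch of the knob above (interface name hypothetical): rx-frames sets
 * tun->rx_batched, the number of packets tun_rx_batched() accumulates
 * before flushing them up the stack, silently capped at NAPI_POLL_WEIGHT:
 *
 *	# ethtool -C tun0 rx-frames 32		-> rx_batched = 32
 *	# ethtool -C tun0 rx-frames 1000	-> capped to NAPI_POLL_WEIGHT (64)
 *	# ethtool -C tun0 rx-frames 0		-> batching off, per-packet delivery
 */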
3614*4882a593Smuzhiyun 
3615*4882a593Smuzhiyun static const struct ethtool_ops tun_ethtool_ops = {
3616*4882a593Smuzhiyun 	.supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES,
3617*4882a593Smuzhiyun 	.get_drvinfo	= tun_get_drvinfo,
3618*4882a593Smuzhiyun 	.get_msglevel	= tun_get_msglevel,
3619*4882a593Smuzhiyun 	.set_msglevel	= tun_set_msglevel,
3620*4882a593Smuzhiyun 	.get_link	= ethtool_op_get_link,
3621*4882a593Smuzhiyun 	.get_ts_info	= ethtool_op_get_ts_info,
3622*4882a593Smuzhiyun 	.get_coalesce   = tun_get_coalesce,
3623*4882a593Smuzhiyun 	.set_coalesce   = tun_set_coalesce,
3624*4882a593Smuzhiyun 	.get_link_ksettings = tun_get_link_ksettings,
3625*4882a593Smuzhiyun 	.set_link_ksettings = tun_set_link_ksettings,
3626*4882a593Smuzhiyun };
3627*4882a593Smuzhiyun 
3628*4882a593Smuzhiyun static int tun_queue_resize(struct tun_struct *tun)
3629*4882a593Smuzhiyun {
3630*4882a593Smuzhiyun 	struct net_device *dev = tun->dev;
3631*4882a593Smuzhiyun 	struct tun_file *tfile;
3632*4882a593Smuzhiyun 	struct ptr_ring **rings;
3633*4882a593Smuzhiyun 	int n = tun->numqueues + tun->numdisabled;
3634*4882a593Smuzhiyun 	int ret, i;
3635*4882a593Smuzhiyun 
3636*4882a593Smuzhiyun 	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
3637*4882a593Smuzhiyun 	if (!rings)
3638*4882a593Smuzhiyun 		return -ENOMEM;
3639*4882a593Smuzhiyun 
3640*4882a593Smuzhiyun 	for (i = 0; i < tun->numqueues; i++) {
3641*4882a593Smuzhiyun 		tfile = rtnl_dereference(tun->tfiles[i]);
3642*4882a593Smuzhiyun 		rings[i] = &tfile->tx_ring;
3643*4882a593Smuzhiyun 	}
3644*4882a593Smuzhiyun 	list_for_each_entry(tfile, &tun->disabled, next)
3645*4882a593Smuzhiyun 		rings[i++] = &tfile->tx_ring;
3646*4882a593Smuzhiyun 
3647*4882a593Smuzhiyun 	ret = ptr_ring_resize_multiple(rings, n,
3648*4882a593Smuzhiyun 				       dev->tx_queue_len, GFP_KERNEL,
3649*4882a593Smuzhiyun 				       tun_ptr_free);
3650*4882a593Smuzhiyun 
3651*4882a593Smuzhiyun 	kfree(rings);
3652*4882a593Smuzhiyun 	return ret;
3653*4882a593Smuzhiyun }
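
/* Hedged usage note: this resize is driven by the NETDEV_CHANGE_TX_QUEUE_LEN
 * case in tun_device_event() below, so a queue-length change from userspace
 * (interface name hypothetical) re-sizes the per-queue ptr_ring of every
 * attached and detached queue in one go:
 *
 *	# ip link set dev tun0 txqueuelen 5000
 *
 * Entries that no longer fit are released through tun_ptr_free().
 */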
3654*4882a593Smuzhiyun 
3655*4882a593Smuzhiyun static int tun_device_event(struct notifier_block *unused,
3656*4882a593Smuzhiyun 			    unsigned long event, void *ptr)
3657*4882a593Smuzhiyun {
3658*4882a593Smuzhiyun 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3659*4882a593Smuzhiyun 	struct tun_struct *tun = netdev_priv(dev);
3660*4882a593Smuzhiyun 	int i;
3661*4882a593Smuzhiyun 
3662*4882a593Smuzhiyun 	if (dev->rtnl_link_ops != &tun_link_ops)
3663*4882a593Smuzhiyun 		return NOTIFY_DONE;
3664*4882a593Smuzhiyun 
3665*4882a593Smuzhiyun 	switch (event) {
3666*4882a593Smuzhiyun 	case NETDEV_CHANGE_TX_QUEUE_LEN:
3667*4882a593Smuzhiyun 		if (tun_queue_resize(tun))
3668*4882a593Smuzhiyun 			return NOTIFY_BAD;
3669*4882a593Smuzhiyun 		break;
3670*4882a593Smuzhiyun 	case NETDEV_UP:
3671*4882a593Smuzhiyun 		for (i = 0; i < tun->numqueues; i++) {
3672*4882a593Smuzhiyun 			struct tun_file *tfile;
3673*4882a593Smuzhiyun 
3674*4882a593Smuzhiyun 			tfile = rtnl_dereference(tun->tfiles[i]);
3675*4882a593Smuzhiyun 			tfile->socket.sk->sk_write_space(tfile->socket.sk);
3676*4882a593Smuzhiyun 		}
3677*4882a593Smuzhiyun 		break;
3678*4882a593Smuzhiyun 	default:
3679*4882a593Smuzhiyun 		break;
3680*4882a593Smuzhiyun 	}
3681*4882a593Smuzhiyun 
3682*4882a593Smuzhiyun 	return NOTIFY_DONE;
3683*4882a593Smuzhiyun }
3684*4882a593Smuzhiyun 
3685*4882a593Smuzhiyun static struct notifier_block tun_notifier_block __read_mostly = {
3686*4882a593Smuzhiyun 	.notifier_call	= tun_device_event,
3687*4882a593Smuzhiyun };
3688*4882a593Smuzhiyun 
3689*4882a593Smuzhiyun static int __init tun_init(void)
3690*4882a593Smuzhiyun {
3691*4882a593Smuzhiyun 	int ret = 0;
3692*4882a593Smuzhiyun 
3693*4882a593Smuzhiyun 	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
3694*4882a593Smuzhiyun 
3695*4882a593Smuzhiyun 	ret = rtnl_link_register(&tun_link_ops);
3696*4882a593Smuzhiyun 	if (ret) {
3697*4882a593Smuzhiyun 		pr_err("Can't register link_ops\n");
3698*4882a593Smuzhiyun 		goto err_linkops;
3699*4882a593Smuzhiyun 	}
3700*4882a593Smuzhiyun 
3701*4882a593Smuzhiyun 	ret = misc_register(&tun_miscdev);
3702*4882a593Smuzhiyun 	if (ret) {
3703*4882a593Smuzhiyun 		pr_err("Can't register misc device %d\n", TUN_MINOR);
3704*4882a593Smuzhiyun 		goto err_misc;
3705*4882a593Smuzhiyun 	}
3706*4882a593Smuzhiyun 
3707*4882a593Smuzhiyun 	ret = register_netdevice_notifier(&tun_notifier_block);
3708*4882a593Smuzhiyun 	if (ret) {
3709*4882a593Smuzhiyun 		pr_err("Can't register netdevice notifier\n");
3710*4882a593Smuzhiyun 		goto err_notifier;
3711*4882a593Smuzhiyun 	}
3712*4882a593Smuzhiyun 
3713*4882a593Smuzhiyun 	return 0;
3714*4882a593Smuzhiyun 
3715*4882a593Smuzhiyun err_notifier:
3716*4882a593Smuzhiyun 	misc_deregister(&tun_miscdev);
3717*4882a593Smuzhiyun err_misc:
3718*4882a593Smuzhiyun 	rtnl_link_unregister(&tun_link_ops);
3719*4882a593Smuzhiyun err_linkops:
3720*4882a593Smuzhiyun 	return ret;
3721*4882a593Smuzhiyun }
3722*4882a593Smuzhiyun 
3723*4882a593Smuzhiyun static void tun_cleanup(void)
3724*4882a593Smuzhiyun {
3725*4882a593Smuzhiyun 	misc_deregister(&tun_miscdev);
3726*4882a593Smuzhiyun 	rtnl_link_unregister(&tun_link_ops);
3727*4882a593Smuzhiyun 	unregister_netdevice_notifier(&tun_notifier_block);
3728*4882a593Smuzhiyun }
3729*4882a593Smuzhiyun 
3730*4882a593Smuzhiyun /* Get an underlying socket object from tun file.  Returns error unless file is
3731*4882a593Smuzhiyun  * attached to a device.  The returned object works like a packet socket, it
3732*4882a593Smuzhiyun  * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
3733*4882a593Smuzhiyun  * holding a reference to the file for as long as the socket is in use. */
3734*4882a593Smuzhiyun struct socket *tun_get_socket(struct file *file)
3735*4882a593Smuzhiyun {
3736*4882a593Smuzhiyun 	struct tun_file *tfile;
3737*4882a593Smuzhiyun 	if (file->f_op != &tun_fops)
3738*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
3739*4882a593Smuzhiyun 	tfile = file->private_data;
3740*4882a593Smuzhiyun 	if (!tfile)
3741*4882a593Smuzhiyun 		return ERR_PTR(-EBADFD);
3742*4882a593Smuzhiyun 	return &tfile->socket;
3743*4882a593Smuzhiyun }
3744*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(tun_get_socket);
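
/* Hedged caller-side sketch, not from this file: an in-kernel consumer
 * such as vhost-net typically resolves a tun fd like this, pairing
 * fget()/fput() to satisfy the lifetime rule in the comment above:
 *
 *	struct file *file = fget(fd);
 *	struct socket *sock;
 *
 *	if (!file)
 *		return ERR_PTR(-EBADF);
 *	sock = tun_get_socket(file);
 *	if (IS_ERR(sock))
 *		fput(file);	// not a tun file; drop the reference
 *	// on success, hold the file reference across sock_sendmsg()/
 *	// sock_recvmsg() calls, and fput(file) when done.
 */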
3745*4882a593Smuzhiyun 
3746*4882a593Smuzhiyun struct ptr_ring *tun_get_tx_ring(struct file *file)
3747*4882a593Smuzhiyun {
3748*4882a593Smuzhiyun 	struct tun_file *tfile;
3749*4882a593Smuzhiyun 
3750*4882a593Smuzhiyun 	if (file->f_op != &tun_fops)
3751*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
3752*4882a593Smuzhiyun 	tfile = file->private_data;
3753*4882a593Smuzhiyun 	if (!tfile)
3754*4882a593Smuzhiyun 		return ERR_PTR(-EBADFD);
3755*4882a593Smuzhiyun 	return &tfile->tx_ring;
3756*4882a593Smuzhiyun }
3757*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(tun_get_tx_ring);
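
/* Hedged note: the tx-ring variant serves the same in-kernel consumers
 * (vhost-net uses it to dequeue tx packets in batches, bypassing
 * sock_recvmsg()); the same file-lifetime rule applies, e.g.:
 *
 *	struct ptr_ring *ring = tun_get_tx_ring(file);
 *
 *	if (!IS_ERR(ring))
 *		n = ptr_ring_consume_batched(ring, ptrs, batch);
 */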
3758*4882a593Smuzhiyun 
3759*4882a593Smuzhiyun module_init(tun_init);
3760*4882a593Smuzhiyun module_exit(tun_cleanup);
3761*4882a593Smuzhiyun MODULE_DESCRIPTION(DRV_DESCRIPTION);
3762*4882a593Smuzhiyun MODULE_AUTHOR(DRV_COPYRIGHT);
3763*4882a593Smuzhiyun MODULE_LICENSE("GPL");
3764*4882a593Smuzhiyun MODULE_ALIAS_MISCDEV(TUN_MINOR);
3765*4882a593Smuzhiyun MODULE_ALIAS("devname:net/tun");
3766