// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/sched/isolation.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/cpu.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

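/* "Alive" means not yet unregistering: reg_state values NETREG_UNINITIALIZED
 * (0) and NETREG_REGISTERED (1) compare <= NETREG_REGISTERED, while the
 * NETREG_UNREGISTERING/NETREG_UNREGISTERED states compare greater. Attribute
 * handlers use this to fail with -EINVAL once device teardown has started.
 */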
static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}

/* use same locking rules as GIF* ioctls */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = (*format)(ndev, buf);
	read_unlock(&dev_base_lock);

	return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *dev, char *buf)	\
{									\
	return sprintf(buf, format_string, dev->field);			\
}									\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}									\

#define NETDEVICE_SHOW_RO(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RW(field)

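/* For example, NETDEVICE_SHOW_RW(mtu, fmt_dec) expands to format_mtu() and
 * mtu_show() plus "static DEVICE_ATTR_RW(mtu)", i.e. a 0644 dev_attr_mtu
 * wired to mtu_show()/mtu_store(); the _store callback must be supplied
 * separately (see mtu_store() further below).
 */
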
/* use same locking and permission rules as SIF* ioctls */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = (*set)(netdev, new);
		if (ret == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}

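/* Why rtnl_trylock() + restart_syscall() in netdev_store(): a task holding
 * the RTNL while unregistering a device must be able to remove these sysfs
 * files without deadlocking against a writer blocked on the RTNL.
 * restart_syscall() sets TIF_SIGPENDING and returns -ERESTARTNOINTR, so the
 * write(2) is transparently restarted once the task re-enters the kernel.
 */
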
NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);

static ssize_t iflink_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, dev_get_iflink(ndev));
}
static DEVICE_ATTR_RO(iflink);

static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
	return sprintf(buf, fmt_dec, dev->name_assign_type);
}

static ssize_t name_assign_type_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	if (ndev->name_assign_type != NET_NAME_UNKNOWN)
		ret = netdev_show(dev, attr, buf, format_name_assign_type);

	return ret;
}
static DEVICE_ATTR_RO(name_assign_type);

/* use same locking rules as the GIFHWADDR ioctl */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(ndev))
		ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
	read_unlock(&dev_base_lock);
	return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = to_net_dev(dev);

	if (dev_isalive(ndev))
		return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
	return -EINVAL;
}
static DEVICE_ATTR_RO(broadcast);

static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
	if (!netif_running(dev))
		return -EINVAL;
	return dev_change_carrier(dev, (bool)new_carrier);
}

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);

	/* The check is also done in change_carrier; this helps returning early
	 * without hitting the trylock/restart in netdev_store.
	 */
	if (!netdev->netdev_ops->ndo_change_carrier)
		return -EOPNOTSUPP;

	return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RW(carrier);

static ssize_t speed_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	/* The check is also done in __ethtool_get_link_ksettings; this helps
	 * returning early without hitting the trylock/restart below.
	 */
	if (!netdev->ethtool_ops->get_link_ksettings)
		return ret;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev) && netif_device_present(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd))
			ret = sprintf(buf, fmt_dec, cmd.base.speed);
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(speed);
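/* Note: when the driver cannot determine the link speed, cmd.base.speed is
 * SPEED_UNKNOWN, which is defined as -1 in <uapi/linux/ethtool.h>, so this
 * attribute prints "-1" rather than failing.
 */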

static ssize_t duplex_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	/* The check is also done in __ethtool_get_link_ksettings; this helps
	 * returning early without hitting the trylock/restart below.
	 */
	if (!netdev->ethtool_ops->get_link_ksettings)
		return ret;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_link_ksettings cmd;

		if (!__ethtool_get_link_ksettings(netdev, &cmd)) {
			const char *duplex;

			switch (cmd.base.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}
			ret = sprintf(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(duplex);

static ssize_t testing_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_testing(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(testing);

static ssize_t dormant_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing",
	"dormant",
	"up"
};
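/* The strings above are indexed by the IF_OPER_* values from
 * <uapi/linux/if.h> (IF_OPER_UNKNOWN == 0 through IF_OPER_UP == 6), which is
 * why operstate_show() can use netdev->operstate as a direct array index.
 */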

static ssize_t operstate_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	read_lock(&dev_base_lock);
	operstate = netdev->operstate;
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;
	read_unlock(&dev_base_lock);

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sprintf(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);

static ssize_t carrier_changes_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec,
		       atomic_read(&netdev->carrier_up_count) +
		       atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_changes);

static ssize_t carrier_up_count_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_up_count));
}
static DEVICE_ATTR_RO(carrier_up_count);

static ssize_t carrier_down_count_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_down_count));
}
static DEVICE_ATTR_RO(carrier_down_count);

/* read-write attributes */

static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
	return dev_set_mtu(dev, (int)new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);

static int change_flags(struct net_device *dev, unsigned long new_flags)
{
	return dev_change_flags(dev, (unsigned int)new_flags, NULL);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static ssize_t tx_queue_len_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, dev_change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec);

static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
	WRITE_ONCE(dev->gro_flush_timeout, val);
	return 0;
}

static ssize_t gro_flush_timeout_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);

static int change_napi_defer_hard_irqs(struct net_device *dev, unsigned long val)
{
	WRITE_ONCE(dev->napi_defer_hard_irqs, val);
	return 0;
}

static ssize_t napi_defer_hard_irqs_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_napi_defer_hard_irqs);
}
NETDEVICE_SHOW_RW(napi_defer_hard_irqs, fmt_dec);

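/* gro_flush_timeout and napi_defer_hard_irqs work as a pair: when both are
 * non-zero, napi_complete_done() may keep the device's hard IRQs masked and
 * re-poll from an hrtimer firing every gro_flush_timeout nanoseconds, for up
 * to napi_defer_hard_irqs consecutive empty polls, trading interrupt rate
 * for polling latency.
 */
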
static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret = 0;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		ret = dev_set_alias(netdev, buf, count);
		if (ret < 0)
			goto err;
		ret = len;
		netdev_state_change(netdev);
	}
err:
	rtnl_unlock();

	return ret;
}

static ssize_t ifalias_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	char tmp[IFALIASZ];
	ssize_t ret = 0;

	ret = dev_get_alias(netdev, tmp, sizeof(tmp));
	if (ret > 0)
		ret = sprintf(buf, "%s\n", tmp);
	return ret;
}
static DEVICE_ATTR_RW(ifalias);

static int change_group(struct net_device *dev, unsigned long new_group)
{
	dev_set_group(dev, (int)new_group);
	return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, 0644, group_show, group_store);

static int change_proto_down(struct net_device *dev, unsigned long proto_down)
{
	return dev_change_proto_down(dev, (bool)proto_down);
}

static ssize_t proto_down_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);

	/* The check is also done in change_proto_down; this helps returning
	 * early without hitting the trylock/restart in netdev_store.
	 */
	if (!netdev->netdev_ops->ndo_change_proto_down)
		return -EOPNOTSUPP;

	return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);

static ssize_t phys_port_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	/* The check is also done in dev_get_phys_port_id; this helps returning
	 * early without hitting the trylock/restart below.
	 */
	if (!netdev->netdev_ops->ndo_get_phys_port_id)
		return -EOPNOTSUPP;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid;

		ret = dev_get_phys_port_id(netdev, &ppid);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_id);

static ssize_t phys_port_name_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	/* The checks are also done in dev_get_phys_port_name; this helps
	 * returning early without hitting the trylock/restart below.
	 */
	if (!netdev->netdev_ops->ndo_get_phys_port_name &&
	    !netdev->netdev_ops->ndo_get_devlink_port)
		return -EOPNOTSUPP;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		char name[IFNAMSIZ];

		ret = dev_get_phys_port_name(netdev, name, sizeof(name));
		if (!ret)
			ret = sprintf(buf, "%s\n", name);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_port_name);

static ssize_t phys_switch_id_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	/* The checks are also done in dev_get_port_parent_id; this helps
	 * returning early without hitting the trylock/restart below. This
	 * works because recurse is false when calling dev_get_port_parent_id.
	 */
	if (!netdev->netdev_ops->ndo_get_port_parent_id &&
	    !netdev->netdev_ops->ndo_get_devlink_port)
		return -EOPNOTSUPP;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		struct netdev_phys_item_id ppid = { };

		ret = dev_get_port_parent_id(netdev, &ppid, false);
		if (!ret)
			ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
	}
	rtnl_unlock();

	return ret;
}
static DEVICE_ATTR_RO(phys_switch_id);

static struct attribute *net_class_attrs[] __ro_after_init = {
	&dev_attr_netdev_group.attr,
	&dev_attr_type.attr,
	&dev_attr_dev_id.attr,
	&dev_attr_dev_port.attr,
	&dev_attr_iflink.attr,
	&dev_attr_ifindex.attr,
	&dev_attr_name_assign_type.attr,
	&dev_attr_addr_assign_type.attr,
	&dev_attr_addr_len.attr,
	&dev_attr_link_mode.attr,
	&dev_attr_address.attr,
	&dev_attr_broadcast.attr,
	&dev_attr_speed.attr,
	&dev_attr_duplex.attr,
	&dev_attr_dormant.attr,
	&dev_attr_testing.attr,
	&dev_attr_operstate.attr,
	&dev_attr_carrier_changes.attr,
	&dev_attr_ifalias.attr,
	&dev_attr_carrier.attr,
	&dev_attr_mtu.attr,
	&dev_attr_flags.attr,
	&dev_attr_tx_queue_len.attr,
	&dev_attr_gro_flush_timeout.attr,
	&dev_attr_napi_defer_hard_irqs.attr,
	&dev_attr_phys_port_id.attr,
	&dev_attr_phys_port_name.attr,
	&dev_attr_phys_switch_id.attr,
	&dev_attr_proto_down.attr,
	&dev_attr_carrier_up_count.attr,
	&dev_attr_carrier_down_count.attr,
	NULL,
};
ATTRIBUTE_GROUPS(net_class);

/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset));
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t name##_show(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR_RO(name)

NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
NETSTAT_ENTRY(rx_nohandler);

static struct attribute *netstat_attrs[] __ro_after_init = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	&dev_attr_rx_nohandler.attr,
	NULL
};

static const struct attribute_group netstat_group = {
	.name  = "statistics",
	.attrs  = netstat_attrs,
};

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
	NULL
};

static const struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups	NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) \
	container_of(_attr, struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};

#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	int i, len;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
	rcu_read_unlock();
	free_cpumask_var(mask);

	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i, hk_flags;
	static DEFINE_MUTEX(rps_map_mutex);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	if (!cpumask_empty(mask)) {
		hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
		cpumask_and(mask, mask, housekeeping_cpumask(hk_flags));
		if (cpumask_empty(mask)) {
			free_cpumask_var(mask);
			return -EINVAL;
		}
	}

	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i) {
		map->len = i;
	} else {
		kfree(map);
		map = NULL;
	}

	mutex_lock(&rps_map_mutex);
	old_map = rcu_dereference_protected(queue->rps_map,
					    mutex_is_locked(&rps_map_mutex));
	rcu_assign_pointer(queue->rps_map, map);

	if (map)
		static_branch_inc(&rps_needed);
	if (old_map)
		static_branch_dec(&rps_needed);

	mutex_unlock(&rps_map_mutex);

	if (old_map)
		kfree_rcu(old_map, rcu);

	free_cpumask_var(mask);
	return len;
}

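/* Typical usage, per Documentation/networking/scaling.rst: writing a CPU
 * bitmask such as "f" to /sys/class/net/<dev>/queues/rx-<n>/rps_cpus steers
 * packets from that RX queue onto CPUs 0-3. rps_map_mutex is a mutex rather
 * than a spinlock because static_branch_inc()/dec() may sleep.
 */
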
static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = (unsigned long)flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);
	vfree(table);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
		/* On 64 bit arches, must check mask fits in table->mask (u32),
		 * and on 32bit arches, must check
		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
		 */
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->mask = mask;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else {
		table = NULL;
	}

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}

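/* The bit-smearing loop in store_rps_dev_flow_table_cnt() computes
 * roundup_pow_of_two(count) - 1 without intermediate overflow: e.g.
 * count = 100 gives mask = 99 (0b1100011), which smears to 127 (0b1111111),
 * so a table of 128 entries is allocated. Exact powers of two are preserved:
 * count = 128 gives mask = 127 and a table of 128.
 */
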
static struct rx_queue_attribute rps_cpus_attribute __ro_after_init
	= __ATTR(rps_cpus, 0644, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute __ro_after_init
	= __ATTR(rps_flow_cnt, 0644,
		 show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */

static struct attribute *rx_queue_default_attrs[] __ro_after_init = {
#ifdef CONFIG_RPS
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
#endif
	NULL
};
ATTRIBUTE_GROUPS(rx_queue_default);

static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}
#endif

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}

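/* rx_queue_release() passes "1" as the condition to
 * rcu_dereference_protected(): by the time the kobject's refcount hits zero
 * no readers can reach the queue anymore, so no lock needs to be held and the
 * constant unconditionally satisfies lockdep.
 */
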
static const void *rx_queue_namespace(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct device *dev = &queue->dev->dev;
	const void *ns = NULL;

	if (dev->class && dev->class->ns_type)
		ns = dev->class->namespace(dev);

	return ns;
}

static void rx_queue_get_ownership(struct kobject *kobj,
				   kuid_t *uid, kgid_t *gid)
{
	const struct net *net = rx_queue_namespace(kobj);

	net_ns_get_ownership(net, uid, gid);
}

static struct kobj_type rx_queue_ktype __ro_after_init = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_groups = rx_queue_default_groups,
	.namespace = rx_queue_namespace,
	.get_ownership = rx_queue_get_ownership,
};

static int rx_queue_add_kobject(struct net_device *dev, int index)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	/* A later kobject_put() will trigger rx_queue_release(), which
	 * decreases the dev refcount: take that reference here.
	 */
	dev_hold(queue->dev);

	kobj->kset = dev->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
				     "rx-%u", index);
	if (error)
		goto err;

	if (dev->sysfs_rx_queue_group) {
		error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
		if (error)
			goto err;
	}

	kobject_uevent(kobj, KOBJ_ADD);

	return error;

err:
	kobject_put(kobj);
	return error;
}

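/* On the error path above, kobject_put() is correct even when
 * kobject_init_and_add() itself failed: the kobject was still initialized,
 * so the put invokes rx_queue_release(), which drops the dev_hold()
 * reference taken at the top of rx_queue_add_kobject().
 */
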
static int rx_queue_change_owner(struct net_device *dev, int index, kuid_t kuid,
				 kgid_t kgid)
{
	struct netdev_rx_queue *queue = dev->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error;

	error = sysfs_change_owner(kobj, kuid, kgid);
	if (error)
		return error;

	if (dev->sysfs_rx_queue_group)
		error = sysfs_group_change_owner(
			kobj, dev->sysfs_rx_queue_group, kuid, kgid);

	return error;
}
#endif /* CONFIG_SYSFS */

int
net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(dev, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct kobject *kobj = &dev->_rx[i].kobj;

		if (!refcount_read(&dev_net(dev)->count))
			kobj->uevent_suppress = 1;
		if (dev->sysfs_rx_queue_group)
			sysfs_remove_group(kobj, dev->sysfs_rx_queue_group);
		kobject_put(kobj);
	}

	return error;
#else
	return 0;
#endif
}

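/* net_rx_queue_update_kobjects() both grows and shrinks the set of rx-<n>
 * kobjects. If adding queue i fails midway, new_num is clamped back to
 * old_num so the while loop unwinds every queue added so far. uevents are
 * suppressed when the owning netns refcount has already reached zero, i.e.
 * the namespace is being dismantled and no listener can use them.
 */
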
static int net_rx_queue_change_owner(struct net_device *dev, int num,
				     kuid_t kuid, kgid_t kgid)
{
#ifdef CONFIG_SYSFS
	int error = 0;
	int i;

#ifndef CONFIG_RPS
	if (!dev->sysfs_rx_queue_group)
		return 0;
#endif
	for (i = 0; i < num; i++) {
		error = rx_queue_change_owner(dev, i, kuid, kgid);
		if (error)
			break;
	}

	return error;
#else
	return 0;
#endif
}

1115*4882a593Smuzhiyun #ifdef CONFIG_SYSFS
1116*4882a593Smuzhiyun /*
1117*4882a593Smuzhiyun  * netdev_queue sysfs structures and functions.
1118*4882a593Smuzhiyun  */
1119*4882a593Smuzhiyun struct netdev_queue_attribute {
1120*4882a593Smuzhiyun 	struct attribute attr;
1121*4882a593Smuzhiyun 	ssize_t (*show)(struct netdev_queue *queue, char *buf);
1122*4882a593Smuzhiyun 	ssize_t (*store)(struct netdev_queue *queue,
1123*4882a593Smuzhiyun 			 const char *buf, size_t len);
1124*4882a593Smuzhiyun };
1125*4882a593Smuzhiyun #define to_netdev_queue_attr(_attr) \
1126*4882a593Smuzhiyun 	container_of(_attr, struct netdev_queue_attribute, attr)
1127*4882a593Smuzhiyun 
1128*4882a593Smuzhiyun #define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun static ssize_t netdev_queue_attr_show(struct kobject *kobj,
1131*4882a593Smuzhiyun 				      struct attribute *attr, char *buf)
1132*4882a593Smuzhiyun {
1133*4882a593Smuzhiyun 	const struct netdev_queue_attribute *attribute
1134*4882a593Smuzhiyun 		= to_netdev_queue_attr(attr);
1135*4882a593Smuzhiyun 	struct netdev_queue *queue = to_netdev_queue(kobj);
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun 	if (!attribute->show)
1138*4882a593Smuzhiyun 		return -EIO;
1139*4882a593Smuzhiyun 
1140*4882a593Smuzhiyun 	return attribute->show(queue, buf);
1141*4882a593Smuzhiyun }
1142*4882a593Smuzhiyun 
1143*4882a593Smuzhiyun static ssize_t netdev_queue_attr_store(struct kobject *kobj,
1144*4882a593Smuzhiyun 				       struct attribute *attr,
1145*4882a593Smuzhiyun 				       const char *buf, size_t count)
1146*4882a593Smuzhiyun {
1147*4882a593Smuzhiyun 	const struct netdev_queue_attribute *attribute
1148*4882a593Smuzhiyun 		= to_netdev_queue_attr(attr);
1149*4882a593Smuzhiyun 	struct netdev_queue *queue = to_netdev_queue(kobj);
1150*4882a593Smuzhiyun 
1151*4882a593Smuzhiyun 	if (!attribute->store)
1152*4882a593Smuzhiyun 		return -EIO;
1153*4882a593Smuzhiyun 
1154*4882a593Smuzhiyun 	return attribute->store(queue, buf, count);
1155*4882a593Smuzhiyun }
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun static const struct sysfs_ops netdev_queue_sysfs_ops = {
1158*4882a593Smuzhiyun 	.show = netdev_queue_attr_show,
1159*4882a593Smuzhiyun 	.store = netdev_queue_attr_store,
1160*4882a593Smuzhiyun };
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf)
1163*4882a593Smuzhiyun {
1164*4882a593Smuzhiyun 	unsigned long trans_timeout;
1165*4882a593Smuzhiyun 
1166*4882a593Smuzhiyun 	spin_lock_irq(&queue->_xmit_lock);
1167*4882a593Smuzhiyun 	trans_timeout = queue->trans_timeout;
1168*4882a593Smuzhiyun 	spin_unlock_irq(&queue->_xmit_lock);
1169*4882a593Smuzhiyun 
1170*4882a593Smuzhiyun 	return sprintf(buf, fmt_ulong, trans_timeout);
1171*4882a593Smuzhiyun }
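
/* Reading the attribute returns the cumulative count of transmit
 * timeouts seen on this queue, e.g. (device name hypothetical):
 *
 *   $ cat /sys/class/net/eth0/queues/tx-0/tx_timeout
 *   3
 */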
1172*4882a593Smuzhiyun 
1173*4882a593Smuzhiyun static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
1174*4882a593Smuzhiyun {
1175*4882a593Smuzhiyun 	struct net_device *dev = queue->dev;
1176*4882a593Smuzhiyun 	unsigned int i;
1177*4882a593Smuzhiyun 
1178*4882a593Smuzhiyun 	i = queue - dev->_tx;
1179*4882a593Smuzhiyun 	BUG_ON(i >= dev->num_tx_queues);
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun 	return i;
1182*4882a593Smuzhiyun }
1183*4882a593Smuzhiyun 
1184*4882a593Smuzhiyun static ssize_t traffic_class_show(struct netdev_queue *queue,
1185*4882a593Smuzhiyun 				  char *buf)
1186*4882a593Smuzhiyun {
1187*4882a593Smuzhiyun 	struct net_device *dev = queue->dev;
1188*4882a593Smuzhiyun 	int index;
1189*4882a593Smuzhiyun 	int tc;
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun 	if (!netif_is_multiqueue(dev))
1192*4882a593Smuzhiyun 		return -ENOENT;
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 	index = get_netdev_queue_index(queue);
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun 	/* If queue belongs to subordinate dev use its TC mapping */
1197*4882a593Smuzhiyun 	dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
1198*4882a593Smuzhiyun 
1199*4882a593Smuzhiyun 	tc = netdev_txq_to_tc(dev, index);
1200*4882a593Smuzhiyun 	if (tc < 0)
1201*4882a593Smuzhiyun 		return -EINVAL;
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun 	/* We can report the traffic class one of two ways:
1204*4882a593Smuzhiyun 	 * a queue on a subordinate device is reported with the traffic
1205*4882a593Smuzhiyun 	 * class first and the subordinate channel second, so TC0 on
1206*4882a593Smuzhiyun 	 * subordinate device 2 is reported as "0-2". If the queue
1207*4882a593Smuzhiyun 	 * belongs to the root device, only the traffic class is printed,
1208*4882a593Smuzhiyun 	 * e.g. just "0" for TC 0.
1209*4882a593Smuzhiyun 	 */
1210*4882a593Smuzhiyun 	return dev->num_tc < 0 ? sprintf(buf, "%d%d\n", tc, dev->num_tc) :
1211*4882a593Smuzhiyun 				 sprintf(buf, "%d\n", tc);
1212*4882a593Smuzhiyun }
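
/* Example reads for the two formats above (device and mapping are
 * hypothetical):
 *
 *   $ cat /sys/class/net/eth0/queues/tx-0/traffic_class
 *   0          queue maps to TC 0 on the root device
 *   0-2        same TC on subordinate channel 2 (num_tc == -2)
 */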
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun #ifdef CONFIG_XPS
1215*4882a593Smuzhiyun static ssize_t tx_maxrate_show(struct netdev_queue *queue,
1216*4882a593Smuzhiyun 			       char *buf)
1217*4882a593Smuzhiyun {
1218*4882a593Smuzhiyun 	return sprintf(buf, "%lu\n", queue->tx_maxrate);
1219*4882a593Smuzhiyun }
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun static ssize_t tx_maxrate_store(struct netdev_queue *queue,
1222*4882a593Smuzhiyun 				const char *buf, size_t len)
1223*4882a593Smuzhiyun {
1224*4882a593Smuzhiyun 	struct net_device *dev = queue->dev;
1225*4882a593Smuzhiyun 	int err, index = get_netdev_queue_index(queue);
1226*4882a593Smuzhiyun 	u32 rate = 0;
1227*4882a593Smuzhiyun 
1228*4882a593Smuzhiyun 	if (!capable(CAP_NET_ADMIN))
1229*4882a593Smuzhiyun 		return -EPERM;
1230*4882a593Smuzhiyun 
1231*4882a593Smuzhiyun 	/* The check is also done later; this helps returning early without
1232*4882a593Smuzhiyun 	 * hitting the trylock/restart below.
1233*4882a593Smuzhiyun 	 */
1234*4882a593Smuzhiyun 	if (!dev->netdev_ops->ndo_set_tx_maxrate)
1235*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1236*4882a593Smuzhiyun 
1237*4882a593Smuzhiyun 	err = kstrtou32(buf, 10, &rate);
1238*4882a593Smuzhiyun 	if (err < 0)
1239*4882a593Smuzhiyun 		return err;
1240*4882a593Smuzhiyun 
1241*4882a593Smuzhiyun 	if (!rtnl_trylock())
1242*4882a593Smuzhiyun 		return restart_syscall();
1243*4882a593Smuzhiyun 
1244*4882a593Smuzhiyun 	err = -EOPNOTSUPP;
1245*4882a593Smuzhiyun 	if (dev->netdev_ops->ndo_set_tx_maxrate)
1246*4882a593Smuzhiyun 		err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate);
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun 	rtnl_unlock();
1249*4882a593Smuzhiyun 	if (!err) {
1250*4882a593Smuzhiyun 		queue->tx_maxrate = rate;
1251*4882a593Smuzhiyun 		return len;
1252*4882a593Smuzhiyun 	}
1253*4882a593Smuzhiyun 	return err;
1254*4882a593Smuzhiyun }
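
/* Usage sketch, assuming a driver that implements ndo_set_tx_maxrate
 * (device name hypothetical); the value is a rate in Mbps and 0 means
 * no limit:
 *
 *   $ echo 100 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 */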
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun static struct netdev_queue_attribute queue_tx_maxrate __ro_after_init
1257*4882a593Smuzhiyun 	= __ATTR_RW(tx_maxrate);
1258*4882a593Smuzhiyun #endif
1259*4882a593Smuzhiyun 
1260*4882a593Smuzhiyun static struct netdev_queue_attribute queue_trans_timeout __ro_after_init
1261*4882a593Smuzhiyun 	= __ATTR_RO(tx_timeout);
1262*4882a593Smuzhiyun 
1263*4882a593Smuzhiyun static struct netdev_queue_attribute queue_traffic_class __ro_after_init
1264*4882a593Smuzhiyun 	= __ATTR_RO(traffic_class);
1265*4882a593Smuzhiyun 
1266*4882a593Smuzhiyun #ifdef CONFIG_BQL
1267*4882a593Smuzhiyun /*
1268*4882a593Smuzhiyun  * Byte queue limits sysfs structures and functions.
1269*4882a593Smuzhiyun  */
1270*4882a593Smuzhiyun static ssize_t bql_show(char *buf, unsigned int value)
1271*4882a593Smuzhiyun {
1272*4882a593Smuzhiyun 	return sprintf(buf, "%u\n", value);
1273*4882a593Smuzhiyun }
1274*4882a593Smuzhiyun 
1275*4882a593Smuzhiyun static ssize_t bql_set(const char *buf, const size_t count,
1276*4882a593Smuzhiyun 		       unsigned int *pvalue)
1277*4882a593Smuzhiyun {
1278*4882a593Smuzhiyun 	unsigned int value;
1279*4882a593Smuzhiyun 	int err;
1280*4882a593Smuzhiyun 
1281*4882a593Smuzhiyun 	if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) {
1282*4882a593Smuzhiyun 		value = DQL_MAX_LIMIT;
1283*4882a593Smuzhiyun 	} else {
1284*4882a593Smuzhiyun 		err = kstrtouint(buf, 10, &value);
1285*4882a593Smuzhiyun 		if (err < 0)
1286*4882a593Smuzhiyun 			return err;
1287*4882a593Smuzhiyun 		if (value > DQL_MAX_LIMIT)
1288*4882a593Smuzhiyun 			return -EINVAL;
1289*4882a593Smuzhiyun 	}
1290*4882a593Smuzhiyun 
1291*4882a593Smuzhiyun 	*pvalue = value;
1292*4882a593Smuzhiyun 
1293*4882a593Smuzhiyun 	return count;
1294*4882a593Smuzhiyun }
1295*4882a593Smuzhiyun 
1296*4882a593Smuzhiyun static ssize_t bql_show_hold_time(struct netdev_queue *queue,
1297*4882a593Smuzhiyun 				  char *buf)
1298*4882a593Smuzhiyun {
1299*4882a593Smuzhiyun 	struct dql *dql = &queue->dql;
1300*4882a593Smuzhiyun 
1301*4882a593Smuzhiyun 	return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
1302*4882a593Smuzhiyun }
1303*4882a593Smuzhiyun 
1304*4882a593Smuzhiyun static ssize_t bql_set_hold_time(struct netdev_queue *queue,
1305*4882a593Smuzhiyun 				 const char *buf, size_t len)
1306*4882a593Smuzhiyun {
1307*4882a593Smuzhiyun 	struct dql *dql = &queue->dql;
1308*4882a593Smuzhiyun 	unsigned int value;
1309*4882a593Smuzhiyun 	int err;
1310*4882a593Smuzhiyun 
1311*4882a593Smuzhiyun 	err = kstrtouint(buf, 10, &value);
1312*4882a593Smuzhiyun 	if (err < 0)
1313*4882a593Smuzhiyun 		return err;
1314*4882a593Smuzhiyun 
1315*4882a593Smuzhiyun 	dql->slack_hold_time = msecs_to_jiffies(value);
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun 	return len;
1318*4882a593Smuzhiyun }
1319*4882a593Smuzhiyun 
1320*4882a593Smuzhiyun static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init
1321*4882a593Smuzhiyun 	= __ATTR(hold_time, 0644,
1322*4882a593Smuzhiyun 		 bql_show_hold_time, bql_set_hold_time);
1323*4882a593Smuzhiyun 
1324*4882a593Smuzhiyun static ssize_t bql_show_inflight(struct netdev_queue *queue,
1325*4882a593Smuzhiyun 				 char *buf)
1326*4882a593Smuzhiyun {
1327*4882a593Smuzhiyun 	struct dql *dql = &queue->dql;
1328*4882a593Smuzhiyun 
1329*4882a593Smuzhiyun 	return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
1330*4882a593Smuzhiyun }
1331*4882a593Smuzhiyun 
1332*4882a593Smuzhiyun static struct netdev_queue_attribute bql_inflight_attribute __ro_after_init =
1333*4882a593Smuzhiyun 	__ATTR(inflight, 0444, bql_show_inflight, NULL);
1334*4882a593Smuzhiyun 
1335*4882a593Smuzhiyun #define BQL_ATTR(NAME, FIELD)						\
1336*4882a593Smuzhiyun static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,		\
1337*4882a593Smuzhiyun 				 char *buf)				\
1338*4882a593Smuzhiyun {									\
1339*4882a593Smuzhiyun 	return bql_show(buf, queue->dql.FIELD);				\
1340*4882a593Smuzhiyun }									\
1341*4882a593Smuzhiyun 									\
1342*4882a593Smuzhiyun static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,		\
1343*4882a593Smuzhiyun 				const char *buf, size_t len)		\
1344*4882a593Smuzhiyun {									\
1345*4882a593Smuzhiyun 	return bql_set(buf, len, &queue->dql.FIELD);			\
1346*4882a593Smuzhiyun }									\
1347*4882a593Smuzhiyun 									\
1348*4882a593Smuzhiyun static struct netdev_queue_attribute bql_ ## NAME ## _attribute __ro_after_init \
1349*4882a593Smuzhiyun 	= __ATTR(NAME, 0644,				\
1350*4882a593Smuzhiyun 		 bql_show_ ## NAME, bql_set_ ## NAME)
1351*4882a593Smuzhiyun 
1352*4882a593Smuzhiyun BQL_ATTR(limit, limit);
1353*4882a593Smuzhiyun BQL_ATTR(limit_max, max_limit);
1354*4882a593Smuzhiyun BQL_ATTR(limit_min, min_limit);
1355*4882a593Smuzhiyun 
1356*4882a593Smuzhiyun static struct attribute *dql_attrs[] __ro_after_init = {
1357*4882a593Smuzhiyun 	&bql_limit_attribute.attr,
1358*4882a593Smuzhiyun 	&bql_limit_max_attribute.attr,
1359*4882a593Smuzhiyun 	&bql_limit_min_attribute.attr,
1360*4882a593Smuzhiyun 	&bql_hold_time_attribute.attr,
1361*4882a593Smuzhiyun 	&bql_inflight_attribute.attr,
1362*4882a593Smuzhiyun 	NULL
1363*4882a593Smuzhiyun };
1364*4882a593Smuzhiyun 
1365*4882a593Smuzhiyun static const struct attribute_group dql_group = {
1366*4882a593Smuzhiyun 	.name  = "byte_queue_limits",
1367*4882a593Smuzhiyun 	.attrs  = dql_attrs,
1368*4882a593Smuzhiyun };
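
/* The group above appears per TX queue as
 * /sys/class/net/<dev>/queues/tx-<n>/byte_queue_limits/. Illustrative
 * usage (paths abbreviated): cap the upper limit, or write "max" to
 * set DQL_MAX_LIMIT as parsed by bql_set():
 *
 *   $ echo 262144 > .../byte_queue_limits/limit_max
 *   $ echo max > .../byte_queue_limits/limit_max
 *   $ cat .../byte_queue_limits/inflight
 *   0
 */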
1369*4882a593Smuzhiyun #endif /* CONFIG_BQL */
1370*4882a593Smuzhiyun 
1371*4882a593Smuzhiyun #ifdef CONFIG_XPS
1372*4882a593Smuzhiyun static ssize_t xps_cpus_show(struct netdev_queue *queue,
1373*4882a593Smuzhiyun 			     char *buf)
1374*4882a593Smuzhiyun {
1375*4882a593Smuzhiyun 	int cpu, len, ret, num_tc = 1, tc = 0;
1376*4882a593Smuzhiyun 	struct net_device *dev = queue->dev;
1377*4882a593Smuzhiyun 	struct xps_dev_maps *dev_maps;
1378*4882a593Smuzhiyun 	cpumask_var_t mask;
1379*4882a593Smuzhiyun 	unsigned long index;
1380*4882a593Smuzhiyun 
1381*4882a593Smuzhiyun 	if (!netif_is_multiqueue(dev))
1382*4882a593Smuzhiyun 		return -ENOENT;
1383*4882a593Smuzhiyun 
1384*4882a593Smuzhiyun 	index = get_netdev_queue_index(queue);
1385*4882a593Smuzhiyun 
1386*4882a593Smuzhiyun 	if (!rtnl_trylock())
1387*4882a593Smuzhiyun 		return restart_syscall();
1388*4882a593Smuzhiyun 
1389*4882a593Smuzhiyun 	if (dev->num_tc) {
1390*4882a593Smuzhiyun 		/* Do not allow XPS on subordinate device directly */
1391*4882a593Smuzhiyun 		num_tc = dev->num_tc;
1392*4882a593Smuzhiyun 		if (num_tc < 0) {
1393*4882a593Smuzhiyun 			ret = -EINVAL;
1394*4882a593Smuzhiyun 			goto err_rtnl_unlock;
1395*4882a593Smuzhiyun 		}
1396*4882a593Smuzhiyun 
1397*4882a593Smuzhiyun 		/* If queue belongs to subordinate dev use its map */
1398*4882a593Smuzhiyun 		dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
1399*4882a593Smuzhiyun 
1400*4882a593Smuzhiyun 		tc = netdev_txq_to_tc(dev, index);
1401*4882a593Smuzhiyun 		if (tc < 0) {
1402*4882a593Smuzhiyun 			ret = -EINVAL;
1403*4882a593Smuzhiyun 			goto err_rtnl_unlock;
1404*4882a593Smuzhiyun 		}
1405*4882a593Smuzhiyun 	}
1406*4882a593Smuzhiyun 
1407*4882a593Smuzhiyun 	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
1408*4882a593Smuzhiyun 		ret = -ENOMEM;
1409*4882a593Smuzhiyun 		goto err_rtnl_unlock;
1410*4882a593Smuzhiyun 	}
1411*4882a593Smuzhiyun 
1412*4882a593Smuzhiyun 	rcu_read_lock();
1413*4882a593Smuzhiyun 	dev_maps = rcu_dereference(dev->xps_cpus_map);
1414*4882a593Smuzhiyun 	if (dev_maps) {
1415*4882a593Smuzhiyun 		for_each_possible_cpu(cpu) {
1416*4882a593Smuzhiyun 			int i, tci = cpu * num_tc + tc;
1417*4882a593Smuzhiyun 			struct xps_map *map;
1418*4882a593Smuzhiyun 
1419*4882a593Smuzhiyun 			map = rcu_dereference(dev_maps->attr_map[tci]);
1420*4882a593Smuzhiyun 			if (!map)
1421*4882a593Smuzhiyun 				continue;
1422*4882a593Smuzhiyun 
1423*4882a593Smuzhiyun 			for (i = map->len; i--;) {
1424*4882a593Smuzhiyun 				if (map->queues[i] == index) {
1425*4882a593Smuzhiyun 					cpumask_set_cpu(cpu, mask);
1426*4882a593Smuzhiyun 					break;
1427*4882a593Smuzhiyun 				}
1428*4882a593Smuzhiyun 			}
1429*4882a593Smuzhiyun 		}
1430*4882a593Smuzhiyun 	}
1431*4882a593Smuzhiyun 	rcu_read_unlock();
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun 	rtnl_unlock();
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun 	len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
1436*4882a593Smuzhiyun 	free_cpumask_var(mask);
1437*4882a593Smuzhiyun 	return len < PAGE_SIZE ? len : -EINVAL;
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun err_rtnl_unlock:
1440*4882a593Smuzhiyun 	rtnl_unlock();
1441*4882a593Smuzhiyun 	return ret;
1442*4882a593Smuzhiyun }
1443*4882a593Smuzhiyun 
1444*4882a593Smuzhiyun static ssize_t xps_cpus_store(struct netdev_queue *queue,
1445*4882a593Smuzhiyun 			      const char *buf, size_t len)
1446*4882a593Smuzhiyun {
1447*4882a593Smuzhiyun 	struct net_device *dev = queue->dev;
1448*4882a593Smuzhiyun 	unsigned long index;
1449*4882a593Smuzhiyun 	cpumask_var_t mask;
1450*4882a593Smuzhiyun 	int err;
1451*4882a593Smuzhiyun 
1452*4882a593Smuzhiyun 	if (!netif_is_multiqueue(dev))
1453*4882a593Smuzhiyun 		return -ENOENT;
1454*4882a593Smuzhiyun 
1455*4882a593Smuzhiyun 	if (!capable(CAP_NET_ADMIN))
1456*4882a593Smuzhiyun 		return -EPERM;
1457*4882a593Smuzhiyun 
1458*4882a593Smuzhiyun 	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
1459*4882a593Smuzhiyun 		return -ENOMEM;
1460*4882a593Smuzhiyun 
1461*4882a593Smuzhiyun 	index = get_netdev_queue_index(queue);
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun 	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
1464*4882a593Smuzhiyun 	if (err) {
1465*4882a593Smuzhiyun 		free_cpumask_var(mask);
1466*4882a593Smuzhiyun 		return err;
1467*4882a593Smuzhiyun 	}
1468*4882a593Smuzhiyun 
1469*4882a593Smuzhiyun 	if (!rtnl_trylock()) {
1470*4882a593Smuzhiyun 		free_cpumask_var(mask);
1471*4882a593Smuzhiyun 		return restart_syscall();
1472*4882a593Smuzhiyun 	}
1473*4882a593Smuzhiyun 
1474*4882a593Smuzhiyun 	err = netif_set_xps_queue(dev, mask, index);
1475*4882a593Smuzhiyun 	rtnl_unlock();
1476*4882a593Smuzhiyun 
1477*4882a593Smuzhiyun 	free_cpumask_var(mask);
1478*4882a593Smuzhiyun 
1479*4882a593Smuzhiyun 	return err ? : len;
1480*4882a593Smuzhiyun }
1481*4882a593Smuzhiyun 
1482*4882a593Smuzhiyun static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init
1483*4882a593Smuzhiyun 	= __ATTR_RW(xps_cpus);
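
/* Illustrative usage on a hypothetical 4-CPU box: let only CPUs 2-3
 * pick tx-1, then read the hex CPU mask back:
 *
 *   $ echo c > /sys/class/net/eth0/queues/tx-1/xps_cpus
 *   $ cat /sys/class/net/eth0/queues/tx-1/xps_cpus
 *   c
 */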
1484*4882a593Smuzhiyun 
1485*4882a593Smuzhiyun static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
1486*4882a593Smuzhiyun {
1487*4882a593Smuzhiyun 	int j, len, ret, num_tc = 1, tc = 0;
1488*4882a593Smuzhiyun 	struct net_device *dev = queue->dev;
1489*4882a593Smuzhiyun 	struct xps_dev_maps *dev_maps;
1490*4882a593Smuzhiyun 	unsigned long *mask, index;
1491*4882a593Smuzhiyun 
1492*4882a593Smuzhiyun 	index = get_netdev_queue_index(queue);
1493*4882a593Smuzhiyun 
1494*4882a593Smuzhiyun 	if (!rtnl_trylock())
1495*4882a593Smuzhiyun 		return restart_syscall();
1496*4882a593Smuzhiyun 
1497*4882a593Smuzhiyun 	if (dev->num_tc) {
1498*4882a593Smuzhiyun 		num_tc = dev->num_tc;
1499*4882a593Smuzhiyun 		tc = netdev_txq_to_tc(dev, index);
1500*4882a593Smuzhiyun 		if (tc < 0) {
1501*4882a593Smuzhiyun 			ret = -EINVAL;
1502*4882a593Smuzhiyun 			goto err_rtnl_unlock;
1503*4882a593Smuzhiyun 		}
1504*4882a593Smuzhiyun 	}
1505*4882a593Smuzhiyun 	mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
1506*4882a593Smuzhiyun 	if (!mask) {
1507*4882a593Smuzhiyun 		ret = -ENOMEM;
1508*4882a593Smuzhiyun 		goto err_rtnl_unlock;
1509*4882a593Smuzhiyun 	}
1510*4882a593Smuzhiyun 
1511*4882a593Smuzhiyun 	rcu_read_lock();
1512*4882a593Smuzhiyun 	dev_maps = rcu_dereference(dev->xps_rxqs_map);
1513*4882a593Smuzhiyun 	if (!dev_maps)
1514*4882a593Smuzhiyun 		goto out_no_maps;
1515*4882a593Smuzhiyun 
1516*4882a593Smuzhiyun 	for (j = -1; j = netif_attrmask_next(j, NULL, dev->num_rx_queues),
1517*4882a593Smuzhiyun 	     j < dev->num_rx_queues;) {
1518*4882a593Smuzhiyun 		int i, tci = j * num_tc + tc;
1519*4882a593Smuzhiyun 		struct xps_map *map;
1520*4882a593Smuzhiyun 
1521*4882a593Smuzhiyun 		map = rcu_dereference(dev_maps->attr_map[tci]);
1522*4882a593Smuzhiyun 		if (!map)
1523*4882a593Smuzhiyun 			continue;
1524*4882a593Smuzhiyun 
1525*4882a593Smuzhiyun 		for (i = map->len; i--;) {
1526*4882a593Smuzhiyun 			if (map->queues[i] == index) {
1527*4882a593Smuzhiyun 				set_bit(j, mask);
1528*4882a593Smuzhiyun 				break;
1529*4882a593Smuzhiyun 			}
1530*4882a593Smuzhiyun 		}
1531*4882a593Smuzhiyun 	}
1532*4882a593Smuzhiyun out_no_maps:
1533*4882a593Smuzhiyun 	rcu_read_unlock();
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun 	rtnl_unlock();
1536*4882a593Smuzhiyun 
1537*4882a593Smuzhiyun 	len = bitmap_print_to_pagebuf(false, buf, mask, dev->num_rx_queues);
1538*4882a593Smuzhiyun 	bitmap_free(mask);
1539*4882a593Smuzhiyun 
1540*4882a593Smuzhiyun 	return len < PAGE_SIZE ? len : -EINVAL;
1541*4882a593Smuzhiyun 
1542*4882a593Smuzhiyun err_rtnl_unlock:
1543*4882a593Smuzhiyun 	rtnl_unlock();
1544*4882a593Smuzhiyun 	return ret;
1545*4882a593Smuzhiyun }
1546*4882a593Smuzhiyun 
1547*4882a593Smuzhiyun static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
1548*4882a593Smuzhiyun 			      size_t len)
1549*4882a593Smuzhiyun {
1550*4882a593Smuzhiyun 	struct net_device *dev = queue->dev;
1551*4882a593Smuzhiyun 	struct net *net = dev_net(dev);
1552*4882a593Smuzhiyun 	unsigned long *mask, index;
1553*4882a593Smuzhiyun 	int err;
1554*4882a593Smuzhiyun 
1555*4882a593Smuzhiyun 	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1556*4882a593Smuzhiyun 		return -EPERM;
1557*4882a593Smuzhiyun 
1558*4882a593Smuzhiyun 	mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
1559*4882a593Smuzhiyun 	if (!mask)
1560*4882a593Smuzhiyun 		return -ENOMEM;
1561*4882a593Smuzhiyun 
1562*4882a593Smuzhiyun 	index = get_netdev_queue_index(queue);
1563*4882a593Smuzhiyun 
1564*4882a593Smuzhiyun 	err = bitmap_parse(buf, len, mask, dev->num_rx_queues);
1565*4882a593Smuzhiyun 	if (err) {
1566*4882a593Smuzhiyun 		bitmap_free(mask);
1567*4882a593Smuzhiyun 		return err;
1568*4882a593Smuzhiyun 	}
1569*4882a593Smuzhiyun 
1570*4882a593Smuzhiyun 	if (!rtnl_trylock()) {
1571*4882a593Smuzhiyun 		bitmap_free(mask);
1572*4882a593Smuzhiyun 		return restart_syscall();
1573*4882a593Smuzhiyun 	}
1574*4882a593Smuzhiyun 
1575*4882a593Smuzhiyun 	cpus_read_lock();
1576*4882a593Smuzhiyun 	err = __netif_set_xps_queue(dev, mask, index, true);
1577*4882a593Smuzhiyun 	cpus_read_unlock();
1578*4882a593Smuzhiyun 
1579*4882a593Smuzhiyun 	rtnl_unlock();
1580*4882a593Smuzhiyun 
1581*4882a593Smuzhiyun 	bitmap_free(mask);
1582*4882a593Smuzhiyun 	return err ? : len;
1583*4882a593Smuzhiyun }
1584*4882a593Smuzhiyun 
1585*4882a593Smuzhiyun static struct netdev_queue_attribute xps_rxqs_attribute __ro_after_init
1586*4882a593Smuzhiyun 	= __ATTR_RW(xps_rxqs);
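
/* Illustrative usage (device hypothetical): have flows received on
 * rx-0 transmit from tx-0 by writing a hex RX-queue mask:
 *
 *   $ echo 1 > /sys/class/net/eth0/queues/tx-0/xps_rxqs
 */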
1587*4882a593Smuzhiyun #endif /* CONFIG_XPS */
1588*4882a593Smuzhiyun 
1589*4882a593Smuzhiyun static struct attribute *netdev_queue_default_attrs[] __ro_after_init = {
1590*4882a593Smuzhiyun 	&queue_trans_timeout.attr,
1591*4882a593Smuzhiyun 	&queue_traffic_class.attr,
1592*4882a593Smuzhiyun #ifdef CONFIG_XPS
1593*4882a593Smuzhiyun 	&xps_cpus_attribute.attr,
1594*4882a593Smuzhiyun 	&xps_rxqs_attribute.attr,
1595*4882a593Smuzhiyun 	&queue_tx_maxrate.attr,
1596*4882a593Smuzhiyun #endif
1597*4882a593Smuzhiyun 	NULL
1598*4882a593Smuzhiyun };
1599*4882a593Smuzhiyun ATTRIBUTE_GROUPS(netdev_queue_default);
1600*4882a593Smuzhiyun 
1601*4882a593Smuzhiyun static void netdev_queue_release(struct kobject *kobj)
1602*4882a593Smuzhiyun {
1603*4882a593Smuzhiyun 	struct netdev_queue *queue = to_netdev_queue(kobj);
1604*4882a593Smuzhiyun 
1605*4882a593Smuzhiyun 	memset(kobj, 0, sizeof(*kobj));
1606*4882a593Smuzhiyun 	dev_put(queue->dev);
1607*4882a593Smuzhiyun }
1608*4882a593Smuzhiyun 
1609*4882a593Smuzhiyun static const void *netdev_queue_namespace(struct kobject *kobj)
1610*4882a593Smuzhiyun {
1611*4882a593Smuzhiyun 	struct netdev_queue *queue = to_netdev_queue(kobj);
1612*4882a593Smuzhiyun 	struct device *dev = &queue->dev->dev;
1613*4882a593Smuzhiyun 	const void *ns = NULL;
1614*4882a593Smuzhiyun 
1615*4882a593Smuzhiyun 	if (dev->class && dev->class->ns_type)
1616*4882a593Smuzhiyun 		ns = dev->class->namespace(dev);
1617*4882a593Smuzhiyun 
1618*4882a593Smuzhiyun 	return ns;
1619*4882a593Smuzhiyun }
1620*4882a593Smuzhiyun 
1621*4882a593Smuzhiyun static void netdev_queue_get_ownership(struct kobject *kobj,
1622*4882a593Smuzhiyun 				       kuid_t *uid, kgid_t *gid)
1623*4882a593Smuzhiyun {
1624*4882a593Smuzhiyun 	const struct net *net = netdev_queue_namespace(kobj);
1625*4882a593Smuzhiyun 
1626*4882a593Smuzhiyun 	net_ns_get_ownership(net, uid, gid);
1627*4882a593Smuzhiyun }
1628*4882a593Smuzhiyun 
1629*4882a593Smuzhiyun static struct kobj_type netdev_queue_ktype __ro_after_init = {
1630*4882a593Smuzhiyun 	.sysfs_ops = &netdev_queue_sysfs_ops,
1631*4882a593Smuzhiyun 	.release = netdev_queue_release,
1632*4882a593Smuzhiyun 	.default_groups = netdev_queue_default_groups,
1633*4882a593Smuzhiyun 	.namespace = netdev_queue_namespace,
1634*4882a593Smuzhiyun 	.get_ownership = netdev_queue_get_ownership,
1635*4882a593Smuzhiyun };
1636*4882a593Smuzhiyun 
1637*4882a593Smuzhiyun static int netdev_queue_add_kobject(struct net_device *dev, int index)
1638*4882a593Smuzhiyun {
1639*4882a593Smuzhiyun 	struct netdev_queue *queue = dev->_tx + index;
1640*4882a593Smuzhiyun 	struct kobject *kobj = &queue->kobj;
1641*4882a593Smuzhiyun 	int error = 0;
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun 	/* A later kobject_put() will trigger a netdev_queue_release() call,
1644*4882a593Smuzhiyun 	 * which decreases the dev refcount: take that reference here.
1645*4882a593Smuzhiyun 	 */
1646*4882a593Smuzhiyun 	dev_hold(queue->dev);
1647*4882a593Smuzhiyun 
1648*4882a593Smuzhiyun 	kobj->kset = dev->queues_kset;
1649*4882a593Smuzhiyun 	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
1650*4882a593Smuzhiyun 				     "tx-%u", index);
1651*4882a593Smuzhiyun 	if (error)
1652*4882a593Smuzhiyun 		goto err;
1653*4882a593Smuzhiyun 
1654*4882a593Smuzhiyun #ifdef CONFIG_BQL
1655*4882a593Smuzhiyun 	error = sysfs_create_group(kobj, &dql_group);
1656*4882a593Smuzhiyun 	if (error)
1657*4882a593Smuzhiyun 		goto err;
1658*4882a593Smuzhiyun #endif
1659*4882a593Smuzhiyun 
1660*4882a593Smuzhiyun 	kobject_uevent(kobj, KOBJ_ADD);
1661*4882a593Smuzhiyun 	return 0;
1662*4882a593Smuzhiyun 
1663*4882a593Smuzhiyun err:
1664*4882a593Smuzhiyun 	kobject_put(kobj);
1665*4882a593Smuzhiyun 	return error;
1666*4882a593Smuzhiyun }
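
/* A successful add exposes the queue as
 * /sys/class/net/<dev>/queues/tx-<index>/, with the byte_queue_limits/
 * group nested inside when CONFIG_BQL is enabled.
 */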
1667*4882a593Smuzhiyun 
1668*4882a593Smuzhiyun static int tx_queue_change_owner(struct net_device *ndev, int index,
1669*4882a593Smuzhiyun 				 kuid_t kuid, kgid_t kgid)
1670*4882a593Smuzhiyun {
1671*4882a593Smuzhiyun 	struct netdev_queue *queue = ndev->_tx + index;
1672*4882a593Smuzhiyun 	struct kobject *kobj = &queue->kobj;
1673*4882a593Smuzhiyun 	int error;
1674*4882a593Smuzhiyun 
1675*4882a593Smuzhiyun 	error = sysfs_change_owner(kobj, kuid, kgid);
1676*4882a593Smuzhiyun 	if (error)
1677*4882a593Smuzhiyun 		return error;
1678*4882a593Smuzhiyun 
1679*4882a593Smuzhiyun #ifdef CONFIG_BQL
1680*4882a593Smuzhiyun 	error = sysfs_group_change_owner(kobj, &dql_group, kuid, kgid);
1681*4882a593Smuzhiyun #endif
1682*4882a593Smuzhiyun 	return error;
1683*4882a593Smuzhiyun }
1684*4882a593Smuzhiyun #endif /* CONFIG_SYSFS */
1685*4882a593Smuzhiyun 
1686*4882a593Smuzhiyun int
1687*4882a593Smuzhiyun netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
1688*4882a593Smuzhiyun {
1689*4882a593Smuzhiyun #ifdef CONFIG_SYSFS
1690*4882a593Smuzhiyun 	int i;
1691*4882a593Smuzhiyun 	int error = 0;
1692*4882a593Smuzhiyun 
1693*4882a593Smuzhiyun 	for (i = old_num; i < new_num; i++) {
1694*4882a593Smuzhiyun 		error = netdev_queue_add_kobject(dev, i);
1695*4882a593Smuzhiyun 		if (error) {
1696*4882a593Smuzhiyun 			new_num = old_num;
1697*4882a593Smuzhiyun 			break;
1698*4882a593Smuzhiyun 		}
1699*4882a593Smuzhiyun 	}
1700*4882a593Smuzhiyun 
1701*4882a593Smuzhiyun 	while (--i >= new_num) {
1702*4882a593Smuzhiyun 		struct netdev_queue *queue = dev->_tx + i;
1703*4882a593Smuzhiyun 
1704*4882a593Smuzhiyun 		if (!refcount_read(&dev_net(dev)->count))
1705*4882a593Smuzhiyun 			queue->kobj.uevent_suppress = 1;
1706*4882a593Smuzhiyun #ifdef CONFIG_BQL
1707*4882a593Smuzhiyun 		sysfs_remove_group(&queue->kobj, &dql_group);
1708*4882a593Smuzhiyun #endif
1709*4882a593Smuzhiyun 		kobject_put(&queue->kobj);
1710*4882a593Smuzhiyun 	}
1711*4882a593Smuzhiyun 
1712*4882a593Smuzhiyun 	return error;
1713*4882a593Smuzhiyun #else
1714*4882a593Smuzhiyun 	return 0;
1715*4882a593Smuzhiyun #endif /* CONFIG_SYSFS */
1716*4882a593Smuzhiyun }
1717*4882a593Smuzhiyun 
1718*4882a593Smuzhiyun static int net_tx_queue_change_owner(struct net_device *dev, int num,
1719*4882a593Smuzhiyun 				     kuid_t kuid, kgid_t kgid)
1720*4882a593Smuzhiyun {
1721*4882a593Smuzhiyun #ifdef CONFIG_SYSFS
1722*4882a593Smuzhiyun 	int error = 0;
1723*4882a593Smuzhiyun 	int i;
1724*4882a593Smuzhiyun 
1725*4882a593Smuzhiyun 	for (i = 0; i < num; i++) {
1726*4882a593Smuzhiyun 		error = tx_queue_change_owner(dev, i, kuid, kgid);
1727*4882a593Smuzhiyun 		if (error)
1728*4882a593Smuzhiyun 			break;
1729*4882a593Smuzhiyun 	}
1730*4882a593Smuzhiyun 
1731*4882a593Smuzhiyun 	return error;
1732*4882a593Smuzhiyun #else
1733*4882a593Smuzhiyun 	return 0;
1734*4882a593Smuzhiyun #endif /* CONFIG_SYSFS */
1735*4882a593Smuzhiyun }
1736*4882a593Smuzhiyun 
1737*4882a593Smuzhiyun static int register_queue_kobjects(struct net_device *dev)
1738*4882a593Smuzhiyun {
1739*4882a593Smuzhiyun 	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;
1740*4882a593Smuzhiyun 
1741*4882a593Smuzhiyun #ifdef CONFIG_SYSFS
1742*4882a593Smuzhiyun 	dev->queues_kset = kset_create_and_add("queues",
1743*4882a593Smuzhiyun 					       NULL, &dev->dev.kobj);
1744*4882a593Smuzhiyun 	if (!dev->queues_kset)
1745*4882a593Smuzhiyun 		return -ENOMEM;
1746*4882a593Smuzhiyun 	real_rx = dev->real_num_rx_queues;
1747*4882a593Smuzhiyun #endif
1748*4882a593Smuzhiyun 	real_tx = dev->real_num_tx_queues;
1749*4882a593Smuzhiyun 
1750*4882a593Smuzhiyun 	error = net_rx_queue_update_kobjects(dev, 0, real_rx);
1751*4882a593Smuzhiyun 	if (error)
1752*4882a593Smuzhiyun 		goto error;
1753*4882a593Smuzhiyun 	rxq = real_rx;
1754*4882a593Smuzhiyun 
1755*4882a593Smuzhiyun 	error = netdev_queue_update_kobjects(dev, 0, real_tx);
1756*4882a593Smuzhiyun 	if (error)
1757*4882a593Smuzhiyun 		goto error;
1758*4882a593Smuzhiyun 	txq = real_tx;
1759*4882a593Smuzhiyun 
1760*4882a593Smuzhiyun 	return 0;
1761*4882a593Smuzhiyun 
1762*4882a593Smuzhiyun error:
1763*4882a593Smuzhiyun 	netdev_queue_update_kobjects(dev, txq, 0);
1764*4882a593Smuzhiyun 	net_rx_queue_update_kobjects(dev, rxq, 0);
1765*4882a593Smuzhiyun #ifdef CONFIG_SYSFS
1766*4882a593Smuzhiyun 	kset_unregister(dev->queues_kset);
1767*4882a593Smuzhiyun #endif
1768*4882a593Smuzhiyun 	return error;
1769*4882a593Smuzhiyun }
1770*4882a593Smuzhiyun 
1771*4882a593Smuzhiyun static int queue_change_owner(struct net_device *ndev, kuid_t kuid, kgid_t kgid)
1772*4882a593Smuzhiyun {
1773*4882a593Smuzhiyun 	int error = 0, real_rx = 0, real_tx = 0;
1774*4882a593Smuzhiyun 
1775*4882a593Smuzhiyun #ifdef CONFIG_SYSFS
1776*4882a593Smuzhiyun 	if (ndev->queues_kset) {
1777*4882a593Smuzhiyun 		error = sysfs_change_owner(&ndev->queues_kset->kobj, kuid, kgid);
1778*4882a593Smuzhiyun 		if (error)
1779*4882a593Smuzhiyun 			return error;
1780*4882a593Smuzhiyun 	}
1781*4882a593Smuzhiyun 	real_rx = ndev->real_num_rx_queues;
1782*4882a593Smuzhiyun #endif
1783*4882a593Smuzhiyun 	real_tx = ndev->real_num_tx_queues;
1784*4882a593Smuzhiyun 
1785*4882a593Smuzhiyun 	error = net_rx_queue_change_owner(ndev, real_rx, kuid, kgid);
1786*4882a593Smuzhiyun 	if (error)
1787*4882a593Smuzhiyun 		return error;
1788*4882a593Smuzhiyun 
1789*4882a593Smuzhiyun 	error = net_tx_queue_change_owner(ndev, real_tx, kuid, kgid);
1790*4882a593Smuzhiyun 	if (error)
1791*4882a593Smuzhiyun 		return error;
1792*4882a593Smuzhiyun 
1793*4882a593Smuzhiyun 	return 0;
1794*4882a593Smuzhiyun }
1795*4882a593Smuzhiyun 
1796*4882a593Smuzhiyun static void remove_queue_kobjects(struct net_device *dev)
1797*4882a593Smuzhiyun {
1798*4882a593Smuzhiyun 	int real_rx = 0, real_tx = 0;
1799*4882a593Smuzhiyun 
1800*4882a593Smuzhiyun #ifdef CONFIG_SYSFS
1801*4882a593Smuzhiyun 	real_rx = dev->real_num_rx_queues;
1802*4882a593Smuzhiyun #endif
1803*4882a593Smuzhiyun 	real_tx = dev->real_num_tx_queues;
1804*4882a593Smuzhiyun 
1805*4882a593Smuzhiyun 	net_rx_queue_update_kobjects(dev, real_rx, 0);
1806*4882a593Smuzhiyun 	netdev_queue_update_kobjects(dev, real_tx, 0);
1807*4882a593Smuzhiyun 
1808*4882a593Smuzhiyun 	dev->real_num_rx_queues = 0;
1809*4882a593Smuzhiyun 	dev->real_num_tx_queues = 0;
1810*4882a593Smuzhiyun #ifdef CONFIG_SYSFS
1811*4882a593Smuzhiyun 	kset_unregister(dev->queues_kset);
1812*4882a593Smuzhiyun #endif
1813*4882a593Smuzhiyun }
1814*4882a593Smuzhiyun 
1815*4882a593Smuzhiyun static bool net_current_may_mount(void)
1816*4882a593Smuzhiyun {
1817*4882a593Smuzhiyun 	struct net *net = current->nsproxy->net_ns;
1818*4882a593Smuzhiyun 
1819*4882a593Smuzhiyun 	return ns_capable(net->user_ns, CAP_SYS_ADMIN);
1820*4882a593Smuzhiyun }
1821*4882a593Smuzhiyun 
1822*4882a593Smuzhiyun static void *net_grab_current_ns(void)
1823*4882a593Smuzhiyun {
1824*4882a593Smuzhiyun 	struct net *ns = current->nsproxy->net_ns;
1825*4882a593Smuzhiyun #ifdef CONFIG_NET_NS
1826*4882a593Smuzhiyun 	if (ns)
1827*4882a593Smuzhiyun 		refcount_inc(&ns->passive);
1828*4882a593Smuzhiyun #endif
1829*4882a593Smuzhiyun 	return ns;
1830*4882a593Smuzhiyun }
1831*4882a593Smuzhiyun 
1832*4882a593Smuzhiyun static const void *net_initial_ns(void)
1833*4882a593Smuzhiyun {
1834*4882a593Smuzhiyun 	return &init_net;
1835*4882a593Smuzhiyun }
1836*4882a593Smuzhiyun 
1837*4882a593Smuzhiyun static const void *net_netlink_ns(struct sock *sk)
1838*4882a593Smuzhiyun {
1839*4882a593Smuzhiyun 	return sock_net(sk);
1840*4882a593Smuzhiyun }
1841*4882a593Smuzhiyun 
1842*4882a593Smuzhiyun const struct kobj_ns_type_operations net_ns_type_operations = {
1843*4882a593Smuzhiyun 	.type = KOBJ_NS_TYPE_NET,
1844*4882a593Smuzhiyun 	.current_may_mount = net_current_may_mount,
1845*4882a593Smuzhiyun 	.grab_current_ns = net_grab_current_ns,
1846*4882a593Smuzhiyun 	.netlink_ns = net_netlink_ns,
1847*4882a593Smuzhiyun 	.initial_ns = net_initial_ns,
1848*4882a593Smuzhiyun 	.drop_ns = net_drop_ns,
1849*4882a593Smuzhiyun };
1850*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(net_ns_type_operations);
1851*4882a593Smuzhiyun 
1852*4882a593Smuzhiyun static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
1853*4882a593Smuzhiyun {
1854*4882a593Smuzhiyun 	struct net_device *dev = to_net_dev(d);
1855*4882a593Smuzhiyun 	int retval;
1856*4882a593Smuzhiyun 
1857*4882a593Smuzhiyun 	/* pass interface to uevent. */
1858*4882a593Smuzhiyun 	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
1859*4882a593Smuzhiyun 	if (retval)
1860*4882a593Smuzhiyun 		goto exit;
1861*4882a593Smuzhiyun 
1862*4882a593Smuzhiyun 	/* pass ifindex to uevent.
1863*4882a593Smuzhiyun 	 * ifindex is useful as it won't change (interface name may change)
1864*4882a593Smuzhiyun 	 * and is what RtNetlink uses natively.
1865*4882a593Smuzhiyun 	 */
1866*4882a593Smuzhiyun 	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);
1867*4882a593Smuzhiyun 
1868*4882a593Smuzhiyun exit:
1869*4882a593Smuzhiyun 	return retval;
1870*4882a593Smuzhiyun }
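
/* For a hypothetical "eth0" with ifindex 2, the uevent environment
 * built above would carry:
 *
 *   INTERFACE=eth0
 *   IFINDEX=2
 */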
1871*4882a593Smuzhiyun 
1872*4882a593Smuzhiyun /*
1873*4882a593Smuzhiyun  *	netdev_release -- destroy and free a dead device.
1874*4882a593Smuzhiyun  *	Called when last reference to device kobject is gone.
1875*4882a593Smuzhiyun  */
1876*4882a593Smuzhiyun static void netdev_release(struct device *d)
1877*4882a593Smuzhiyun {
1878*4882a593Smuzhiyun 	struct net_device *dev = to_net_dev(d);
1879*4882a593Smuzhiyun 
1880*4882a593Smuzhiyun 	BUG_ON(dev->reg_state != NETREG_RELEASED);
1881*4882a593Smuzhiyun 
1882*4882a593Smuzhiyun 	/* no need to wait for rcu grace period:
1883*4882a593Smuzhiyun 	 * device is dead and about to be freed.
1884*4882a593Smuzhiyun 	 */
1885*4882a593Smuzhiyun 	kfree(rcu_access_pointer(dev->ifalias));
1886*4882a593Smuzhiyun 	netdev_freemem(dev);
1887*4882a593Smuzhiyun }
1888*4882a593Smuzhiyun 
1889*4882a593Smuzhiyun static const void *net_namespace(struct device *d)
1890*4882a593Smuzhiyun {
1891*4882a593Smuzhiyun 	struct net_device *dev = to_net_dev(d);
1892*4882a593Smuzhiyun 
1893*4882a593Smuzhiyun 	return dev_net(dev);
1894*4882a593Smuzhiyun }
1895*4882a593Smuzhiyun 
1896*4882a593Smuzhiyun static void net_get_ownership(struct device *d, kuid_t *uid, kgid_t *gid)
1897*4882a593Smuzhiyun {
1898*4882a593Smuzhiyun 	struct net_device *dev = to_net_dev(d);
1899*4882a593Smuzhiyun 	const struct net *net = dev_net(dev);
1900*4882a593Smuzhiyun 
1901*4882a593Smuzhiyun 	net_ns_get_ownership(net, uid, gid);
1902*4882a593Smuzhiyun }
1903*4882a593Smuzhiyun 
1904*4882a593Smuzhiyun static struct class net_class __ro_after_init = {
1905*4882a593Smuzhiyun 	.name = "net",
1906*4882a593Smuzhiyun 	.dev_release = netdev_release,
1907*4882a593Smuzhiyun 	.dev_groups = net_class_groups,
1908*4882a593Smuzhiyun 	.dev_uevent = netdev_uevent,
1909*4882a593Smuzhiyun 	.ns_type = &net_ns_type_operations,
1910*4882a593Smuzhiyun 	.namespace = net_namespace,
1911*4882a593Smuzhiyun 	.get_ownership = net_get_ownership,
1912*4882a593Smuzhiyun };
1913*4882a593Smuzhiyun 
1914*4882a593Smuzhiyun #ifdef CONFIG_OF_NET
1915*4882a593Smuzhiyun static int of_dev_node_match(struct device *dev, const void *data)
1916*4882a593Smuzhiyun {
1917*4882a593Smuzhiyun 	for (; dev; dev = dev->parent) {
1918*4882a593Smuzhiyun 		if (dev->of_node == data)
1919*4882a593Smuzhiyun 			return 1;
1920*4882a593Smuzhiyun 	}
1921*4882a593Smuzhiyun 
1922*4882a593Smuzhiyun 	return 0;
1923*4882a593Smuzhiyun }
1924*4882a593Smuzhiyun 
1925*4882a593Smuzhiyun /*
1926*4882a593Smuzhiyun  * of_find_net_device_by_node - lookup the net device for the device node
1927*4882a593Smuzhiyun  * @np: OF device node
1928*4882a593Smuzhiyun  *
1929*4882a593Smuzhiyun  * Looks up the net_device structure corresponding to the device node.
1930*4882a593Smuzhiyun  * If successful, returns a pointer to the net_device with the embedded
1931*4882a593Smuzhiyun  * struct device refcount incremented by one, or NULL on failure. The
1932*4882a593Smuzhiyun  * refcount must be dropped when done with the net_device.
1933*4882a593Smuzhiyun  */
1934*4882a593Smuzhiyun struct net_device *of_find_net_device_by_node(struct device_node *np)
1935*4882a593Smuzhiyun {
1936*4882a593Smuzhiyun 	struct device *dev;
1937*4882a593Smuzhiyun 
1938*4882a593Smuzhiyun 	dev = class_find_device(&net_class, NULL, np, of_dev_node_match);
1939*4882a593Smuzhiyun 	if (!dev)
1940*4882a593Smuzhiyun 		return NULL;
1941*4882a593Smuzhiyun 
1942*4882a593Smuzhiyun 	return to_net_dev(dev);
1943*4882a593Smuzhiyun }
1944*4882a593Smuzhiyun EXPORT_SYMBOL(of_find_net_device_by_node);
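
/* Caller sketch (the surrounding driver code is hypothetical); the
 * reference taken by the lookup must be dropped with put_device():
 *
 *	struct net_device *ndev = of_find_net_device_by_node(np);
 *
 *	if (!ndev)
 *		return -EPROBE_DEFER;
 *	...use ndev...
 *	put_device(&ndev->dev);
 */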
1945*4882a593Smuzhiyun #endif
1946*4882a593Smuzhiyun 
1947*4882a593Smuzhiyun /* Delete sysfs entries but hold kobject reference until after all
1948*4882a593Smuzhiyun  * netdev references are gone.
1949*4882a593Smuzhiyun  */
1950*4882a593Smuzhiyun void netdev_unregister_kobject(struct net_device *ndev)
1951*4882a593Smuzhiyun {
1952*4882a593Smuzhiyun 	struct device *dev = &ndev->dev;
1953*4882a593Smuzhiyun 
1954*4882a593Smuzhiyun 	if (!refcount_read(&dev_net(ndev)->count))
1955*4882a593Smuzhiyun 		dev_set_uevent_suppress(dev, 1);
1956*4882a593Smuzhiyun 
1957*4882a593Smuzhiyun 	kobject_get(&dev->kobj);
1958*4882a593Smuzhiyun 
1959*4882a593Smuzhiyun 	remove_queue_kobjects(ndev);
1960*4882a593Smuzhiyun 
1961*4882a593Smuzhiyun 	pm_runtime_set_memalloc_noio(dev, false);
1962*4882a593Smuzhiyun 
1963*4882a593Smuzhiyun 	device_del(dev);
1964*4882a593Smuzhiyun }
1965*4882a593Smuzhiyun 
1966*4882a593Smuzhiyun /* Create sysfs entries for network device. */
1967*4882a593Smuzhiyun int netdev_register_kobject(struct net_device *ndev)
1968*4882a593Smuzhiyun {
1969*4882a593Smuzhiyun 	struct device *dev = &ndev->dev;
1970*4882a593Smuzhiyun 	const struct attribute_group **groups = ndev->sysfs_groups;
1971*4882a593Smuzhiyun 	int error = 0;
1972*4882a593Smuzhiyun 
1973*4882a593Smuzhiyun 	device_initialize(dev);
1974*4882a593Smuzhiyun 	dev->class = &net_class;
1975*4882a593Smuzhiyun 	dev->platform_data = ndev;
1976*4882a593Smuzhiyun 	dev->groups = groups;
1977*4882a593Smuzhiyun 
1978*4882a593Smuzhiyun 	dev_set_name(dev, "%s", ndev->name);
1979*4882a593Smuzhiyun 
1980*4882a593Smuzhiyun #ifdef CONFIG_SYSFS
1981*4882a593Smuzhiyun 	/* Allow for a device-specific group */
1982*4882a593Smuzhiyun 	if (*groups)
1983*4882a593Smuzhiyun 		groups++;
1984*4882a593Smuzhiyun 
1985*4882a593Smuzhiyun 	*groups++ = &netstat_group;
1986*4882a593Smuzhiyun 
1987*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
1988*4882a593Smuzhiyun 	if (ndev->ieee80211_ptr)
1989*4882a593Smuzhiyun 		*groups++ = &wireless_group;
1990*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_WIRELESS_EXT)
1991*4882a593Smuzhiyun 	else if (ndev->wireless_handlers)
1992*4882a593Smuzhiyun 		*groups++ = &wireless_group;
1993*4882a593Smuzhiyun #endif
1994*4882a593Smuzhiyun #endif
1995*4882a593Smuzhiyun #endif /* CONFIG_SYSFS */
1996*4882a593Smuzhiyun 
1997*4882a593Smuzhiyun 	error = device_add(dev);
1998*4882a593Smuzhiyun 	if (error)
1999*4882a593Smuzhiyun 		return error;
2000*4882a593Smuzhiyun 
2001*4882a593Smuzhiyun 	error = register_queue_kobjects(ndev);
2002*4882a593Smuzhiyun 	if (error) {
2003*4882a593Smuzhiyun 		device_del(dev);
2004*4882a593Smuzhiyun 		return error;
2005*4882a593Smuzhiyun 	}
2006*4882a593Smuzhiyun 
2007*4882a593Smuzhiyun 	pm_runtime_set_memalloc_noio(dev, true);
2008*4882a593Smuzhiyun 
2009*4882a593Smuzhiyun 	return error;
2010*4882a593Smuzhiyun }
2011*4882a593Smuzhiyun 
2012*4882a593Smuzhiyun /* Change owner for sysfs entries when moving network devices across network
2013*4882a593Smuzhiyun  * namespaces owned by different user namespaces.
2014*4882a593Smuzhiyun  */
2015*4882a593Smuzhiyun int netdev_change_owner(struct net_device *ndev, const struct net *net_old,
2016*4882a593Smuzhiyun 			const struct net *net_new)
2017*4882a593Smuzhiyun {
2018*4882a593Smuzhiyun 	kuid_t old_uid = GLOBAL_ROOT_UID, new_uid = GLOBAL_ROOT_UID;
2019*4882a593Smuzhiyun 	kgid_t old_gid = GLOBAL_ROOT_GID, new_gid = GLOBAL_ROOT_GID;
2020*4882a593Smuzhiyun 	struct device *dev = &ndev->dev;
2021*4882a593Smuzhiyun 	int error;
2022*4882a593Smuzhiyun 
2023*4882a593Smuzhiyun 	net_ns_get_ownership(net_old, &old_uid, &old_gid);
2024*4882a593Smuzhiyun 	net_ns_get_ownership(net_new, &new_uid, &new_gid);
2025*4882a593Smuzhiyun 
2026*4882a593Smuzhiyun 	/* The network namespace was changed but the owning user namespace is
2027*4882a593Smuzhiyun 	 * identical so there's no need to change the owner of sysfs entries.
2028*4882a593Smuzhiyun 	 */
2029*4882a593Smuzhiyun 	if (uid_eq(old_uid, new_uid) && gid_eq(old_gid, new_gid))
2030*4882a593Smuzhiyun 		return 0;
2031*4882a593Smuzhiyun 
2032*4882a593Smuzhiyun 	error = device_change_owner(dev, new_uid, new_gid);
2033*4882a593Smuzhiyun 	if (error)
2034*4882a593Smuzhiyun 		return error;
2035*4882a593Smuzhiyun 
2036*4882a593Smuzhiyun 	error = queue_change_owner(ndev, new_uid, new_gid);
2037*4882a593Smuzhiyun 	if (error)
2038*4882a593Smuzhiyun 		return error;
2039*4882a593Smuzhiyun 
2040*4882a593Smuzhiyun 	return 0;
2041*4882a593Smuzhiyun }
2042*4882a593Smuzhiyun 
2043*4882a593Smuzhiyun int netdev_class_create_file_ns(const struct class_attribute *class_attr,
2044*4882a593Smuzhiyun 				const void *ns)
2045*4882a593Smuzhiyun {
2046*4882a593Smuzhiyun 	return class_create_file_ns(&net_class, class_attr, ns);
2047*4882a593Smuzhiyun }
2048*4882a593Smuzhiyun EXPORT_SYMBOL(netdev_class_create_file_ns);
2049*4882a593Smuzhiyun 
2050*4882a593Smuzhiyun void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
2051*4882a593Smuzhiyun 				 const void *ns)
2052*4882a593Smuzhiyun {
2053*4882a593Smuzhiyun 	class_remove_file_ns(&net_class, class_attr, ns);
2054*4882a593Smuzhiyun }
2055*4882a593Smuzhiyun EXPORT_SYMBOL(netdev_class_remove_file_ns);
2056*4882a593Smuzhiyun 
2057*4882a593Smuzhiyun int __init netdev_kobject_init(void)
2058*4882a593Smuzhiyun {
2059*4882a593Smuzhiyun 	kobj_ns_type_register(&net_ns_type_operations);
2060*4882a593Smuzhiyun 	return class_register(&net_class);
2061*4882a593Smuzhiyun }