/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/timer.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/workqueue.h>
#include <linux/dynamic_queue_limits.h>

#include <linux/ethtool.h>
#include <net/net_namespace.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
#include <net/netprio_cgroup.h>
#include <net/xdp.h>

#include <linux/netdev_features.h>
#include <linux/neighbour.h>
#include <uapi/linux/netdevice.h>
#include <uapi/linux/if_bonding.h>
#include <uapi/linux/pkt_cls.h>
#include <linux/hashtable.h>
#include <linux/android_kabi.h>

struct netpoll_info;
struct device;
struct phy_device;
struct dsa_port;
struct ip_tunnel_parm;
struct macsec_context;
struct macsec_ops;

struct sfp_bus;
/* 802.11 specific */
struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;
struct mpls_dev;
/* UDP Tunnel offloads */
struct udp_tunnel_info;
struct udp_tunnel_nic_info;
struct udp_tunnel_nic;
struct bpf_prog;
struct xdp_buff;

void synchronize_net(void);
void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops);

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */

#define MAX_NEST_DEV 8

/*
 * Transmit return codes: transmit return codes originate from three different
 * namespaces:
 *
 * - qdisc return codes
 * - driver transmit return codes
 * - errno values
 *
 * Drivers are allowed to return any one of those in their hard_start_xmit()
 * function. Real network devices commonly used with qdiscs should only return
 * the driver transmit return codes though - when qdiscs are used, the actual
 * transmission happens asynchronously, so the value is not propagated to
 * higher layers. Virtual network devices transmit synchronously; in this case
 * the driver transmit return codes are consumed by dev_queue_xmit(), and all
 * others are propagated to higher layers.
 */

/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS	0x00
#define NET_XMIT_DROP		0x01	/* skb dropped */
#define NET_XMIT_CN		0x02	/* congestion notification */
#define NET_XMIT_MASK		0x0f	/* qdisc flags in net/sch_generic.h */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
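
/*
 * Illustrative sketch (not part of the upstream API, names are hypothetical):
 * a tunnel-style transmit path that re-injects packets through the qdisc
 * layer typically maps the qdisc verdict with the helpers above, treating
 * NET_XMIT_CN as success for accounting purposes.
 *
 *	static int example_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		int err = dev_queue_xmit(skb);
 *
 *		if (net_xmit_eval(err) == 0)	// NET_XMIT_CN counts as sent
 *			dev->stats.tx_packets++;
 *		else
 *			dev->stats.tx_errors++;
 *
 *		return net_xmit_errno(err);	// 0 or -ENOBUFS
 *	}
 */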

/* Driver transmit return codes */
#define NETDEV_TX_MASK		0xf0

enum netdev_tx {
	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
	NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
	NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy */
};
typedef enum netdev_tx netdev_tx_t;

/*
 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
 */
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	if (likely(rc < NET_XMIT_MASK))
		return true;

	return false;
}
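
/*
 * Illustrative sketch (an assumption about typical callers, not upstream
 * code): this is roughly how code that invokes ->ndo_start_xmit() is expected
 * to interpret the verdict via dev_xmit_complete().  requeue_skb() is a
 * hypothetical helper.
 *
 *	// rc came straight from ops->ndo_start_xmit(skb, dev)
 *	if (dev_xmit_complete(rc)) {
 *		// the skb now belongs to the driver; nothing left to do
 *		return rc;
 *	}
 *	// NETDEV_TX_BUSY: the skb was NOT consumed, keep it and stop the queue
 *	requeue_skb(skb);
 */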

/*
 * Compute the worst-case header length according to the protocols
 * used.
 */

#if defined(CONFIG_HYPERV_NET)
# define LL_MAX_HEADER 128
#elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#else
# define LL_MAX_HEADER 32
#endif

#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
    !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

/*
 * Old network device statistics. Fields are native words
 * (unsigned long) so they can be read and written atomically.
 */

struct net_device_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	rx_errors;
	unsigned long	tx_errors;
	unsigned long	rx_dropped;
	unsigned long	tx_dropped;
	unsigned long	multicast;
	unsigned long	collisions;
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;
	unsigned long	rx_crc_errors;
	unsigned long	rx_frame_errors;
	unsigned long	rx_fifo_errors;
	unsigned long	rx_missed_errors;
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};


#include <linux/cache.h>
#include <linux/skbuff.h>

#ifdef CONFIG_RPS
#include <linux/static_key.h>
extern struct static_key_false rps_needed;
extern struct static_key_false rfs_needed;
#endif

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_UNICAST	3
#define NETDEV_HW_ADDR_T_MULTICAST	4
	bool			global_use;
	int			sync_cnt;
	int			refcount;
	int			synced;
	struct rcu_head		rcu_head;
};

struct netdev_hw_addr_list {
	struct list_head	list;
	int			count;
};

#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
	list_for_each_entry(ha, &(l)->list, list)

#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->uc)

#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
	netdev_hw_addr_list_for_each(ha, &(dev)->mc)

struct hh_cache {
	unsigned int	hh_len;
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
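
/*
 * Worked example (illustrative only): with HH_DATA_MOD == 16, a plain
 * Ethernet device (hard_header_len == 14, needed_headroom == 0) yields
 * LL_RESERVED_SPACE() == (14 & ~15) + 16 == 16 bytes of headroom.  A protocol
 * sender would then typically allocate and reserve like this:
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *	// build the payload, then let dev_hard_header() fill the link-layer header
 */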

struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
	bool	(*validate)(const char *ll_header, unsigned int len);
	__be16	(*parse_protocol)(const struct sk_buff *skb);

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
};

/* These flag bits are private to the generic network queueing
 * layer; they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
	__LINK_STATE_TESTING,
};


/*
 * This structure holds boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

int __init netdev_boot_setup(char *str);

struct gro_list {
	struct list_head	list;
	int			count;
};

/*
 * size of gro hash buckets, must be less than the number of bits in
 * napi_struct::gro_bitmask
 */
#define GRO_HASH_BUCKETS	8

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-CPU poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			defer_hard_irqs_count;
	unsigned long		gro_bitmask;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	int			poll_owner;
#endif
	struct net_device	*dev;
	struct gro_list		gro_hash[GRO_HASH_BUCKETS];
	struct sk_buff		*skb;
	struct list_head	rx_list; /* Pending GRO_NORMAL skbs */
	int			rx_count; /* length of rx_list */
	struct hrtimer		timer;
	struct list_head	dev_list;
	struct hlist_node	napi_hash_node;
	unsigned int		napi_id;

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
};

enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_MISSED,	/* reschedule a napi */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
	NAPI_STATE_LISTED,	/* NAPI added to system lists */
	NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
	NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */
};

enum {
	NAPIF_STATE_SCHED	 = BIT(NAPI_STATE_SCHED),
	NAPIF_STATE_MISSED	 = BIT(NAPI_STATE_MISSED),
	NAPIF_STATE_DISABLE	 = BIT(NAPI_STATE_DISABLE),
	NAPIF_STATE_NPSVC	 = BIT(NAPI_STATE_NPSVC),
	NAPIF_STATE_LISTED	 = BIT(NAPI_STATE_LISTED),
	NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
	NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
};

enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
	GRO_CONSUMED,
};
typedef enum gro_result gro_result_t;

/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler decides the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);

void __napi_schedule(struct napi_struct *n);
void __napi_schedule_irqoff(struct napi_struct *n);

static inline bool napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

bool napi_schedule_prep(struct napi_struct *n);

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: NAPI context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}
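
/*
 * Illustrative sketch (hypothetical driver code, not part of this header):
 * the usual pattern is to mask the device's RX interrupt in the hard IRQ
 * handler and defer the real work to the NAPI poll routine.
 *
 *	static irqreturn_t example_isr(int irq, void *data)
 *	{
 *		struct example_adapter *ap = data;
 *
 *		example_mask_rx_irq(ap);	// device specific
 *		napi_schedule(&ap->napi);	// poll runs later in softirq context
 *		return IRQ_HANDLED;
 *	}
 */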

/**
 *	napi_schedule_irqoff - schedule NAPI poll
 *	@n: NAPI context
 *
 * Variant of napi_schedule(), assuming hard irqs are masked.
 */
static inline void napi_schedule_irqoff(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule_irqoff(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline bool napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return true;
	}
	return false;
}

bool napi_complete_done(struct napi_struct *n, int work_done);
/**
 *	napi_complete - NAPI processing complete
 *	@n: NAPI context
 *
 * Mark NAPI processing as complete.
 * Consider using napi_complete_done() instead.
 * Return false if device should avoid rearming interrupts.
 */
static inline bool napi_complete(struct napi_struct *n)
{
	return napi_complete_done(n, 0);
}
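
/*
 * Illustrative sketch (hypothetical driver code): a poll routine processes at
 * most @budget packets and only re-enables device interrupts once it has
 * called napi_complete_done() with work < budget and that call returned true.
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct example_adapter *ap =
 *			container_of(napi, struct example_adapter, napi);
 *		int work = example_clean_rx(ap, budget);	// device specific
 *
 *		if (work < budget && napi_complete_done(napi, work))
 *			example_unmask_rx_irq(ap);	// safe to take interrupts again
 *		return work;
 *	}
 */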

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: NAPI context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
void napi_disable(struct napi_struct *n);

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: NAPI context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_atomic();
	clear_bit(NAPI_STATE_SCHED, &n->state);
	clear_bit(NAPI_STATE_NPSVC, &n->state);
}

/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: NAPI context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	if (IS_ENABLED(CONFIG_SMP))
		while (test_bit(NAPI_STATE_SCHED, &n->state))
			msleep(1);
	else
		barrier();
}

/**
 *	napi_if_scheduled_mark_missed - if napi is running, set the
 *	NAPIF_STATE_MISSED
 *	@n: NAPI context
 *
 * If napi is running, set the NAPIF_STATE_MISSED, and return true if
 * NAPI is scheduled.
 **/
static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n)
{
	unsigned long val, new;

	do {
		val = READ_ONCE(n->state);
		if (val & NAPIF_STATE_DISABLE)
			return true;

		if (!(val & NAPIF_STATE_SCHED))
			return false;

		new = val | NAPIF_STATE_MISSED;
	} while (cmpxchg(&n->state, val, new) != val);

	return true;
}

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,
	__QUEUE_STATE_STACK_XOFF,
	__QUEUE_STATE_FROZEN,
};

#define QUEUE_STATE_DRV_XOFF	(1 << __QUEUE_STATE_DRV_XOFF)
#define QUEUE_STATE_STACK_XOFF	(1 << __QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_FROZEN	(1 << __QUEUE_STATE_FROZEN)

#define QUEUE_STATE_ANY_XOFF	(QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					QUEUE_STATE_FROZEN)
#define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
					QUEUE_STATE_FROZEN)

/*
 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
 * netif_tx_* functions below are used to manipulate this flag. The
 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
 * queue independently. The netif_xmit_*stopped functions below are called
 * to check if the queue has been stopped by the driver or stack (either
 * of the XOFF bits are set in the state). Drivers should not need to call
 * netif_xmit*stopped functions, they should only be using netif_tx_*.
 */
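
/*
 * Illustrative sketch (hypothetical driver code): the driver-owned XOFF bit
 * is normally toggled from the xmit and TX-completion paths with the
 * netif_tx_* helpers declared later in this file.
 *
 *	// in ->ndo_start_xmit(), after consuming descriptors:
 *	if (example_tx_ring_full(ring))
 *		netif_tx_stop_queue(txq);	// sets __QUEUE_STATE_DRV_XOFF
 *
 *	// in the TX completion handler, once descriptors are reclaimed:
 *	if (netif_tx_queue_stopped(txq) && example_tx_ring_has_room(ring))
 *		netif_tx_wake_queue(txq);	// clears it and restarts the qdisc
 */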

struct netdev_queue {
/*
 * read-mostly part
 */
	struct net_device	*dev;
	struct Qdisc __rcu	*qdisc;
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_SYSFS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;
#endif
	unsigned long		tx_maxrate;
	/*
	 * Number of TX timeouts for this queue
	 * (/sys/class/net/DEV/Q/trans_timeout)
	 */
	unsigned long		trans_timeout;

	/* Subordinate device that the queue has been assigned to */
	struct net_device	*sb_dev;
#ifdef CONFIG_XDP_SOCKETS
	struct xsk_buff_pool	*pool;
#endif
/*
 * write-mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * Time (in jiffies) of last Tx
	 */
	unsigned long		trans_start;

	unsigned long		state;

#ifdef CONFIG_BQL
	struct dql		dql;
#endif

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
} ____cacheline_aligned_in_smp;

extern int sysctl_fb_tunnels_only_for_init_net;
extern int sysctl_devconf_inherit_init_net;

/*
 * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns
 *                                     == 1 : For initns only
 *                                     == 2 : For none.
 */
static inline bool net_has_fallback_tunnels(const struct net *net)
{
#if IS_ENABLED(CONFIG_SYSCTL)
	int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net);

	return !fb_tunnels_only_for_init_net ||
		(net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1);
#else
	return true;
#endif
}

static inline int net_inherit_devconf(void)
{
#if IS_ENABLED(CONFIG_SYSCTL)
	return READ_ONCE(sysctl_devconf_inherit_init_net);
#else
	return 0;
#endif
}

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}

static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}

#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length. The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int	len;
	struct rcu_head	rcu;
	u16		cpus[];
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))

/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16		cpu;
	u16		filter;
	unsigned int	last_qtail;
};
#define RPS_NO_FILTER 0xffff

/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int		mask;
	struct rcu_head		rcu;
	struct rps_dev_flow	flows[];
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    ((_num) * sizeof(struct rps_dev_flow)))

/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 * Each entry is a 32bit value. Upper part is the high-order bits
 * of flow hash, lower part is CPU number.
 * rps_cpu_mask is used to partition the space, depending on number of
 * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
 * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
 * meaning we use 32-6=26 bits for the hash.
 */
struct rps_sock_flow_table {
	u32	mask;

	u32	ents[] ____cacheline_aligned_in_smp;
};
#define	RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))

#define RPS_NO_CPU 0xffff

extern u32 rps_cpu_mask;
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;

static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int index = hash & table->mask;
		u32 val = hash & ~rps_cpu_mask;

		/* We only give a hint, preemption can change CPU under us */
		val |= raw_smp_processor_id();

		if (table->ents[index] != val)
			table->ents[index] = val;
	}
}
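
/*
 * Illustrative sketch (roughly how the socket layer is expected to use this
 * on the recvmsg path; shown only as an example of intended use):
 *
 *	struct rps_sock_flow_table *tbl;
 *
 *	rcu_read_lock();
 *	tbl = rcu_dereference(rps_sock_flow_table);
 *	if (tbl)
 *		rps_record_sock_flow(tbl, sk->sk_rxhash);	// remember "last CPU"
 *	rcu_read_unlock();
 */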

#ifdef CONFIG_RFS_ACCEL
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
			 u16 filter_id);
#endif
#endif /* CONFIG_RPS */

/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
#ifdef CONFIG_RPS
	struct rps_map __rcu		*rps_map;
	struct rps_dev_flow_table __rcu	*rps_flow_table;
#endif
	struct kobject			kobj;
	struct net_device		*dev;
	struct xdp_rxq_info		xdp_rxq;
#ifdef CONFIG_XDP_SOCKETS
	struct xsk_buff_pool		*pool;
#endif

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
} ____cacheline_aligned_in_smp;

/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
			 const char *buf, size_t len);
};

#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length. The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int	len;
	unsigned int	alloc_len;
	struct rcu_head	rcu;
	u16		queues[];
};
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \
       - sizeof(struct xps_map)) / sizeof(u16))

/*
 * This structure holds all XPS maps for device. Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head	rcu;
	struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */
};

#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) +	\
	(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))

#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
	(_rxqs * (_tcs) * sizeof(struct xps_map *)))

#endif /* CONFIG_XPS */

#define TC_MAX_QUEUE	16
#define TC_BITMASK	15
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;
	u16 offset;
};

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/*
 * This structure is to hold information about the device
 * configured to run FCoE protocol stack.
 */
struct netdev_fcoe_hbainfo {
	char	manufacturer[64];
	char	serial_number[64];
	char	hardware_version[64];
	char	driver_version[64];
	char	optionrom_version[64];
	char	firmware_version[64];
	char	model[256];
	char	model_description[256];
};
#endif

#define MAX_PHYS_ITEM_ID_LEN 32

/* This structure holds a unique identifier to identify some
 * physical item (port for example) used by a netdevice.
 */
struct netdev_phys_item_id {
	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
	unsigned char id_len;
};

static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
					    struct netdev_phys_item_id *b)
{
	return a->id_len == b->id_len &&
	       memcmp(a->id, b->id, a->id_len) == 0;
}

typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
				       struct sk_buff *skb,
				       struct net_device *sb_dev);

enum tc_setup_type {
	TC_SETUP_QDISC_MQPRIO,
	TC_SETUP_CLSU32,
	TC_SETUP_CLSFLOWER,
	TC_SETUP_CLSMATCHALL,
	TC_SETUP_CLSBPF,
	TC_SETUP_BLOCK,
	TC_SETUP_QDISC_CBS,
	TC_SETUP_QDISC_RED,
	TC_SETUP_QDISC_PRIO,
	TC_SETUP_QDISC_MQ,
	TC_SETUP_QDISC_ETF,
	TC_SETUP_ROOT_QDISC,
	TC_SETUP_QDISC_GRED,
	TC_SETUP_QDISC_TAPRIO,
	TC_SETUP_FT,
	TC_SETUP_QDISC_ETS,
	TC_SETUP_QDISC_TBF,
	TC_SETUP_QDISC_FIFO,
};

/* These structures hold the attributes of bpf state that are being passed
 * to the netdevice through the bpf op.
 */
enum bpf_netdev_command {
	/* Set or clear a bpf program used in the earliest stages of packet
	 * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee
	 * is responsible for calling bpf_prog_put on any old progs that are
	 * stored. In case of error, the callee need not release the new prog
	 * reference, but on success it takes ownership and must bpf_prog_put
	 * when it is no longer used.
	 */
	XDP_SETUP_PROG,
	XDP_SETUP_PROG_HW,
	/* BPF program for offload callbacks, invoked at program load time. */
	BPF_OFFLOAD_MAP_ALLOC,
	BPF_OFFLOAD_MAP_FREE,
	XDP_SETUP_XSK_POOL,
};

struct bpf_prog_offload_ops;
struct netlink_ext_ack;
struct xdp_umem;
struct xdp_dev_bulk_queue;
struct bpf_xdp_link;

enum bpf_xdp_mode {
	XDP_MODE_SKB = 0,
	XDP_MODE_DRV = 1,
	XDP_MODE_HW = 2,
	__MAX_XDP_MODE
};

struct bpf_xdp_entity {
	struct bpf_prog *prog;
	struct bpf_xdp_link *link;
};

struct netdev_bpf {
	enum bpf_netdev_command command;
	union {
		/* XDP_SETUP_PROG */
		struct {
			u32 flags;
			struct bpf_prog *prog;
			struct netlink_ext_ack *extack;
		};
		/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
		struct {
			struct bpf_offloaded_map *offmap;
		};
		/* XDP_SETUP_XSK_POOL */
		struct {
			struct xsk_buff_pool *pool;
			u16 queue_id;
		} xsk;
	};
};
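
/*
 * Illustrative sketch (hypothetical driver code): a driver's ->ndo_bpf()
 * implementation is essentially a dispatcher over the command field above.
 *
 *	static int example_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		switch (bpf->command) {
 *		case XDP_SETUP_PROG:
 *			return example_xdp_setup(dev, bpf->prog, bpf->extack);
 *		case XDP_SETUP_XSK_POOL:
 *			return example_xsk_setup(dev, bpf->xsk.pool,
 *						 bpf->xsk.queue_id);
 *		default:
 *			return -EINVAL;	// unsupported command
 *		}
 *	}
 */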

/* Flags for ndo_xsk_wakeup. */
#define XDP_WAKEUP_RX (1 << 0)
#define XDP_WAKEUP_TX (1 << 1)

#ifdef CONFIG_XFRM_OFFLOAD
struct xfrmdev_ops {
	int	(*xdo_dev_state_add) (struct xfrm_state *x);
	void	(*xdo_dev_state_delete) (struct xfrm_state *x);
	void	(*xdo_dev_state_free) (struct xfrm_state *x);
	bool	(*xdo_dev_offload_ok) (struct sk_buff *skb,
				       struct xfrm_state *x);
	void	(*xdo_dev_state_advance_esn) (struct xfrm_state *x);

	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
};
#endif

struct dev_ifalias {
	struct rcu_head rcuhead;
	char ifalias[];
};

struct devlink;
struct tlsdev_ops;

struct netdev_name_node {
	struct hlist_node hlist;
	struct list_head list;
	struct net_device *dev;
	const char *name;
};

int netdev_name_node_alt_create(struct net_device *dev, const char *name);
int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);

struct netdev_net_notifier {
	struct list_head list;
	struct notifier_block *nb;
};

/*
 * This structure defines the management hooks for network devices.
 * The following hooks can be defined; unless noted otherwise, they are
 * optional and can be filled with a null pointer.
 *
 * int (*ndo_init)(struct net_device *dev);
 *	This function is called once when a network device is registered.
 *	The network device can use this for any late stage initialization
 *	or semantic validation. It can fail with an error code which will
 *	be propagated back to register_netdev.
 *
 * void (*ndo_uninit)(struct net_device *dev);
 *	This function is called when device is unregistered or when registration
 *	fails. It is not called if init fails.
 *
 * int (*ndo_open)(struct net_device *dev);
 *	This function is called when a network device transitions to the up
 *	state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *	This function is called when a network device transitions to the down
 *	state.
 *
 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Returns NETDEV_TX_OK.  Can return NETDEV_TX_BUSY, but you should stop
 *	the queue before that can happen; it's for obsolete devices and weird
 *	corner cases, but the stack really does a non-trivial amount
 *	of useless work if you return NETDEV_TX_BUSY.
 *	Required; cannot be NULL.
 *
 * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
 *					   struct net_device *dev,
 *					   netdev_features_t features);
 *	Called by core transmit path to determine if device is capable of
 *	performing offload operations on a given packet. This is to give
 *	the device an opportunity to implement any restrictions that cannot
 *	be otherwise expressed by feature flags. The check is called with
 *	the set of features that the stack has calculated and it returns
 *	those the driver believes to be appropriate.
 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
 *                         struct net_device *sb_dev);
 *	Called to decide which queue to use when device supports multiple
 *	transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow device receiver to make
 *	changes to configuration when multicast or promiscuous is enabled.
 *
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when the device changes its address list
 *	filtering. If the driver handles unicast address filtering, it should
 *	set IFF_UNICAST_FLT in its priv_flags.
 *
1047*4882a593Smuzhiyun * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
1048*4882a593Smuzhiyun * This function is called when the Media Access Control address
1049*4882a593Smuzhiyun * needs to be changed. If this interface is not defined, the
1050*4882a593Smuzhiyun * MAC address can not be changed.
1051*4882a593Smuzhiyun *
1052*4882a593Smuzhiyun * int (*ndo_validate_addr)(struct net_device *dev);
1053*4882a593Smuzhiyun * Test if Media Access Control address is valid for the device.
1054*4882a593Smuzhiyun *
1055*4882a593Smuzhiyun * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
1056*4882a593Smuzhiyun * Called when a user requests an ioctl which can't be handled by
1057*4882a593Smuzhiyun * the generic interface code. If not defined ioctls return
1058*4882a593Smuzhiyun * not supported error code.
1059*4882a593Smuzhiyun *
1060*4882a593Smuzhiyun * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
1061*4882a593Smuzhiyun * Used to set network devices bus interface parameters. This interface
1062*4882a593Smuzhiyun * is retained for legacy reasons; new devices should use the bus
1063*4882a593Smuzhiyun * interface (PCI) for low level management.
1064*4882a593Smuzhiyun *
1065*4882a593Smuzhiyun * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
1066*4882a593Smuzhiyun * Called when a user wants to change the Maximum Transfer Unit
1067*4882a593Smuzhiyun * of a device.
1068*4882a593Smuzhiyun *
1069*4882a593Smuzhiyun * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);
1070*4882a593Smuzhiyun * Callback used when the transmitter has not made any progress
1071*4882a593Smuzhiyun * for dev->watchdog ticks.
1072*4882a593Smuzhiyun *
1073*4882a593Smuzhiyun * void (*ndo_get_stats64)(struct net_device *dev,
1074*4882a593Smuzhiyun * struct rtnl_link_stats64 *storage);
1075*4882a593Smuzhiyun * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
1076*4882a593Smuzhiyun * Called when a user wants to get the network device usage
1077*4882a593Smuzhiyun * statistics. Drivers must do one of the following:
1078*4882a593Smuzhiyun * 1. Define @ndo_get_stats64 to fill in a zero-initialised
1079*4882a593Smuzhiyun * rtnl_link_stats64 structure passed by the caller.
1080*4882a593Smuzhiyun * 2. Define @ndo_get_stats to update a net_device_stats structure
1081*4882a593Smuzhiyun * (which should normally be dev->stats) and return a pointer to
1082*4882a593Smuzhiyun * it. The structure may be changed asynchronously only if each
1083*4882a593Smuzhiyun * field is written atomically.
1084*4882a593Smuzhiyun * 3. Update dev->stats asynchronously and atomically, and define
1085*4882a593Smuzhiyun * neither operation.
1086*4882a593Smuzhiyun *
1087*4882a593Smuzhiyun * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id)
1088*4882a593Smuzhiyun * Return true if this device supports offload stats of this attr_id.
1089*4882a593Smuzhiyun *
1090*4882a593Smuzhiyun * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
1091*4882a593Smuzhiyun * void *attr_data)
1092*4882a593Smuzhiyun * Get statistics for offload operations by attr_id. Write it into the
1093*4882a593Smuzhiyun * attr_data pointer.
1094*4882a593Smuzhiyun *
1095*4882a593Smuzhiyun * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
1096*4882a593Smuzhiyun * If device supports VLAN filtering this function is called when a
1097*4882a593Smuzhiyun * VLAN id is registered.
1098*4882a593Smuzhiyun *
1099*4882a593Smuzhiyun * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
1100*4882a593Smuzhiyun * If device supports VLAN filtering this function is called when a
1101*4882a593Smuzhiyun * VLAN id is unregistered.
1102*4882a593Smuzhiyun *
1103*4882a593Smuzhiyun * void (*ndo_poll_controller)(struct net_device *dev);
1104*4882a593Smuzhiyun *
1105*4882a593Smuzhiyun * SR-IOV management functions.
1106*4882a593Smuzhiyun * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
1107*4882a593Smuzhiyun * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
1108*4882a593Smuzhiyun * u8 qos, __be16 proto);
1109*4882a593Smuzhiyun * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
1110*4882a593Smuzhiyun * int max_tx_rate);
1111*4882a593Smuzhiyun * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
1112*4882a593Smuzhiyun * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting);
1113*4882a593Smuzhiyun * int (*ndo_get_vf_config)(struct net_device *dev,
1114*4882a593Smuzhiyun * int vf, struct ifla_vf_info *ivf);
1115*4882a593Smuzhiyun * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
1116*4882a593Smuzhiyun * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
1117*4882a593Smuzhiyun * struct nlattr *port[]);
1118*4882a593Smuzhiyun *
1119*4882a593Smuzhiyun * Enable or disable a VF's ability to query its RSS redirection table and
1120*4882a593Smuzhiyun * hash key. This is needed because on some devices VFs share this information
1121*4882a593Smuzhiyun * with the PF, and querying it may introduce a theoretical security risk.
1122*4882a593Smuzhiyun * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
1123*4882a593Smuzhiyun * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
1124*4882a593Smuzhiyun * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type,
1125*4882a593Smuzhiyun * void *type_data);
1126*4882a593Smuzhiyun * Called to set up any 'tc' scheduler, classifier or action on @dev.
1127*4882a593Smuzhiyun * This is always called from the stack with the rtnl lock held and netif
1128*4882a593Smuzhiyun * tx queues stopped. This allows the netdevice to perform queue
1129*4882a593Smuzhiyun * management safely.
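 *
 *      A minimal sketch dispatching on the setup type ("foo_setup_mqprio" is
 *      hypothetical; unsupported types should return -EOPNOTSUPP):
 *
 *      static int foo_setup_tc(struct net_device *dev, enum tc_setup_type type,
 *                              void *type_data)
 *      {
 *              switch (type) {
 *              case TC_SETUP_QDISC_MQPRIO:
 *                      return foo_setup_mqprio(dev, type_data);
 *              default:
 *                      return -EOPNOTSUPP;
 *              }
 *      }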
1130*4882a593Smuzhiyun *
1131*4882a593Smuzhiyun * Fibre Channel over Ethernet (FCoE) offload functions.
1132*4882a593Smuzhiyun * int (*ndo_fcoe_enable)(struct net_device *dev);
1133*4882a593Smuzhiyun * Called when the FCoE protocol stack wants to start using the LLD for FCoE
1134*4882a593Smuzhiyun * so the underlying device can perform whatever configuration or
1135*4882a593Smuzhiyun * initialization is needed to support acceleration of FCoE traffic.
1136*4882a593Smuzhiyun *
1137*4882a593Smuzhiyun * int (*ndo_fcoe_disable)(struct net_device *dev);
1138*4882a593Smuzhiyun * Called when the FCoE protocol stack wants to stop using the LLD for FCoE
1139*4882a593Smuzhiyun * so the underlying device can perform whatever clean-ups are needed to
1140*4882a593Smuzhiyun * stop supporting acceleration of FCoE traffic.
1141*4882a593Smuzhiyun *
1142*4882a593Smuzhiyun * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
1143*4882a593Smuzhiyun * struct scatterlist *sgl, unsigned int sgc);
1144*4882a593Smuzhiyun * Called when the FCoE Initiator wants to initialize an I/O that
1145*4882a593Smuzhiyun * is a possible candidate for Direct Data Placement (DDP). The LLD can
1146*4882a593Smuzhiyun * perform the necessary setup and return 1 to indicate that the device is
1147*4882a593Smuzhiyun * set up successfully to perform DDP on this I/O; otherwise it returns 0.
1148*4882a593Smuzhiyun *
1149*4882a593Smuzhiyun * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
1150*4882a593Smuzhiyun * Called when the FCoE Initiator/Target is done with the DDPed I/O as
1151*4882a593Smuzhiyun * indicated by the FC exchange id 'xid', so the underlying device can
1152*4882a593Smuzhiyun * clean up and reuse resources for later DDP requests.
1153*4882a593Smuzhiyun *
1154*4882a593Smuzhiyun * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
1155*4882a593Smuzhiyun * struct scatterlist *sgl, unsigned int sgc);
1156*4882a593Smuzhiyun * Called when the FCoE Target wants to initialize an I/O that
1157*4882a593Smuzhiyun * is a possible candidate for Direct Data Placement (DDP). The LLD can
1158*4882a593Smuzhiyun * perform the necessary setup and return 1 to indicate that the device is
1159*4882a593Smuzhiyun * set up successfully to perform DDP on this I/O; otherwise it returns 0.
1160*4882a593Smuzhiyun *
1161*4882a593Smuzhiyun * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1162*4882a593Smuzhiyun * struct netdev_fcoe_hbainfo *hbainfo);
1163*4882a593Smuzhiyun * Called when the FCoE Protocol stack wants information on the underlying
1164*4882a593Smuzhiyun * device. This information is utilized by the FCoE protocol stack to
1165*4882a593Smuzhiyun * register attributes with the Fibre Channel management service as per the
1166*4882a593Smuzhiyun * FC-GS Fabric Device Management Information (FDMI) specification.
1167*4882a593Smuzhiyun *
1168*4882a593Smuzhiyun * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
1169*4882a593Smuzhiyun * Called when the underlying device wants to override default World Wide
1170*4882a593Smuzhiyun * Name (WWN) generation mechanism in FCoE protocol stack to pass its own
1171*4882a593Smuzhiyun * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
1172*4882a593Smuzhiyun * protocol stack to use.
1173*4882a593Smuzhiyun *
1174*4882a593Smuzhiyun * RFS acceleration.
1175*4882a593Smuzhiyun * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
1176*4882a593Smuzhiyun * u16 rxq_index, u32 flow_id);
1177*4882a593Smuzhiyun * Set hardware filter for RFS. rxq_index is the target queue index;
1178*4882a593Smuzhiyun * flow_id is a flow ID to be passed to rps_may_expire_flow() later.
1179*4882a593Smuzhiyun * Return the filter ID on success, or a negative error code.
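 *
 *      A minimal sketch ("foo_hw_add_steering_rule" is hypothetical and is
 *      assumed to return a non-negative filter ID or a negative errno):
 *
 *      static int foo_rx_flow_steer(struct net_device *dev,
 *                                   const struct sk_buff *skb,
 *                                   u16 rxq_index, u32 flow_id)
 *      {
 *              return foo_hw_add_steering_rule(netdev_priv(dev), skb,
 *                                              rxq_index, flow_id);
 *      }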
1180*4882a593Smuzhiyun *
1181*4882a593Smuzhiyun * Slave management functions (for bridge, bonding, etc).
1182*4882a593Smuzhiyun * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
1183*4882a593Smuzhiyun * Called to make another netdev a slave (lower device) of this one.
1184*4882a593Smuzhiyun *
1185*4882a593Smuzhiyun * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
1186*4882a593Smuzhiyun * Called to release a previously enslaved netdev.
1187*4882a593Smuzhiyun *
1188*4882a593Smuzhiyun * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev,
1189*4882a593Smuzhiyun * struct sk_buff *skb,
1190*4882a593Smuzhiyun * bool all_slaves);
1191*4882a593Smuzhiyun * Get the xmit slave of the master device. If all_slaves is true, the
1192*4882a593Smuzhiyun * function assumes all the slaves can transmit.
1193*4882a593Smuzhiyun *
1194*4882a593Smuzhiyun * Feature/offload setting functions.
1195*4882a593Smuzhiyun * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1196*4882a593Smuzhiyun * netdev_features_t features);
1197*4882a593Smuzhiyun * Adjusts the requested feature flags according to device-specific
1198*4882a593Smuzhiyun * constraints, and returns the resulting flags. Must not modify
1199*4882a593Smuzhiyun * the device state.
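 *
 *      A minimal sketch assuming a hypothetical device that can only do LRO
 *      when RX checksum offload is also enabled:
 *
 *      static netdev_features_t foo_fix_features(struct net_device *dev,
 *                                                netdev_features_t features)
 *      {
 *              if (!(features & NETIF_F_RXCSUM))
 *                      features &= ~NETIF_F_LRO;
 *              return features;
 *      }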
1200*4882a593Smuzhiyun *
1201*4882a593Smuzhiyun * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
1202*4882a593Smuzhiyun * Called to update device configuration to new features. The passed
1203*4882a593Smuzhiyun * feature set might be less than what was returned by ndo_fix_features().
1204*4882a593Smuzhiyun * Must return >0 or -errno if it changed dev->features itself.
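 *
 *      A minimal sketch ("foo_hw_set_rx_csum" is a hypothetical helper):
 *
 *      static int foo_set_features(struct net_device *dev,
 *                                  netdev_features_t features)
 *      {
 *              netdev_features_t changed = dev->features ^ features;
 *
 *              if (changed & NETIF_F_RXCSUM)
 *                      foo_hw_set_rx_csum(netdev_priv(dev),
 *                                         !!(features & NETIF_F_RXCSUM));
 *              return 0;
 *      }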
1205*4882a593Smuzhiyun *
1206*4882a593Smuzhiyun * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
1207*4882a593Smuzhiyun * struct net_device *dev,
1208*4882a593Smuzhiyun * const unsigned char *addr, u16 vid, u16 flags,
1209*4882a593Smuzhiyun * struct netlink_ext_ack *extack);
1210*4882a593Smuzhiyun * Adds an FDB entry to dev for addr.
1211*4882a593Smuzhiyun * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
1212*4882a593Smuzhiyun * struct net_device *dev,
1213*4882a593Smuzhiyun * const unsigned char *addr, u16 vid)
1214*4882a593Smuzhiyun * Deletes the FDB entry from dev corresponding to addr.
1215*4882a593Smuzhiyun * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
1216*4882a593Smuzhiyun * struct net_device *dev, struct net_device *filter_dev,
1217*4882a593Smuzhiyun * int *idx)
1218*4882a593Smuzhiyun * Used to add FDB entries to dump requests. Implementers should add
1219*4882a593Smuzhiyun * entries to skb and update idx with the number of entries.
1220*4882a593Smuzhiyun *
1221*4882a593Smuzhiyun * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
1222*4882a593Smuzhiyun * u16 flags, struct netlink_ext_ack *extack)
1223*4882a593Smuzhiyun * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
1224*4882a593Smuzhiyun * struct net_device *dev, u32 filter_mask,
1225*4882a593Smuzhiyun * int nlflags)
1226*4882a593Smuzhiyun * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
1227*4882a593Smuzhiyun * u16 flags);
1228*4882a593Smuzhiyun *
1229*4882a593Smuzhiyun * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
1230*4882a593Smuzhiyun * Called to change the device carrier. Soft devices (like dummy, team, etc.)
1231*4882a593Smuzhiyun * which do not represent real hardware may define this to allow their
1232*4882a593Smuzhiyun * userspace components to manage their virtual carrier state. Devices
1233*4882a593Smuzhiyun * that determine carrier state from physical hardware properties (e.g.
1234*4882a593Smuzhiyun * network cables) or protocol-dependent mechanisms (e.g.
1235*4882a593Smuzhiyun * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
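 *
 *      A minimal sketch for a software device:
 *
 *      static int foo_change_carrier(struct net_device *dev, bool new_carrier)
 *      {
 *              if (new_carrier)
 *                      netif_carrier_on(dev);
 *              else
 *                      netif_carrier_off(dev);
 *              return 0;
 *      }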
1236*4882a593Smuzhiyun *
1237*4882a593Smuzhiyun * int (*ndo_get_phys_port_id)(struct net_device *dev,
1238*4882a593Smuzhiyun * struct netdev_phys_item_id *ppid);
1239*4882a593Smuzhiyun * Called to get the ID of the physical port of this device. If the driver
1240*4882a593Smuzhiyun * does not implement this, it is assumed that the hardware is not able to
1241*4882a593Smuzhiyun * have multiple net devices on a single physical port.
1242*4882a593Smuzhiyun *
1243*4882a593Smuzhiyun * int (*ndo_get_port_parent_id)(struct net_device *dev,
1244*4882a593Smuzhiyun * struct netdev_phys_item_id *ppid)
1245*4882a593Smuzhiyun * Called to get the parent ID of the physical port of this device.
1246*4882a593Smuzhiyun *
1247*4882a593Smuzhiyun * void (*ndo_udp_tunnel_add)(struct net_device *dev,
1248*4882a593Smuzhiyun * struct udp_tunnel_info *ti);
1249*4882a593Smuzhiyun * Called by UDP tunnel to notify a driver about the UDP port and socket
1250*4882a593Smuzhiyun * address family that a UDP tunnel is listening to. It is called only
1251*4882a593Smuzhiyun * when a new port starts listening. The operation is protected by the
1252*4882a593Smuzhiyun * RTNL.
1253*4882a593Smuzhiyun *
1254*4882a593Smuzhiyun * void (*ndo_udp_tunnel_del)(struct net_device *dev,
1255*4882a593Smuzhiyun * struct udp_tunnel_info *ti);
1256*4882a593Smuzhiyun * Called by UDP tunnel to notify the driver about a UDP port and socket
1257*4882a593Smuzhiyun * address family that the UDP tunnel is not listening to anymore. The
1258*4882a593Smuzhiyun * operation is protected by the RTNL.
1259*4882a593Smuzhiyun *
1260*4882a593Smuzhiyun * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1261*4882a593Smuzhiyun * struct net_device *dev)
1262*4882a593Smuzhiyun * Called by upper layer devices to accelerate switching or other
1263*4882a593Smuzhiyun * station functionality into hardware. 'pdev' is the lowerdev
1264*4882a593Smuzhiyun * to use for the offload and 'dev' is the net device that will
1265*4882a593Smuzhiyun * back the offload. Returns a pointer to the private structure
1266*4882a593Smuzhiyun * the upper layer will maintain.
1267*4882a593Smuzhiyun * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
1268*4882a593Smuzhiyun * Called by upper layer device to delete the station created
1269*4882a593Smuzhiyun * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
1270*4882a593Smuzhiyun * the station and priv is the structure returned by the add
1271*4882a593Smuzhiyun * operation.
1272*4882a593Smuzhiyun * int (*ndo_set_tx_maxrate)(struct net_device *dev,
1273*4882a593Smuzhiyun * int queue_index, u32 maxrate);
1274*4882a593Smuzhiyun * Called when a user wants to set a maximum transmit rate limit on a
1275*4882a593Smuzhiyun * specific TX queue.
1276*4882a593Smuzhiyun * int (*ndo_get_iflink)(const struct net_device *dev);
1277*4882a593Smuzhiyun * Called to get the iflink value of this device.
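 *
 *      A minimal sketch for a stacked device (the "lowerdev" field in the
 *      hypothetical foo_priv is an assumption):
 *
 *      static int foo_get_iflink(const struct net_device *dev)
 *      {
 *              const struct foo_priv *priv = netdev_priv(dev);
 *
 *              return priv->lowerdev ? priv->lowerdev->ifindex : dev->ifindex;
 *      }
 *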
1278*4882a593Smuzhiyun * int (*ndo_change_proto_down)(struct net_device *dev,
1279*4882a593Smuzhiyun * bool proto_down);
1280*4882a593Smuzhiyun * This function is used to pass protocol port error state information
1281*4882a593Smuzhiyun * to the switch driver. The switch driver can react to the proto_down
1282*4882a593Smuzhiyun * by doing a phys down on the associated switch port.
1283*4882a593Smuzhiyun * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb);
1284*4882a593Smuzhiyun * This function is used to get egress tunnel information for the given skb.
1285*4882a593Smuzhiyun * This is useful for retrieving outer tunnel header parameters while
1286*4882a593Smuzhiyun * sampling a packet.
1287*4882a593Smuzhiyun * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom);
1288*4882a593Smuzhiyun * This function is used to specify the headroom that the device needs
1289*4882a593Smuzhiyun * in skbs allocated during packet reception. Setting an appropriate
1290*4882a593Smuzhiyun * rx headroom value allows avoiding an skb head copy on forward.
1291*4882a593Smuzhiyun * Setting a negative value resets the rx headroom to the
1292*4882a593Smuzhiyun * default value.
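 *
 *      A minimal sketch ("FOO_DEFAULT_HEADROOM" is a hypothetical constant):
 *
 *      static void foo_set_rx_headroom(struct net_device *dev, int new_hr)
 *      {
 *              if (new_hr < 0)
 *                      new_hr = FOO_DEFAULT_HEADROOM;
 *              dev->needed_headroom = new_hr;
 *      }
 *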
1293*4882a593Smuzhiyun * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf);
1294*4882a593Smuzhiyun * This function is used to set or query state related to XDP on the
1295*4882a593Smuzhiyun * netdevice and manage BPF offload. See definition of
1296*4882a593Smuzhiyun * enum bpf_netdev_command for details.
1297*4882a593Smuzhiyun * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp,
1298*4882a593Smuzhiyun * u32 flags);
1299*4882a593Smuzhiyun * This function is used to submit @n XDP packets for transmission on a
1300*4882a593Smuzhiyun * netdevice. It returns the number of frames successfully transmitted;
1301*4882a593Smuzhiyun * frames that got dropped are freed/returned via xdp_return_frame().
1302*4882a593Smuzhiyun * A negative return value indicates a general error invoking the ndo;
1303*4882a593Smuzhiyun * no frames were transmitted and the core caller will free all frames.
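 *
 *      A minimal sketch following the contract above ("foo_hw_tx_frame" is a
 *      hypothetical helper returning 0 on success):
 *
 *      static int foo_xdp_xmit(struct net_device *dev, int n,
 *                              struct xdp_frame **frames, u32 flags)
 *      {
 *              int i, sent = 0;
 *
 *              if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
 *                      return -EINVAL;
 *              for (i = 0; i < n; i++) {
 *                      if (foo_hw_tx_frame(netdev_priv(dev), frames[i])) {
 *                              xdp_return_frame(frames[i]);
 *                              continue;       // dropped frame, already freed
 *                      }
 *                      sent++;
 *              }
 *              return sent;
 *      }
 *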
1304*4882a593Smuzhiyun * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags);
1305*4882a593Smuzhiyun * This function is used to wake up the softirq, ksoftirqd or kthread
1306*4882a593Smuzhiyun * responsible for sending and/or receiving packets on a specific
1307*4882a593Smuzhiyun * queue id bound to an AF_XDP socket. The flags field specifies if
1308*4882a593Smuzhiyun * only RX, only Tx, or both should be woken up using the flags
1309*4882a593Smuzhiyun * XDP_WAKEUP_RX and XDP_WAKEUP_TX.
1310*4882a593Smuzhiyun * struct devlink_port *(*ndo_get_devlink_port)(struct net_device *dev);
1311*4882a593Smuzhiyun * Get devlink port instance associated with a given netdev.
1312*4882a593Smuzhiyun * Called with a reference on the netdevice and devlink locks only,
1313*4882a593Smuzhiyun * rtnl_lock is not held.
1314*4882a593Smuzhiyun * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p,
1315*4882a593Smuzhiyun * int cmd);
1316*4882a593Smuzhiyun * Add, change, delete or get information on an IPv4 tunnel.
1317*4882a593Smuzhiyun * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
1318*4882a593Smuzhiyun * If a device is paired with a peer device, return the peer instance.
1319*4882a593Smuzhiyun * The caller must be in an RCU read-side critical section.
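 *
 *      A caller-side sketch of that RCU requirement (error handling omitted):
 *
 *      struct net_device *peer;
 *
 *      rcu_read_lock();
 *      peer = dev->netdev_ops->ndo_get_peer_dev(dev);
 *      if (peer)
 *              dev_hold(peer);         // keep a reference past the RCU section
 *      rcu_read_unlock();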
1320*4882a593Smuzhiyun */
1321*4882a593Smuzhiyun struct net_device_ops {
1322*4882a593Smuzhiyun int (*ndo_init)(struct net_device *dev);
1323*4882a593Smuzhiyun void (*ndo_uninit)(struct net_device *dev);
1324*4882a593Smuzhiyun int (*ndo_open)(struct net_device *dev);
1325*4882a593Smuzhiyun int (*ndo_stop)(struct net_device *dev);
1326*4882a593Smuzhiyun netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
1327*4882a593Smuzhiyun struct net_device *dev);
1328*4882a593Smuzhiyun netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
1329*4882a593Smuzhiyun struct net_device *dev,
1330*4882a593Smuzhiyun netdev_features_t features);
1331*4882a593Smuzhiyun u16 (*ndo_select_queue)(struct net_device *dev,
1332*4882a593Smuzhiyun struct sk_buff *skb,
1333*4882a593Smuzhiyun struct net_device *sb_dev);
1334*4882a593Smuzhiyun void (*ndo_change_rx_flags)(struct net_device *dev,
1335*4882a593Smuzhiyun int flags);
1336*4882a593Smuzhiyun void (*ndo_set_rx_mode)(struct net_device *dev);
1337*4882a593Smuzhiyun int (*ndo_set_mac_address)(struct net_device *dev,
1338*4882a593Smuzhiyun void *addr);
1339*4882a593Smuzhiyun int (*ndo_validate_addr)(struct net_device *dev);
1340*4882a593Smuzhiyun int (*ndo_do_ioctl)(struct net_device *dev,
1341*4882a593Smuzhiyun struct ifreq *ifr, int cmd);
1342*4882a593Smuzhiyun int (*ndo_set_config)(struct net_device *dev,
1343*4882a593Smuzhiyun struct ifmap *map);
1344*4882a593Smuzhiyun int (*ndo_change_mtu)(struct net_device *dev,
1345*4882a593Smuzhiyun int new_mtu);
1346*4882a593Smuzhiyun int (*ndo_neigh_setup)(struct net_device *dev,
1347*4882a593Smuzhiyun struct neigh_parms *);
1348*4882a593Smuzhiyun void (*ndo_tx_timeout) (struct net_device *dev,
1349*4882a593Smuzhiyun unsigned int txqueue);
1350*4882a593Smuzhiyun
1351*4882a593Smuzhiyun void (*ndo_get_stats64)(struct net_device *dev,
1352*4882a593Smuzhiyun struct rtnl_link_stats64 *storage);
1353*4882a593Smuzhiyun bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
1354*4882a593Smuzhiyun int (*ndo_get_offload_stats)(int attr_id,
1355*4882a593Smuzhiyun const struct net_device *dev,
1356*4882a593Smuzhiyun void *attr_data);
1357*4882a593Smuzhiyun struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
1358*4882a593Smuzhiyun
1359*4882a593Smuzhiyun int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
1360*4882a593Smuzhiyun __be16 proto, u16 vid);
1361*4882a593Smuzhiyun int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
1362*4882a593Smuzhiyun __be16 proto, u16 vid);
1363*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
1364*4882a593Smuzhiyun void (*ndo_poll_controller)(struct net_device *dev);
1365*4882a593Smuzhiyun int (*ndo_netpoll_setup)(struct net_device *dev,
1366*4882a593Smuzhiyun struct netpoll_info *info);
1367*4882a593Smuzhiyun void (*ndo_netpoll_cleanup)(struct net_device *dev);
1368*4882a593Smuzhiyun #endif
1369*4882a593Smuzhiyun int (*ndo_set_vf_mac)(struct net_device *dev,
1370*4882a593Smuzhiyun int queue, u8 *mac);
1371*4882a593Smuzhiyun int (*ndo_set_vf_vlan)(struct net_device *dev,
1372*4882a593Smuzhiyun int queue, u16 vlan,
1373*4882a593Smuzhiyun u8 qos, __be16 proto);
1374*4882a593Smuzhiyun int (*ndo_set_vf_rate)(struct net_device *dev,
1375*4882a593Smuzhiyun int vf, int min_tx_rate,
1376*4882a593Smuzhiyun int max_tx_rate);
1377*4882a593Smuzhiyun int (*ndo_set_vf_spoofchk)(struct net_device *dev,
1378*4882a593Smuzhiyun int vf, bool setting);
1379*4882a593Smuzhiyun int (*ndo_set_vf_trust)(struct net_device *dev,
1380*4882a593Smuzhiyun int vf, bool setting);
1381*4882a593Smuzhiyun int (*ndo_get_vf_config)(struct net_device *dev,
1382*4882a593Smuzhiyun int vf,
1383*4882a593Smuzhiyun struct ifla_vf_info *ivf);
1384*4882a593Smuzhiyun int (*ndo_set_vf_link_state)(struct net_device *dev,
1385*4882a593Smuzhiyun int vf, int link_state);
1386*4882a593Smuzhiyun int (*ndo_get_vf_stats)(struct net_device *dev,
1387*4882a593Smuzhiyun int vf,
1388*4882a593Smuzhiyun struct ifla_vf_stats
1389*4882a593Smuzhiyun *vf_stats);
1390*4882a593Smuzhiyun int (*ndo_set_vf_port)(struct net_device *dev,
1391*4882a593Smuzhiyun int vf,
1392*4882a593Smuzhiyun struct nlattr *port[]);
1393*4882a593Smuzhiyun int (*ndo_get_vf_port)(struct net_device *dev,
1394*4882a593Smuzhiyun int vf, struct sk_buff *skb);
1395*4882a593Smuzhiyun int (*ndo_get_vf_guid)(struct net_device *dev,
1396*4882a593Smuzhiyun int vf,
1397*4882a593Smuzhiyun struct ifla_vf_guid *node_guid,
1398*4882a593Smuzhiyun struct ifla_vf_guid *port_guid);
1399*4882a593Smuzhiyun int (*ndo_set_vf_guid)(struct net_device *dev,
1400*4882a593Smuzhiyun int vf, u64 guid,
1401*4882a593Smuzhiyun int guid_type);
1402*4882a593Smuzhiyun int (*ndo_set_vf_rss_query_en)(
1403*4882a593Smuzhiyun struct net_device *dev,
1404*4882a593Smuzhiyun int vf, bool setting);
1405*4882a593Smuzhiyun int (*ndo_setup_tc)(struct net_device *dev,
1406*4882a593Smuzhiyun enum tc_setup_type type,
1407*4882a593Smuzhiyun void *type_data);
1408*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_FCOE)
1409*4882a593Smuzhiyun int (*ndo_fcoe_enable)(struct net_device *dev);
1410*4882a593Smuzhiyun int (*ndo_fcoe_disable)(struct net_device *dev);
1411*4882a593Smuzhiyun int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
1412*4882a593Smuzhiyun u16 xid,
1413*4882a593Smuzhiyun struct scatterlist *sgl,
1414*4882a593Smuzhiyun unsigned int sgc);
1415*4882a593Smuzhiyun int (*ndo_fcoe_ddp_done)(struct net_device *dev,
1416*4882a593Smuzhiyun u16 xid);
1417*4882a593Smuzhiyun int (*ndo_fcoe_ddp_target)(struct net_device *dev,
1418*4882a593Smuzhiyun u16 xid,
1419*4882a593Smuzhiyun struct scatterlist *sgl,
1420*4882a593Smuzhiyun unsigned int sgc);
1421*4882a593Smuzhiyun int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1422*4882a593Smuzhiyun struct netdev_fcoe_hbainfo *hbainfo);
1423*4882a593Smuzhiyun #endif
1424*4882a593Smuzhiyun
1425*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_LIBFCOE)
1426*4882a593Smuzhiyun #define NETDEV_FCOE_WWNN 0
1427*4882a593Smuzhiyun #define NETDEV_FCOE_WWPN 1
1428*4882a593Smuzhiyun int (*ndo_fcoe_get_wwn)(struct net_device *dev,
1429*4882a593Smuzhiyun u64 *wwn, int type);
1430*4882a593Smuzhiyun #endif
1431*4882a593Smuzhiyun
1432*4882a593Smuzhiyun #ifdef CONFIG_RFS_ACCEL
1433*4882a593Smuzhiyun int (*ndo_rx_flow_steer)(struct net_device *dev,
1434*4882a593Smuzhiyun const struct sk_buff *skb,
1435*4882a593Smuzhiyun u16 rxq_index,
1436*4882a593Smuzhiyun u32 flow_id);
1437*4882a593Smuzhiyun #endif
1438*4882a593Smuzhiyun int (*ndo_add_slave)(struct net_device *dev,
1439*4882a593Smuzhiyun struct net_device *slave_dev,
1440*4882a593Smuzhiyun struct netlink_ext_ack *extack);
1441*4882a593Smuzhiyun int (*ndo_del_slave)(struct net_device *dev,
1442*4882a593Smuzhiyun struct net_device *slave_dev);
1443*4882a593Smuzhiyun struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev,
1444*4882a593Smuzhiyun struct sk_buff *skb,
1445*4882a593Smuzhiyun bool all_slaves);
1446*4882a593Smuzhiyun netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1447*4882a593Smuzhiyun netdev_features_t features);
1448*4882a593Smuzhiyun int (*ndo_set_features)(struct net_device *dev,
1449*4882a593Smuzhiyun netdev_features_t features);
1450*4882a593Smuzhiyun int (*ndo_neigh_construct)(struct net_device *dev,
1451*4882a593Smuzhiyun struct neighbour *n);
1452*4882a593Smuzhiyun void (*ndo_neigh_destroy)(struct net_device *dev,
1453*4882a593Smuzhiyun struct neighbour *n);
1454*4882a593Smuzhiyun
1455*4882a593Smuzhiyun int (*ndo_fdb_add)(struct ndmsg *ndm,
1456*4882a593Smuzhiyun struct nlattr *tb[],
1457*4882a593Smuzhiyun struct net_device *dev,
1458*4882a593Smuzhiyun const unsigned char *addr,
1459*4882a593Smuzhiyun u16 vid,
1460*4882a593Smuzhiyun u16 flags,
1461*4882a593Smuzhiyun struct netlink_ext_ack *extack);
1462*4882a593Smuzhiyun int (*ndo_fdb_del)(struct ndmsg *ndm,
1463*4882a593Smuzhiyun struct nlattr *tb[],
1464*4882a593Smuzhiyun struct net_device *dev,
1465*4882a593Smuzhiyun const unsigned char *addr,
1466*4882a593Smuzhiyun u16 vid);
1467*4882a593Smuzhiyun int (*ndo_fdb_dump)(struct sk_buff *skb,
1468*4882a593Smuzhiyun struct netlink_callback *cb,
1469*4882a593Smuzhiyun struct net_device *dev,
1470*4882a593Smuzhiyun struct net_device *filter_dev,
1471*4882a593Smuzhiyun int *idx);
1472*4882a593Smuzhiyun int (*ndo_fdb_get)(struct sk_buff *skb,
1473*4882a593Smuzhiyun struct nlattr *tb[],
1474*4882a593Smuzhiyun struct net_device *dev,
1475*4882a593Smuzhiyun const unsigned char *addr,
1476*4882a593Smuzhiyun u16 vid, u32 portid, u32 seq,
1477*4882a593Smuzhiyun struct netlink_ext_ack *extack);
1478*4882a593Smuzhiyun int (*ndo_bridge_setlink)(struct net_device *dev,
1479*4882a593Smuzhiyun struct nlmsghdr *nlh,
1480*4882a593Smuzhiyun u16 flags,
1481*4882a593Smuzhiyun struct netlink_ext_ack *extack);
1482*4882a593Smuzhiyun int (*ndo_bridge_getlink)(struct sk_buff *skb,
1483*4882a593Smuzhiyun u32 pid, u32 seq,
1484*4882a593Smuzhiyun struct net_device *dev,
1485*4882a593Smuzhiyun u32 filter_mask,
1486*4882a593Smuzhiyun int nlflags);
1487*4882a593Smuzhiyun int (*ndo_bridge_dellink)(struct net_device *dev,
1488*4882a593Smuzhiyun struct nlmsghdr *nlh,
1489*4882a593Smuzhiyun u16 flags);
1490*4882a593Smuzhiyun int (*ndo_change_carrier)(struct net_device *dev,
1491*4882a593Smuzhiyun bool new_carrier);
1492*4882a593Smuzhiyun int (*ndo_get_phys_port_id)(struct net_device *dev,
1493*4882a593Smuzhiyun struct netdev_phys_item_id *ppid);
1494*4882a593Smuzhiyun int (*ndo_get_port_parent_id)(struct net_device *dev,
1495*4882a593Smuzhiyun struct netdev_phys_item_id *ppid);
1496*4882a593Smuzhiyun int (*ndo_get_phys_port_name)(struct net_device *dev,
1497*4882a593Smuzhiyun char *name, size_t len);
1498*4882a593Smuzhiyun void (*ndo_udp_tunnel_add)(struct net_device *dev,
1499*4882a593Smuzhiyun struct udp_tunnel_info *ti);
1500*4882a593Smuzhiyun void (*ndo_udp_tunnel_del)(struct net_device *dev,
1501*4882a593Smuzhiyun struct udp_tunnel_info *ti);
1502*4882a593Smuzhiyun void* (*ndo_dfwd_add_station)(struct net_device *pdev,
1503*4882a593Smuzhiyun struct net_device *dev);
1504*4882a593Smuzhiyun void (*ndo_dfwd_del_station)(struct net_device *pdev,
1505*4882a593Smuzhiyun void *priv);
1506*4882a593Smuzhiyun
1507*4882a593Smuzhiyun int (*ndo_set_tx_maxrate)(struct net_device *dev,
1508*4882a593Smuzhiyun int queue_index,
1509*4882a593Smuzhiyun u32 maxrate);
1510*4882a593Smuzhiyun int (*ndo_get_iflink)(const struct net_device *dev);
1511*4882a593Smuzhiyun int (*ndo_change_proto_down)(struct net_device *dev,
1512*4882a593Smuzhiyun bool proto_down);
1513*4882a593Smuzhiyun int (*ndo_fill_metadata_dst)(struct net_device *dev,
1514*4882a593Smuzhiyun struct sk_buff *skb);
1515*4882a593Smuzhiyun void (*ndo_set_rx_headroom)(struct net_device *dev,
1516*4882a593Smuzhiyun int needed_headroom);
1517*4882a593Smuzhiyun int (*ndo_bpf)(struct net_device *dev,
1518*4882a593Smuzhiyun struct netdev_bpf *bpf);
1519*4882a593Smuzhiyun int (*ndo_xdp_xmit)(struct net_device *dev, int n,
1520*4882a593Smuzhiyun struct xdp_frame **xdp,
1521*4882a593Smuzhiyun u32 flags);
1522*4882a593Smuzhiyun int (*ndo_xsk_wakeup)(struct net_device *dev,
1523*4882a593Smuzhiyun u32 queue_id, u32 flags);
1524*4882a593Smuzhiyun struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev);
1525*4882a593Smuzhiyun int (*ndo_tunnel_ctl)(struct net_device *dev,
1526*4882a593Smuzhiyun struct ip_tunnel_parm *p, int cmd);
1527*4882a593Smuzhiyun struct net_device * (*ndo_get_peer_dev)(struct net_device *dev);
1528*4882a593Smuzhiyun
1529*4882a593Smuzhiyun ANDROID_KABI_RESERVE(1);
1530*4882a593Smuzhiyun ANDROID_KABI_RESERVE(2);
1531*4882a593Smuzhiyun ANDROID_KABI_RESERVE(3);
1532*4882a593Smuzhiyun ANDROID_KABI_RESERVE(4);
1533*4882a593Smuzhiyun ANDROID_KABI_RESERVE(5);
1534*4882a593Smuzhiyun ANDROID_KABI_RESERVE(6);
1535*4882a593Smuzhiyun ANDROID_KABI_RESERVE(7);
1536*4882a593Smuzhiyun ANDROID_KABI_RESERVE(8);
1537*4882a593Smuzhiyun };
1538*4882a593Smuzhiyun
1539*4882a593Smuzhiyun /**
1540*4882a593Smuzhiyun * enum net_device_priv_flags - &struct net_device priv_flags
1541*4882a593Smuzhiyun *
1542*4882a593Smuzhiyun * These are the &struct net_device priv_flags; they are only set internally
1543*4882a593Smuzhiyun * by drivers and used in the kernel. These flags are invisible to
1544*4882a593Smuzhiyun * userspace; this means that the order of these flags can change
1545*4882a593Smuzhiyun * during any kernel release.
1546*4882a593Smuzhiyun *
1547*4882a593Smuzhiyun * You should have a pretty good reason to be extending these flags.
1548*4882a593Smuzhiyun *
1549*4882a593Smuzhiyun * @IFF_802_1Q_VLAN: 802.1Q VLAN device
1550*4882a593Smuzhiyun * @IFF_EBRIDGE: Ethernet bridging device
1551*4882a593Smuzhiyun * @IFF_BONDING: bonding master or slave
1552*4882a593Smuzhiyun * @IFF_ISATAP: ISATAP interface (RFC4214)
1553*4882a593Smuzhiyun * @IFF_WAN_HDLC: WAN HDLC device
1554*4882a593Smuzhiyun * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
1555*4882a593Smuzhiyun * release skb->dst
1556*4882a593Smuzhiyun * @IFF_DONT_BRIDGE: disallow bridging this ether dev
1557*4882a593Smuzhiyun * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
1558*4882a593Smuzhiyun * @IFF_MACVLAN_PORT: device used as macvlan port
1559*4882a593Smuzhiyun * @IFF_BRIDGE_PORT: device used as bridge port
1560*4882a593Smuzhiyun * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
1561*4882a593Smuzhiyun * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
1562*4882a593Smuzhiyun * @IFF_UNICAST_FLT: Supports unicast filtering
1563*4882a593Smuzhiyun * @IFF_TEAM_PORT: device used as team port
1564*4882a593Smuzhiyun * @IFF_SUPP_NOFCS: device supports sending custom FCS
1565*4882a593Smuzhiyun * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
1566*4882a593Smuzhiyun * change when it's running
1567*4882a593Smuzhiyun * @IFF_MACVLAN: Macvlan device
1568*4882a593Smuzhiyun * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
1569*4882a593Smuzhiyun * underlying stacked devices
1570*4882a593Smuzhiyun * @IFF_L3MDEV_MASTER: device is an L3 master device
1571*4882a593Smuzhiyun * @IFF_NO_QUEUE: device can run without qdisc attached
1572*4882a593Smuzhiyun * @IFF_OPENVSWITCH: device is an Open vSwitch master
1573*4882a593Smuzhiyun * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device
1574*4882a593Smuzhiyun * @IFF_TEAM: device is a team device
1575*4882a593Smuzhiyun * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured
1576*4882a593Smuzhiyun * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external
1577*4882a593Smuzhiyun * entity (i.e. the master device for bridged veth)
1578*4882a593Smuzhiyun * @IFF_MACSEC: device is a MACsec device
1579*4882a593Smuzhiyun * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
1580*4882a593Smuzhiyun * @IFF_FAILOVER: device is a failover master device
1581*4882a593Smuzhiyun * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
1582*4882a593Smuzhiyun * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
1583*4882a593Smuzhiyun * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running
1584*4882a593Smuzhiyun */
1585*4882a593Smuzhiyun enum netdev_priv_flags {
1586*4882a593Smuzhiyun IFF_802_1Q_VLAN = 1<<0,
1587*4882a593Smuzhiyun IFF_EBRIDGE = 1<<1,
1588*4882a593Smuzhiyun IFF_BONDING = 1<<2,
1589*4882a593Smuzhiyun IFF_ISATAP = 1<<3,
1590*4882a593Smuzhiyun IFF_WAN_HDLC = 1<<4,
1591*4882a593Smuzhiyun IFF_XMIT_DST_RELEASE = 1<<5,
1592*4882a593Smuzhiyun IFF_DONT_BRIDGE = 1<<6,
1593*4882a593Smuzhiyun IFF_DISABLE_NETPOLL = 1<<7,
1594*4882a593Smuzhiyun IFF_MACVLAN_PORT = 1<<8,
1595*4882a593Smuzhiyun IFF_BRIDGE_PORT = 1<<9,
1596*4882a593Smuzhiyun IFF_OVS_DATAPATH = 1<<10,
1597*4882a593Smuzhiyun IFF_TX_SKB_SHARING = 1<<11,
1598*4882a593Smuzhiyun IFF_UNICAST_FLT = 1<<12,
1599*4882a593Smuzhiyun IFF_TEAM_PORT = 1<<13,
1600*4882a593Smuzhiyun IFF_SUPP_NOFCS = 1<<14,
1601*4882a593Smuzhiyun IFF_LIVE_ADDR_CHANGE = 1<<15,
1602*4882a593Smuzhiyun IFF_MACVLAN = 1<<16,
1603*4882a593Smuzhiyun IFF_XMIT_DST_RELEASE_PERM = 1<<17,
1604*4882a593Smuzhiyun IFF_L3MDEV_MASTER = 1<<18,
1605*4882a593Smuzhiyun IFF_NO_QUEUE = 1<<19,
1606*4882a593Smuzhiyun IFF_OPENVSWITCH = 1<<20,
1607*4882a593Smuzhiyun IFF_L3MDEV_SLAVE = 1<<21,
1608*4882a593Smuzhiyun IFF_TEAM = 1<<22,
1609*4882a593Smuzhiyun IFF_RXFH_CONFIGURED = 1<<23,
1610*4882a593Smuzhiyun IFF_PHONY_HEADROOM = 1<<24,
1611*4882a593Smuzhiyun IFF_MACSEC = 1<<25,
1612*4882a593Smuzhiyun IFF_NO_RX_HANDLER = 1<<26,
1613*4882a593Smuzhiyun IFF_FAILOVER = 1<<27,
1614*4882a593Smuzhiyun IFF_FAILOVER_SLAVE = 1<<28,
1615*4882a593Smuzhiyun IFF_L3MDEV_RX_HANDLER = 1<<29,
1616*4882a593Smuzhiyun IFF_LIVE_RENAME_OK = 1<<30,
1617*4882a593Smuzhiyun };
1618*4882a593Smuzhiyun
1619*4882a593Smuzhiyun #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
1620*4882a593Smuzhiyun #define IFF_EBRIDGE IFF_EBRIDGE
1621*4882a593Smuzhiyun #define IFF_BONDING IFF_BONDING
1622*4882a593Smuzhiyun #define IFF_ISATAP IFF_ISATAP
1623*4882a593Smuzhiyun #define IFF_WAN_HDLC IFF_WAN_HDLC
1624*4882a593Smuzhiyun #define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE
1625*4882a593Smuzhiyun #define IFF_DONT_BRIDGE IFF_DONT_BRIDGE
1626*4882a593Smuzhiyun #define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL
1627*4882a593Smuzhiyun #define IFF_MACVLAN_PORT IFF_MACVLAN_PORT
1628*4882a593Smuzhiyun #define IFF_BRIDGE_PORT IFF_BRIDGE_PORT
1629*4882a593Smuzhiyun #define IFF_OVS_DATAPATH IFF_OVS_DATAPATH
1630*4882a593Smuzhiyun #define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING
1631*4882a593Smuzhiyun #define IFF_UNICAST_FLT IFF_UNICAST_FLT
1632*4882a593Smuzhiyun #define IFF_TEAM_PORT IFF_TEAM_PORT
1633*4882a593Smuzhiyun #define IFF_SUPP_NOFCS IFF_SUPP_NOFCS
1634*4882a593Smuzhiyun #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
1635*4882a593Smuzhiyun #define IFF_MACVLAN IFF_MACVLAN
1636*4882a593Smuzhiyun #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
1637*4882a593Smuzhiyun #define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER
1638*4882a593Smuzhiyun #define IFF_NO_QUEUE IFF_NO_QUEUE
1639*4882a593Smuzhiyun #define IFF_OPENVSWITCH IFF_OPENVSWITCH
1640*4882a593Smuzhiyun #define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE
1641*4882a593Smuzhiyun #define IFF_TEAM IFF_TEAM
1642*4882a593Smuzhiyun #define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED
1643*4882a593Smuzhiyun #define IFF_MACSEC IFF_MACSEC
1644*4882a593Smuzhiyun #define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER
1645*4882a593Smuzhiyun #define IFF_FAILOVER IFF_FAILOVER
1646*4882a593Smuzhiyun #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
1647*4882a593Smuzhiyun #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
1648*4882a593Smuzhiyun #define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK
1649*4882a593Smuzhiyun
1650*4882a593Smuzhiyun /* Specifies the type of the struct net_device::ml_priv pointer */
1651*4882a593Smuzhiyun enum netdev_ml_priv_type {
1652*4882a593Smuzhiyun ML_PRIV_NONE,
1653*4882a593Smuzhiyun ML_PRIV_CAN,
1654*4882a593Smuzhiyun };
1655*4882a593Smuzhiyun
1656*4882a593Smuzhiyun /**
1657*4882a593Smuzhiyun * struct net_device - The DEVICE structure.
1658*4882a593Smuzhiyun *
1659*4882a593Smuzhiyun * Actually, this whole structure is a big mistake. It mixes I/O
1660*4882a593Smuzhiyun * data with strictly "high-level" data, and it has to know about
1661*4882a593Smuzhiyun * almost every data structure used in the INET module.
1662*4882a593Smuzhiyun *
1663*4882a593Smuzhiyun * @name: This is the first field of the "visible" part of this structure
1664*4882a593Smuzhiyun * (i.e. as seen by users in the "Space.c" file). It is the name
1665*4882a593Smuzhiyun * of the interface.
1666*4882a593Smuzhiyun *
1667*4882a593Smuzhiyun * @name_node: Name hashlist node
1668*4882a593Smuzhiyun * @ifalias: SNMP alias
1669*4882a593Smuzhiyun * @mem_end: Shared memory end
1670*4882a593Smuzhiyun * @mem_start: Shared memory start
1671*4882a593Smuzhiyun * @base_addr: Device I/O address
1672*4882a593Smuzhiyun * @irq: Device IRQ number
1673*4882a593Smuzhiyun *
1674*4882a593Smuzhiyun * @state: Generic network queuing layer state, see netdev_state_t
1675*4882a593Smuzhiyun * @dev_list: The global list of network devices
1676*4882a593Smuzhiyun * @napi_list: List entry used for polling NAPI devices
1677*4882a593Smuzhiyun * @unreg_list: List entry when we are unregistering the
1678*4882a593Smuzhiyun * device; see the function unregister_netdev
1679*4882a593Smuzhiyun * @close_list: List entry used when we are closing the device
1680*4882a593Smuzhiyun * @ptype_all: Device-specific packet handlers for all protocols
1681*4882a593Smuzhiyun * @ptype_specific: Device-specific, protocol-specific packet handlers
1682*4882a593Smuzhiyun *
1683*4882a593Smuzhiyun * @adj_list: Directly linked devices, like slaves for bonding
1684*4882a593Smuzhiyun * @features: Currently active device features
1685*4882a593Smuzhiyun * @hw_features: User-changeable features
1686*4882a593Smuzhiyun *
1687*4882a593Smuzhiyun * @wanted_features: User-requested features
1688*4882a593Smuzhiyun * @vlan_features: Mask of features inheritable by VLAN devices
1689*4882a593Smuzhiyun *
1690*4882a593Smuzhiyun * @hw_enc_features: Mask of features inherited by encapsulating devices
1691*4882a593Smuzhiyun * This field indicates what encapsulation
1692*4882a593Smuzhiyun * offloads the hardware is capable of doing,
1693*4882a593Smuzhiyun * and drivers will need to set them appropriately.
1694*4882a593Smuzhiyun *
1695*4882a593Smuzhiyun * @mpls_features: Mask of features inheritable by MPLS
1696*4882a593Smuzhiyun * @gso_partial_features: value(s) from NETIF_F_GSO\*
1697*4882a593Smuzhiyun *
1698*4882a593Smuzhiyun * @ifindex: interface index
1699*4882a593Smuzhiyun * @group: The group the device belongs to
1700*4882a593Smuzhiyun *
1701*4882a593Smuzhiyun * @stats: Statistics struct, which was left as a legacy, use
1702*4882a593Smuzhiyun * rtnl_link_stats64 instead
1703*4882a593Smuzhiyun *
1704*4882a593Smuzhiyun * @rx_dropped: Dropped packets by core network,
1705*4882a593Smuzhiyun * do not use this in drivers
1706*4882a593Smuzhiyun * @tx_dropped: Dropped packets by core network,
1707*4882a593Smuzhiyun * do not use this in drivers
1708*4882a593Smuzhiyun * @rx_nohandler: nohandler dropped packets by core network on
1709*4882a593Smuzhiyun * inactive devices, do not use this in drivers
1710*4882a593Smuzhiyun * @carrier_up_count: Number of times the carrier has been up
1711*4882a593Smuzhiyun * @carrier_down_count: Number of times the carrier has been down
1712*4882a593Smuzhiyun *
1713*4882a593Smuzhiyun * @wireless_handlers: List of functions to handle Wireless Extensions,
1714*4882a593Smuzhiyun * instead of ioctl,
1715*4882a593Smuzhiyun * see <net/iw_handler.h> for details.
1716*4882a593Smuzhiyun * @wireless_data: Instance data managed by the core of wireless extensions
1717*4882a593Smuzhiyun *
1718*4882a593Smuzhiyun * @netdev_ops: Includes several pointers to callbacks,
1719*4882a593Smuzhiyun * if one wants to override the ndo_*() functions
1720*4882a593Smuzhiyun * @ethtool_ops: Management operations
1721*4882a593Smuzhiyun * @l3mdev_ops: Layer 3 master device operations
1722*4882a593Smuzhiyun * @ndisc_ops: Includes callbacks for different IPv6 neighbour
1723*4882a593Smuzhiyun * discovery handling. Necessary for e.g. 6LoWPAN.
1724*4882a593Smuzhiyun * @xfrmdev_ops: Transformation offload operations
1725*4882a593Smuzhiyun * @tlsdev_ops: Transport Layer Security offload operations
1726*4882a593Smuzhiyun * @header_ops: Includes callbacks for creating, parsing, caching, etc.
1727*4882a593Smuzhiyun * of Layer 2 headers.
1728*4882a593Smuzhiyun *
1729*4882a593Smuzhiyun * @flags: Interface flags (a la BSD)
1730*4882a593Smuzhiyun * @priv_flags: Like 'flags' but invisible to userspace,
1731*4882a593Smuzhiyun * see if.h for the definitions
1732*4882a593Smuzhiyun * @gflags: Global flags (kept as legacy)
1733*4882a593Smuzhiyun * @padded: How much padding added by alloc_netdev()
1734*4882a593Smuzhiyun * @operstate: RFC2863 operstate
1735*4882a593Smuzhiyun * @link_mode: Mapping policy to operstate
1736*4882a593Smuzhiyun * @if_port: Selectable AUI, TP, ...
1737*4882a593Smuzhiyun * @dma: DMA channel
1738*4882a593Smuzhiyun * @mtu: Interface MTU value
1739*4882a593Smuzhiyun * @min_mtu: Interface Minimum MTU value
1740*4882a593Smuzhiyun * @max_mtu: Interface Maximum MTU value
1741*4882a593Smuzhiyun * @type: Interface hardware type
1742*4882a593Smuzhiyun * @hard_header_len: Maximum hardware header length.
1743*4882a593Smuzhiyun * @min_header_len: Minimum hardware header length
1744*4882a593Smuzhiyun *
1745*4882a593Smuzhiyun * @needed_headroom: Extra headroom the hardware may need, but not in all
1746*4882a593Smuzhiyun * cases can this be guaranteed
1747*4882a593Smuzhiyun * @needed_tailroom: Extra tailroom the hardware may need, but not in all
1748*4882a593Smuzhiyun * cases can this be guaranteed. Some cases also use
1749*4882a593Smuzhiyun * LL_MAX_HEADER instead to allocate the skb
1750*4882a593Smuzhiyun *
1751*4882a593Smuzhiyun * interface address info:
1752*4882a593Smuzhiyun *
1753*4882a593Smuzhiyun * @perm_addr: Permanent hw address
1754*4882a593Smuzhiyun * @addr_assign_type: Hw address assignment type
1755*4882a593Smuzhiyun * @addr_len: Hardware address length
1756*4882a593Smuzhiyun * @upper_level: Maximum depth level of upper devices.
1757*4882a593Smuzhiyun * @lower_level: Maximum depth level of lower devices.
1758*4882a593Smuzhiyun * @neigh_priv_len: Used in neigh_alloc()
1759*4882a593Smuzhiyun * @dev_id: Used to differentiate devices that share
1760*4882a593Smuzhiyun * the same link layer address
1761*4882a593Smuzhiyun * @dev_port: Used to differentiate devices that share
1762*4882a593Smuzhiyun * the same function
1763*4882a593Smuzhiyun * @addr_list_lock: XXX: need comments on this one
1764*4882a593Smuzhiyun * @name_assign_type: network interface name assignment type
1765*4882a593Smuzhiyun * @uc_promisc: Counter that indicates promiscuous mode
1766*4882a593Smuzhiyun * has been enabled due to the need to listen to
1767*4882a593Smuzhiyun * additional unicast addresses in a device that
1768*4882a593Smuzhiyun * does not implement ndo_set_rx_mode()
1769*4882a593Smuzhiyun * @uc: unicast mac addresses
1770*4882a593Smuzhiyun * @mc: multicast mac addresses
1771*4882a593Smuzhiyun * @dev_addrs: list of device hw addresses
1772*4882a593Smuzhiyun * @queues_kset: Group of all Kobjects in the Tx and RX queues
1773*4882a593Smuzhiyun * @promiscuity: Number of times the NIC is told to work in
1774*4882a593Smuzhiyun * promiscuous mode; if it becomes 0 the NIC will
1775*4882a593Smuzhiyun * exit promiscuous mode
1776*4882a593Smuzhiyun * @allmulti: Counter, enables or disables allmulticast mode
1777*4882a593Smuzhiyun *
1778*4882a593Smuzhiyun * @vlan_info: VLAN info
1779*4882a593Smuzhiyun * @dsa_ptr: dsa specific data
1780*4882a593Smuzhiyun * @tipc_ptr: TIPC specific data
1781*4882a593Smuzhiyun * @atalk_ptr: AppleTalk link
1782*4882a593Smuzhiyun * @ip_ptr: IPv4 specific data
1783*4882a593Smuzhiyun * @dn_ptr: DECnet specific data
1784*4882a593Smuzhiyun * @ip6_ptr: IPv6 specific data
1785*4882a593Smuzhiyun * @ax25_ptr: AX.25 specific data
1786*4882a593Smuzhiyun * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
1787*4882a593Smuzhiyun * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
1788*4882a593Smuzhiyun * device struct
1789*4882a593Smuzhiyun * @mpls_ptr: mpls_dev struct pointer
1790*4882a593Smuzhiyun *
1791*4882a593Smuzhiyun * @dev_addr: Hw address (before bcast,
1792*4882a593Smuzhiyun * because most packets are unicast)
1793*4882a593Smuzhiyun *
1794*4882a593Smuzhiyun * @_rx: Array of RX queues
1795*4882a593Smuzhiyun * @num_rx_queues: Number of RX queues
1796*4882a593Smuzhiyun * allocated at register_netdev() time
1797*4882a593Smuzhiyun * @real_num_rx_queues: Number of RX queues currently active in device
1798*4882a593Smuzhiyun * @xdp_prog: XDP sockets filter program pointer
1799*4882a593Smuzhiyun * @gro_flush_timeout: timeout for GRO layer in NAPI
1800*4882a593Smuzhiyun * @napi_defer_hard_irqs: If not zero, provides a counter that
1801*4882a593Smuzhiyun * allows avoiding NIC hard IRQs on busy queues.
1802*4882a593Smuzhiyun *
1803*4882a593Smuzhiyun * @rx_handler: handler for received packets
1804*4882a593Smuzhiyun * @rx_handler_data: XXX: need comments on this one
1805*4882a593Smuzhiyun * @miniq_ingress: ingress/clsact qdisc specific data for
1806*4882a593Smuzhiyun * ingress processing
1807*4882a593Smuzhiyun * @ingress_queue: XXX: need comments on this one
1808*4882a593Smuzhiyun * @nf_hooks_ingress: netfilter hooks executed for ingress packets
1809*4882a593Smuzhiyun * @broadcast: hw bcast address
1810*4882a593Smuzhiyun *
1811*4882a593Smuzhiyun * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
1812*4882a593Smuzhiyun * indexed by RX queue number. Assigned by driver.
1813*4882a593Smuzhiyun * This must only be set if the ndo_rx_flow_steer
1814*4882a593Smuzhiyun * operation is defined
1815*4882a593Smuzhiyun * @index_hlist: Device index hash chain
1816*4882a593Smuzhiyun *
1817*4882a593Smuzhiyun * @_tx: Array of TX queues
1818*4882a593Smuzhiyun * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time
1819*4882a593Smuzhiyun * @real_num_tx_queues: Number of TX queues currently active in device
1820*4882a593Smuzhiyun * @qdisc: Root qdisc from userspace point of view
1821*4882a593Smuzhiyun * @tx_queue_len: Max frames per queue allowed
1822*4882a593Smuzhiyun * @tx_global_lock: XXX: need comments on this one
1823*4882a593Smuzhiyun * @xdp_bulkq: XDP device bulk queue
1824*4882a593Smuzhiyun * @xps_cpus_map: all CPUs map for XPS device
1825*4882a593Smuzhiyun * @xps_rxqs_map: all RXQs map for XPS device
1826*4882a593Smuzhiyun *
1827*4882a593Smuzhiyun * @xps_maps: XXX: need comments on this one
1828*4882a593Smuzhiyun * @miniq_egress: clsact qdisc specific data for
1829*4882a593Smuzhiyun * egress processing
1830*4882a593Smuzhiyun * @qdisc_hash: qdisc hash table
1831*4882a593Smuzhiyun * @watchdog_timeo: Represents the timeout that is used by
1832*4882a593Smuzhiyun * the watchdog (see dev_watchdog())
1833*4882a593Smuzhiyun * @watchdog_timer: List of timers
1834*4882a593Smuzhiyun *
1835*4882a593Smuzhiyun * @proto_down_reason: reason a netdev interface is held down
1836*4882a593Smuzhiyun * @pcpu_refcnt: Number of references to this device
1837*4882a593Smuzhiyun * @todo_list: Delayed register/unregister
1838*4882a593Smuzhiyun * @link_watch_list: XXX: need comments on this one
1839*4882a593Smuzhiyun *
1840*4882a593Smuzhiyun * @reg_state: Register/unregister state machine
1841*4882a593Smuzhiyun * @dismantle: Device is going to be freed
1842*4882a593Smuzhiyun * @rtnl_link_state: This enum represents the phases of creating
1843*4882a593Smuzhiyun * a new link
1844*4882a593Smuzhiyun *
1845*4882a593Smuzhiyun * @needs_free_netdev: Should unregister perform free_netdev?
1846*4882a593Smuzhiyun * @priv_destructor: Called from unregister
1847*4882a593Smuzhiyun * @npinfo: XXX: need comments on this one
1848*4882a593Smuzhiyun * @nd_net: Network namespace this network device is inside
1849*4882a593Smuzhiyun *
1850*4882a593Smuzhiyun * @ml_priv: Mid-layer private
1851*4882a593Smuzhiyun * @ml_priv_type: Mid-layer private type
1852*4882a593Smuzhiyun * @lstats: Loopback statistics
1853*4882a593Smuzhiyun * @tstats: Tunnel statistics
1854*4882a593Smuzhiyun * @dstats: Dummy statistics
1855*4882a593Smuzhiyun * @vstats: Virtual ethernet statistics
1856*4882a593Smuzhiyun *
1857*4882a593Smuzhiyun * @garp_port: GARP
1858*4882a593Smuzhiyun * @mrp_port: MRP
1859*4882a593Smuzhiyun *
1860*4882a593Smuzhiyun * @dev: Class/net/name entry
1861*4882a593Smuzhiyun * @sysfs_groups: Space for optional device, statistics and wireless
1862*4882a593Smuzhiyun * sysfs groups
1863*4882a593Smuzhiyun *
1864*4882a593Smuzhiyun * @sysfs_rx_queue_group: Space for optional per-rx queue attributes
1865*4882a593Smuzhiyun * @rtnl_link_ops: Rtnl_link_ops
1866*4882a593Smuzhiyun *
1867*4882a593Smuzhiyun * @gso_max_size: Maximum size of generic segmentation offload
1868*4882a593Smuzhiyun * @gso_max_segs: Maximum number of segments that can be passed to the
1869*4882a593Smuzhiyun * NIC for GSO
1870*4882a593Smuzhiyun *
1871*4882a593Smuzhiyun * @dcbnl_ops: Data Center Bridging netlink ops
1872*4882a593Smuzhiyun * @num_tc: Number of traffic classes in the net device
1873*4882a593Smuzhiyun * @tc_to_txq: XXX: need comments on this one
1874*4882a593Smuzhiyun * @prio_tc_map: XXX: need comments on this one
1875*4882a593Smuzhiyun *
1876*4882a593Smuzhiyun * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp
1877*4882a593Smuzhiyun *
1878*4882a593Smuzhiyun * @priomap: XXX: need comments on this one
1879*4882a593Smuzhiyun * @phydev: Physical device may attach itself
1880*4882a593Smuzhiyun * for hardware timestamping
1881*4882a593Smuzhiyun * @sfp_bus: attached &struct sfp_bus structure.
1882*4882a593Smuzhiyun *
1883*4882a593Smuzhiyun * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
1884*4882a593Smuzhiyun * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
1885*4882a593Smuzhiyun *
1886*4882a593Smuzhiyun * @proto_down: protocol port state information can be sent to the
1887*4882a593Smuzhiyun * switch driver and used to set the phys state of the
1888*4882a593Smuzhiyun * switch port.
1889*4882a593Smuzhiyun *
1890*4882a593Smuzhiyun * @wol_enabled: Wake-on-LAN is enabled
1891*4882a593Smuzhiyun *
1892*4882a593Smuzhiyun * @net_notifier_list: List of per-net netdev notifier block
1893*4882a593Smuzhiyun * that follow this device when it is moved
1894*4882a593Smuzhiyun * to another network namespace.
1895*4882a593Smuzhiyun *
1896*4882a593Smuzhiyun * @macsec_ops: MACsec offloading ops
1897*4882a593Smuzhiyun *
1898*4882a593Smuzhiyun * @udp_tunnel_nic_info: static structure describing the UDP tunnel
1899*4882a593Smuzhiyun * offload capabilities of the device
1900*4882a593Smuzhiyun * @udp_tunnel_nic: UDP tunnel offload state
1901*4882a593Smuzhiyun * @xdp_state: stores info on attached XDP BPF programs
1902*4882a593Smuzhiyun *
1903*4882a593Smuzhiyun * @nested_level: Used as a parameter of spin_lock_nested() of
1904*4882a593Smuzhiyun * dev->addr_list_lock.
1905*4882a593Smuzhiyun * @unlink_list: As netif_addr_lock() can be called recursively,
1906*4882a593Smuzhiyun * keep a list of interfaces to be deleted.
1907*4882a593Smuzhiyun *
1908*4882a593Smuzhiyun * FIXME: cleanup struct net_device such that network protocol info
1909*4882a593Smuzhiyun * moves out.
1910*4882a593Smuzhiyun */
1911*4882a593Smuzhiyun
1912*4882a593Smuzhiyun struct net_device {
1913*4882a593Smuzhiyun char name[IFNAMSIZ];
1914*4882a593Smuzhiyun struct netdev_name_node *name_node;
1915*4882a593Smuzhiyun struct dev_ifalias __rcu *ifalias;
1916*4882a593Smuzhiyun /*
1917*4882a593Smuzhiyun * I/O specific fields
1918*4882a593Smuzhiyun * FIXME: Merge these and struct ifmap into one
1919*4882a593Smuzhiyun */
1920*4882a593Smuzhiyun unsigned long mem_end;
1921*4882a593Smuzhiyun unsigned long mem_start;
1922*4882a593Smuzhiyun unsigned long base_addr;
1923*4882a593Smuzhiyun int irq;
1924*4882a593Smuzhiyun
1925*4882a593Smuzhiyun /*
1926*4882a593Smuzhiyun * Some hardware also needs these fields (state,dev_list,
1927*4882a593Smuzhiyun * napi_list,unreg_list,close_list) but they are not
1928*4882a593Smuzhiyun * part of the usual set specified in Space.c.
1929*4882a593Smuzhiyun */
1930*4882a593Smuzhiyun
1931*4882a593Smuzhiyun unsigned long state;
1932*4882a593Smuzhiyun
1933*4882a593Smuzhiyun struct list_head dev_list;
1934*4882a593Smuzhiyun struct list_head napi_list;
1935*4882a593Smuzhiyun struct list_head unreg_list;
1936*4882a593Smuzhiyun struct list_head close_list;
1937*4882a593Smuzhiyun struct list_head ptype_all;
1938*4882a593Smuzhiyun struct list_head ptype_specific;
1939*4882a593Smuzhiyun
1940*4882a593Smuzhiyun struct {
1941*4882a593Smuzhiyun struct list_head upper;
1942*4882a593Smuzhiyun struct list_head lower;
1943*4882a593Smuzhiyun } adj_list;
1944*4882a593Smuzhiyun
1945*4882a593Smuzhiyun netdev_features_t features;
1946*4882a593Smuzhiyun netdev_features_t hw_features;
1947*4882a593Smuzhiyun netdev_features_t wanted_features;
1948*4882a593Smuzhiyun netdev_features_t vlan_features;
1949*4882a593Smuzhiyun netdev_features_t hw_enc_features;
1950*4882a593Smuzhiyun netdev_features_t mpls_features;
1951*4882a593Smuzhiyun netdev_features_t gso_partial_features;
1952*4882a593Smuzhiyun
1953*4882a593Smuzhiyun int ifindex;
1954*4882a593Smuzhiyun int group;
1955*4882a593Smuzhiyun
1956*4882a593Smuzhiyun struct net_device_stats stats;
1957*4882a593Smuzhiyun
1958*4882a593Smuzhiyun atomic_long_t rx_dropped;
1959*4882a593Smuzhiyun atomic_long_t tx_dropped;
1960*4882a593Smuzhiyun atomic_long_t rx_nohandler;
1961*4882a593Smuzhiyun
1962*4882a593Smuzhiyun /* Stats to monitor link on/off, flapping */
1963*4882a593Smuzhiyun atomic_t carrier_up_count;
1964*4882a593Smuzhiyun atomic_t carrier_down_count;
1965*4882a593Smuzhiyun
1966*4882a593Smuzhiyun #ifdef CONFIG_WIRELESS_EXT
1967*4882a593Smuzhiyun const struct iw_handler_def *wireless_handlers;
1968*4882a593Smuzhiyun struct iw_public_data *wireless_data;
1969*4882a593Smuzhiyun #endif
1970*4882a593Smuzhiyun const struct net_device_ops *netdev_ops;
1971*4882a593Smuzhiyun const struct ethtool_ops *ethtool_ops;
1972*4882a593Smuzhiyun #ifdef CONFIG_NET_L3_MASTER_DEV
1973*4882a593Smuzhiyun const struct l3mdev_ops *l3mdev_ops;
1974*4882a593Smuzhiyun #endif
1975*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_IPV6)
1976*4882a593Smuzhiyun const struct ndisc_ops *ndisc_ops;
1977*4882a593Smuzhiyun #endif
1978*4882a593Smuzhiyun
1979*4882a593Smuzhiyun #ifdef CONFIG_XFRM_OFFLOAD
1980*4882a593Smuzhiyun const struct xfrmdev_ops *xfrmdev_ops;
1981*4882a593Smuzhiyun #endif
1982*4882a593Smuzhiyun
1983*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_TLS_DEVICE)
1984*4882a593Smuzhiyun const struct tlsdev_ops *tlsdev_ops;
1985*4882a593Smuzhiyun #endif
1986*4882a593Smuzhiyun
1987*4882a593Smuzhiyun const struct header_ops *header_ops;
1988*4882a593Smuzhiyun
1989*4882a593Smuzhiyun unsigned int flags;
1990*4882a593Smuzhiyun unsigned int priv_flags;
1991*4882a593Smuzhiyun
1992*4882a593Smuzhiyun unsigned short gflags;
1993*4882a593Smuzhiyun unsigned short padded;
1994*4882a593Smuzhiyun
1995*4882a593Smuzhiyun unsigned char operstate;
1996*4882a593Smuzhiyun unsigned char link_mode;
1997*4882a593Smuzhiyun
1998*4882a593Smuzhiyun unsigned char if_port;
1999*4882a593Smuzhiyun unsigned char dma;
2000*4882a593Smuzhiyun
2001*4882a593Smuzhiyun /* Note: dev->mtu is often read without holding a lock.
2002*4882a593Smuzhiyun * Writers usually hold RTNL.
2003*4882a593Smuzhiyun * It is recommended to use READ_ONCE() to annotate the reads,
2004*4882a593Smuzhiyun * and to use WRITE_ONCE() to annotate the writes.
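 *
 * For example, a lockless reader path might do:
 *
 *	mtu = READ_ONCE(dev->mtu);
 *
 * while a writer holding RTNL would pair it with:
 *
 *	WRITE_ONCE(dev->mtu, new_mtu);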
2005*4882a593Smuzhiyun */
2006*4882a593Smuzhiyun unsigned int mtu;
2007*4882a593Smuzhiyun unsigned int min_mtu;
2008*4882a593Smuzhiyun unsigned int max_mtu;
2009*4882a593Smuzhiyun unsigned short type;
2010*4882a593Smuzhiyun unsigned short hard_header_len;
2011*4882a593Smuzhiyun unsigned char min_header_len;
2012*4882a593Smuzhiyun unsigned char name_assign_type;
2013*4882a593Smuzhiyun
2014*4882a593Smuzhiyun unsigned short needed_headroom;
2015*4882a593Smuzhiyun unsigned short needed_tailroom;
2016*4882a593Smuzhiyun
2017*4882a593Smuzhiyun /* Interface address info. */
2018*4882a593Smuzhiyun unsigned char perm_addr[MAX_ADDR_LEN];
2019*4882a593Smuzhiyun unsigned char addr_assign_type;
2020*4882a593Smuzhiyun unsigned char addr_len;
2021*4882a593Smuzhiyun unsigned char upper_level;
2022*4882a593Smuzhiyun unsigned char lower_level;
2023*4882a593Smuzhiyun
2024*4882a593Smuzhiyun unsigned short neigh_priv_len;
2025*4882a593Smuzhiyun unsigned short dev_id;
2026*4882a593Smuzhiyun unsigned short dev_port;
2027*4882a593Smuzhiyun spinlock_t addr_list_lock;
2028*4882a593Smuzhiyun
2029*4882a593Smuzhiyun struct netdev_hw_addr_list uc;
2030*4882a593Smuzhiyun struct netdev_hw_addr_list mc;
2031*4882a593Smuzhiyun struct netdev_hw_addr_list dev_addrs;
2032*4882a593Smuzhiyun
2033*4882a593Smuzhiyun #ifdef CONFIG_SYSFS
2034*4882a593Smuzhiyun struct kset *queues_kset;
2035*4882a593Smuzhiyun #endif
2036*4882a593Smuzhiyun #ifdef CONFIG_LOCKDEP
2037*4882a593Smuzhiyun struct list_head unlink_list;
2038*4882a593Smuzhiyun #endif
2039*4882a593Smuzhiyun unsigned int promiscuity;
2040*4882a593Smuzhiyun unsigned int allmulti;
2041*4882a593Smuzhiyun bool uc_promisc;
2042*4882a593Smuzhiyun #ifdef CONFIG_LOCKDEP
2043*4882a593Smuzhiyun unsigned char nested_level;
2044*4882a593Smuzhiyun #endif
2045*4882a593Smuzhiyun
2046*4882a593Smuzhiyun
2047*4882a593Smuzhiyun /* Protocol-specific pointers */
2048*4882a593Smuzhiyun
2049*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_VLAN_8021Q)
2050*4882a593Smuzhiyun struct vlan_info __rcu *vlan_info;
2051*4882a593Smuzhiyun #endif
2052*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_NET_DSA)
2053*4882a593Smuzhiyun struct dsa_port *dsa_ptr;
2054*4882a593Smuzhiyun #endif
2055*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_TIPC)
2056*4882a593Smuzhiyun struct tipc_bearer __rcu *tipc_ptr;
2057*4882a593Smuzhiyun #endif
2058*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK)
2059*4882a593Smuzhiyun void *atalk_ptr;
2060*4882a593Smuzhiyun #endif
2061*4882a593Smuzhiyun struct in_device __rcu *ip_ptr;
2062*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_DECNET)
2063*4882a593Smuzhiyun struct dn_dev __rcu *dn_ptr;
2064*4882a593Smuzhiyun #endif
2065*4882a593Smuzhiyun struct inet6_dev __rcu *ip6_ptr;
2066*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_AX25)
2067*4882a593Smuzhiyun void *ax25_ptr;
2068*4882a593Smuzhiyun #endif
2069*4882a593Smuzhiyun struct wireless_dev *ieee80211_ptr;
2070*4882a593Smuzhiyun struct wpan_dev *ieee802154_ptr;
2071*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_MPLS_ROUTING)
2072*4882a593Smuzhiyun struct mpls_dev __rcu *mpls_ptr;
2073*4882a593Smuzhiyun #endif
2074*4882a593Smuzhiyun
2075*4882a593Smuzhiyun /*
2076*4882a593Smuzhiyun * Cache lines mostly used on receive path (including eth_type_trans())
2077*4882a593Smuzhiyun */
2078*4882a593Smuzhiyun /* Interface address info used in eth_type_trans() */
2079*4882a593Smuzhiyun unsigned char *dev_addr;
2080*4882a593Smuzhiyun
2081*4882a593Smuzhiyun struct netdev_rx_queue *_rx;
2082*4882a593Smuzhiyun unsigned int num_rx_queues;
2083*4882a593Smuzhiyun unsigned int real_num_rx_queues;
2084*4882a593Smuzhiyun
2085*4882a593Smuzhiyun struct bpf_prog __rcu *xdp_prog;
2086*4882a593Smuzhiyun unsigned long gro_flush_timeout;
2087*4882a593Smuzhiyun int napi_defer_hard_irqs;
2088*4882a593Smuzhiyun rx_handler_func_t __rcu *rx_handler;
2089*4882a593Smuzhiyun void __rcu *rx_handler_data;
2090*4882a593Smuzhiyun
2091*4882a593Smuzhiyun #ifdef CONFIG_NET_CLS_ACT
2092*4882a593Smuzhiyun struct mini_Qdisc __rcu *miniq_ingress;
2093*4882a593Smuzhiyun #endif
2094*4882a593Smuzhiyun struct netdev_queue __rcu *ingress_queue;
2095*4882a593Smuzhiyun #ifdef CONFIG_NETFILTER_INGRESS
2096*4882a593Smuzhiyun struct nf_hook_entries __rcu *nf_hooks_ingress;
2097*4882a593Smuzhiyun #endif
2098*4882a593Smuzhiyun
2099*4882a593Smuzhiyun unsigned char broadcast[MAX_ADDR_LEN];
2100*4882a593Smuzhiyun #ifdef CONFIG_RFS_ACCEL
2101*4882a593Smuzhiyun struct cpu_rmap *rx_cpu_rmap;
2102*4882a593Smuzhiyun #endif
2103*4882a593Smuzhiyun struct hlist_node index_hlist;
2104*4882a593Smuzhiyun
2105*4882a593Smuzhiyun /*
2106*4882a593Smuzhiyun * Cache lines mostly used on transmit path
2107*4882a593Smuzhiyun */
2108*4882a593Smuzhiyun struct netdev_queue *_tx ____cacheline_aligned_in_smp;
2109*4882a593Smuzhiyun unsigned int num_tx_queues;
2110*4882a593Smuzhiyun unsigned int real_num_tx_queues;
2111*4882a593Smuzhiyun struct Qdisc __rcu *qdisc;
2112*4882a593Smuzhiyun unsigned int tx_queue_len;
2113*4882a593Smuzhiyun spinlock_t tx_global_lock;
2114*4882a593Smuzhiyun
2115*4882a593Smuzhiyun struct xdp_dev_bulk_queue __percpu *xdp_bulkq;
2116*4882a593Smuzhiyun
2117*4882a593Smuzhiyun #ifdef CONFIG_XPS
2118*4882a593Smuzhiyun struct xps_dev_maps __rcu *xps_cpus_map;
2119*4882a593Smuzhiyun struct xps_dev_maps __rcu *xps_rxqs_map;
2120*4882a593Smuzhiyun #endif
2121*4882a593Smuzhiyun #ifdef CONFIG_NET_CLS_ACT
2122*4882a593Smuzhiyun struct mini_Qdisc __rcu *miniq_egress;
2123*4882a593Smuzhiyun #endif
2124*4882a593Smuzhiyun
2125*4882a593Smuzhiyun #ifdef CONFIG_NET_SCHED
2126*4882a593Smuzhiyun DECLARE_HASHTABLE (qdisc_hash, 4);
2127*4882a593Smuzhiyun #endif
2128*4882a593Smuzhiyun /* These may be needed for future network-power-down code. */
2129*4882a593Smuzhiyun struct timer_list watchdog_timer;
2130*4882a593Smuzhiyun int watchdog_timeo;
2131*4882a593Smuzhiyun
2132*4882a593Smuzhiyun u32 proto_down_reason;
2133*4882a593Smuzhiyun
2134*4882a593Smuzhiyun struct list_head todo_list;
2135*4882a593Smuzhiyun int __percpu *pcpu_refcnt;
2136*4882a593Smuzhiyun
2137*4882a593Smuzhiyun struct list_head link_watch_list;
2138*4882a593Smuzhiyun
2139*4882a593Smuzhiyun enum { NETREG_UNINITIALIZED=0,
2140*4882a593Smuzhiyun NETREG_REGISTERED, /* completed register_netdevice */
2141*4882a593Smuzhiyun NETREG_UNREGISTERING, /* called unregister_netdevice */
2142*4882a593Smuzhiyun NETREG_UNREGISTERED, /* completed unregister todo */
2143*4882a593Smuzhiyun NETREG_RELEASED, /* called free_netdev */
2144*4882a593Smuzhiyun NETREG_DUMMY, /* dummy device for NAPI poll */
2145*4882a593Smuzhiyun } reg_state:8;
2146*4882a593Smuzhiyun
2147*4882a593Smuzhiyun bool dismantle;
2148*4882a593Smuzhiyun
2149*4882a593Smuzhiyun enum {
2150*4882a593Smuzhiyun RTNL_LINK_INITIALIZED,
2151*4882a593Smuzhiyun RTNL_LINK_INITIALIZING,
2152*4882a593Smuzhiyun } rtnl_link_state:16;
2153*4882a593Smuzhiyun
2154*4882a593Smuzhiyun bool needs_free_netdev;
2155*4882a593Smuzhiyun void (*priv_destructor)(struct net_device *dev);
2156*4882a593Smuzhiyun
2157*4882a593Smuzhiyun #ifdef CONFIG_NETPOLL
2158*4882a593Smuzhiyun struct netpoll_info __rcu *npinfo;
2159*4882a593Smuzhiyun #endif
2160*4882a593Smuzhiyun
2161*4882a593Smuzhiyun possible_net_t nd_net;
2162*4882a593Smuzhiyun
2163*4882a593Smuzhiyun /* mid-layer private */
2164*4882a593Smuzhiyun void *ml_priv;
2165*4882a593Smuzhiyun enum netdev_ml_priv_type ml_priv_type;
2166*4882a593Smuzhiyun
2167*4882a593Smuzhiyun union {
2168*4882a593Smuzhiyun struct pcpu_lstats __percpu *lstats;
2169*4882a593Smuzhiyun struct pcpu_sw_netstats __percpu *tstats;
2170*4882a593Smuzhiyun struct pcpu_dstats __percpu *dstats;
2171*4882a593Smuzhiyun };
2172*4882a593Smuzhiyun
2173*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_GARP)
2174*4882a593Smuzhiyun struct garp_port __rcu *garp_port;
2175*4882a593Smuzhiyun #endif
2176*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_MRP)
2177*4882a593Smuzhiyun struct mrp_port __rcu *mrp_port;
2178*4882a593Smuzhiyun #endif
2179*4882a593Smuzhiyun
2180*4882a593Smuzhiyun struct device dev;
2181*4882a593Smuzhiyun const struct attribute_group *sysfs_groups[4];
2182*4882a593Smuzhiyun const struct attribute_group *sysfs_rx_queue_group;
2183*4882a593Smuzhiyun
2184*4882a593Smuzhiyun const struct rtnl_link_ops *rtnl_link_ops;
2185*4882a593Smuzhiyun
2186*4882a593Smuzhiyun /* for setting kernel sock attribute on TCP connection setup */
2187*4882a593Smuzhiyun #define GSO_MAX_SIZE 65536
2188*4882a593Smuzhiyun unsigned int gso_max_size;
2189*4882a593Smuzhiyun #define GSO_MAX_SEGS 65535
2190*4882a593Smuzhiyun u16 gso_max_segs;
2191*4882a593Smuzhiyun
2192*4882a593Smuzhiyun #ifdef CONFIG_DCB
2193*4882a593Smuzhiyun const struct dcbnl_rtnl_ops *dcbnl_ops;
2194*4882a593Smuzhiyun #endif
2195*4882a593Smuzhiyun s16 num_tc;
2196*4882a593Smuzhiyun struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
2197*4882a593Smuzhiyun u8 prio_tc_map[TC_BITMASK + 1];
2198*4882a593Smuzhiyun
2199*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_FCOE)
2200*4882a593Smuzhiyun unsigned int fcoe_ddp_xid;
2201*4882a593Smuzhiyun #endif
2202*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
2203*4882a593Smuzhiyun struct netprio_map __rcu *priomap;
2204*4882a593Smuzhiyun #endif
2205*4882a593Smuzhiyun struct phy_device *phydev;
2206*4882a593Smuzhiyun struct sfp_bus *sfp_bus;
2207*4882a593Smuzhiyun struct lock_class_key *qdisc_tx_busylock;
2208*4882a593Smuzhiyun struct lock_class_key *qdisc_running_key;
2209*4882a593Smuzhiyun bool proto_down;
2210*4882a593Smuzhiyun unsigned wol_enabled:1;
2211*4882a593Smuzhiyun
2212*4882a593Smuzhiyun struct list_head net_notifier_list;
2213*4882a593Smuzhiyun
2214*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_MACSEC)
2215*4882a593Smuzhiyun /* MACsec management functions */
2216*4882a593Smuzhiyun const struct macsec_ops *macsec_ops;
2217*4882a593Smuzhiyun #endif
2218*4882a593Smuzhiyun const struct udp_tunnel_nic_info *udp_tunnel_nic_info;
2219*4882a593Smuzhiyun struct udp_tunnel_nic *udp_tunnel_nic;
2220*4882a593Smuzhiyun
2221*4882a593Smuzhiyun /* protected by rtnl_lock */
2222*4882a593Smuzhiyun struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE];
2223*4882a593Smuzhiyun
2224*4882a593Smuzhiyun ANDROID_KABI_RESERVE(1);
2225*4882a593Smuzhiyun ANDROID_KABI_RESERVE(2);
2226*4882a593Smuzhiyun ANDROID_KABI_RESERVE(3);
2227*4882a593Smuzhiyun ANDROID_KABI_RESERVE(4);
2228*4882a593Smuzhiyun ANDROID_KABI_RESERVE(5);
2229*4882a593Smuzhiyun ANDROID_KABI_RESERVE(6);
2230*4882a593Smuzhiyun ANDROID_KABI_RESERVE(7);
2231*4882a593Smuzhiyun ANDROID_KABI_RESERVE(8);
2232*4882a593Smuzhiyun };
2233*4882a593Smuzhiyun #define to_net_dev(d) container_of(d, struct net_device, dev)
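
/*
 * Illustrative sketch (not part of this header): to_net_dev() recovers the
 * net_device from its embedded struct device, typically in a sysfs show
 * callback.  The foo_ifindex_show() name below is made up.
 *
 *	static ssize_t foo_ifindex_show(struct device *dev,
 *					struct device_attribute *attr, char *buf)
 *	{
 *		struct net_device *ndev = to_net_dev(dev);
 *
 *		return scnprintf(buf, PAGE_SIZE, "%d\n", ndev->ifindex);
 *	}
 */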
2234*4882a593Smuzhiyun
2235*4882a593Smuzhiyun static inline bool netif_elide_gro(const struct net_device *dev)
2236*4882a593Smuzhiyun {
2237*4882a593Smuzhiyun if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
2238*4882a593Smuzhiyun return true;
2239*4882a593Smuzhiyun return false;
2240*4882a593Smuzhiyun }
2241*4882a593Smuzhiyun
2242*4882a593Smuzhiyun #define NETDEV_ALIGN 32
2243*4882a593Smuzhiyun
2244*4882a593Smuzhiyun static inline
2245*4882a593Smuzhiyun int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
2246*4882a593Smuzhiyun {
2247*4882a593Smuzhiyun return dev->prio_tc_map[prio & TC_BITMASK];
2248*4882a593Smuzhiyun }
2249*4882a593Smuzhiyun
2250*4882a593Smuzhiyun static inline
2251*4882a593Smuzhiyun int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
2252*4882a593Smuzhiyun {
2253*4882a593Smuzhiyun if (tc >= dev->num_tc)
2254*4882a593Smuzhiyun return -EINVAL;
2255*4882a593Smuzhiyun
2256*4882a593Smuzhiyun dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
2257*4882a593Smuzhiyun return 0;
2258*4882a593Smuzhiyun }
2259*4882a593Smuzhiyun
2260*4882a593Smuzhiyun int netdev_txq_to_tc(struct net_device *dev, unsigned int txq);
2261*4882a593Smuzhiyun void netdev_reset_tc(struct net_device *dev);
2262*4882a593Smuzhiyun int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset);
2263*4882a593Smuzhiyun int netdev_set_num_tc(struct net_device *dev, u8 num_tc);
2264*4882a593Smuzhiyun
2265*4882a593Smuzhiyun static inline
2266*4882a593Smuzhiyun int netdev_get_num_tc(struct net_device *dev)
2267*4882a593Smuzhiyun {
2268*4882a593Smuzhiyun return dev->num_tc;
2269*4882a593Smuzhiyun }
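
/*
 * Illustrative sketch (not part of this header): a hypothetical driver
 * splitting eight TX queues into two traffic classes and steering
 * priorities 0-3 to TC 0 and 4-15 to TC 1.  foo_setup_tc() is made up.
 *
 *	static int foo_setup_tc(struct net_device *dev)
 *	{
 *		int err, prio;
 *
 *		err = netdev_set_num_tc(dev, 2);
 *		if (err)
 *			return err;
 *		netdev_set_tc_queue(dev, 0, 4, 0);
 *		netdev_set_tc_queue(dev, 1, 4, 4);
 *		for (prio = 0; prio <= TC_BITMASK; prio++)
 *			netdev_set_prio_tc_map(dev, prio, prio < 4 ? 0 : 1);
 *		return 0;
 *	}
 */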
2270*4882a593Smuzhiyun
2271*4882a593Smuzhiyun static inline void net_prefetch(void *p)
2272*4882a593Smuzhiyun {
2273*4882a593Smuzhiyun prefetch(p);
2274*4882a593Smuzhiyun #if L1_CACHE_BYTES < 128
2275*4882a593Smuzhiyun prefetch((u8 *)p + L1_CACHE_BYTES);
2276*4882a593Smuzhiyun #endif
2277*4882a593Smuzhiyun }
2278*4882a593Smuzhiyun
2279*4882a593Smuzhiyun static inline void net_prefetchw(void *p)
2280*4882a593Smuzhiyun {
2281*4882a593Smuzhiyun prefetchw(p);
2282*4882a593Smuzhiyun #if L1_CACHE_BYTES < 128
2283*4882a593Smuzhiyun prefetchw((u8 *)p + L1_CACHE_BYTES);
2284*4882a593Smuzhiyun #endif
2285*4882a593Smuzhiyun }
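
/*
 * Illustrative sketch (not part of this header): warming the cache ahead of
 * the CPU in a receive loop.  foo_next_rx_skb() and foo_process() are made up.
 *
 *	while ((skb = foo_next_rx_skb(ring)) != NULL) {
 *		net_prefetch(skb->data);
 *		foo_process(skb);
 *	}
 */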
2286*4882a593Smuzhiyun
2287*4882a593Smuzhiyun void netdev_unbind_sb_channel(struct net_device *dev,
2288*4882a593Smuzhiyun struct net_device *sb_dev);
2289*4882a593Smuzhiyun int netdev_bind_sb_channel_queue(struct net_device *dev,
2290*4882a593Smuzhiyun struct net_device *sb_dev,
2291*4882a593Smuzhiyun u8 tc, u16 count, u16 offset);
2292*4882a593Smuzhiyun int netdev_set_sb_channel(struct net_device *dev, u16 channel);
2293*4882a593Smuzhiyun static inline int netdev_get_sb_channel(struct net_device *dev)
2294*4882a593Smuzhiyun {
2295*4882a593Smuzhiyun return max_t(int, -dev->num_tc, 0);
2296*4882a593Smuzhiyun }
2297*4882a593Smuzhiyun
2298*4882a593Smuzhiyun static inline
2299*4882a593Smuzhiyun struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
2300*4882a593Smuzhiyun unsigned int index)
2301*4882a593Smuzhiyun {
2302*4882a593Smuzhiyun return &dev->_tx[index];
2303*4882a593Smuzhiyun }
2304*4882a593Smuzhiyun
2305*4882a593Smuzhiyun static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
2306*4882a593Smuzhiyun const struct sk_buff *skb)
2307*4882a593Smuzhiyun {
2308*4882a593Smuzhiyun return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2309*4882a593Smuzhiyun }
2310*4882a593Smuzhiyun
2311*4882a593Smuzhiyun static inline void netdev_for_each_tx_queue(struct net_device *dev,
2312*4882a593Smuzhiyun void (*f)(struct net_device *,
2313*4882a593Smuzhiyun struct netdev_queue *,
2314*4882a593Smuzhiyun void *),
2315*4882a593Smuzhiyun void *arg)
2316*4882a593Smuzhiyun {
2317*4882a593Smuzhiyun unsigned int i;
2318*4882a593Smuzhiyun
2319*4882a593Smuzhiyun for (i = 0; i < dev->num_tx_queues; i++)
2320*4882a593Smuzhiyun f(dev, &dev->_tx[i], arg);
2321*4882a593Smuzhiyun }
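
/*
 * Illustrative sketch (not part of this header): visiting every TX queue,
 * e.g. to reset byte-queue-limit state with netdev_tx_reset_queue().
 * foo_reset_queue() is made up.
 *
 *	static void foo_reset_queue(struct net_device *dev,
 *				    struct netdev_queue *txq, void *arg)
 *	{
 *		netdev_tx_reset_queue(txq);
 *	}
 *
 *	netdev_for_each_tx_queue(dev, foo_reset_queue, NULL);
 */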
2322*4882a593Smuzhiyun
2323*4882a593Smuzhiyun #define netdev_lockdep_set_classes(dev) \
2324*4882a593Smuzhiyun { \
2325*4882a593Smuzhiyun static struct lock_class_key qdisc_tx_busylock_key; \
2326*4882a593Smuzhiyun static struct lock_class_key qdisc_running_key; \
2327*4882a593Smuzhiyun static struct lock_class_key qdisc_xmit_lock_key; \
2328*4882a593Smuzhiyun static struct lock_class_key dev_addr_list_lock_key; \
2329*4882a593Smuzhiyun unsigned int i; \
2330*4882a593Smuzhiyun \
2331*4882a593Smuzhiyun (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \
2332*4882a593Smuzhiyun (dev)->qdisc_running_key = &qdisc_running_key; \
2333*4882a593Smuzhiyun lockdep_set_class(&(dev)->addr_list_lock, \
2334*4882a593Smuzhiyun &dev_addr_list_lock_key); \
2335*4882a593Smuzhiyun for (i = 0; i < (dev)->num_tx_queues; i++) \
2336*4882a593Smuzhiyun lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \
2337*4882a593Smuzhiyun &qdisc_xmit_lock_key); \
2338*4882a593Smuzhiyun }
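
/*
 * Illustrative sketch (not part of this header): stacked devices (bonding,
 * team, VLAN and the like) invoke this from their setup/init path so that
 * lockdep can distinguish their locks from those of lower devices.
 * foo_init() is a hypothetical ndo_init implementation.
 *
 *	static int foo_init(struct net_device *dev)
 *	{
 *		netdev_lockdep_set_classes(dev);
 *		return 0;
 *	}
 */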
2339*4882a593Smuzhiyun
2340*4882a593Smuzhiyun u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
2341*4882a593Smuzhiyun struct net_device *sb_dev);
2342*4882a593Smuzhiyun struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
2343*4882a593Smuzhiyun struct sk_buff *skb,
2344*4882a593Smuzhiyun struct net_device *sb_dev);
2345*4882a593Smuzhiyun
2346*4882a593Smuzhiyun /* Returns the headroom that the master device needs to take into account
2347*4882a593Smuzhiyun * when forwarding to this dev.
2348*4882a593Smuzhiyun */
2349*4882a593Smuzhiyun static inline unsigned netdev_get_fwd_headroom(struct net_device *dev)
2350*4882a593Smuzhiyun {
2351*4882a593Smuzhiyun return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom;
2352*4882a593Smuzhiyun }
2353*4882a593Smuzhiyun
2354*4882a593Smuzhiyun static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr)
2355*4882a593Smuzhiyun {
2356*4882a593Smuzhiyun if (dev->netdev_ops->ndo_set_rx_headroom)
2357*4882a593Smuzhiyun dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr);
2358*4882a593Smuzhiyun }
2359*4882a593Smuzhiyun
2360*4882a593Smuzhiyun /* set the device rx headroom to the dev's default */
2361*4882a593Smuzhiyun static inline void netdev_reset_rx_headroom(struct net_device *dev)
2362*4882a593Smuzhiyun {
2363*4882a593Smuzhiyun netdev_set_rx_headroom(dev, -1);
2364*4882a593Smuzhiyun }
2365*4882a593Smuzhiyun
2366*4882a593Smuzhiyun static inline void *netdev_get_ml_priv(struct net_device *dev,
2367*4882a593Smuzhiyun enum netdev_ml_priv_type type)
2368*4882a593Smuzhiyun {
2369*4882a593Smuzhiyun if (dev->ml_priv_type != type)
2370*4882a593Smuzhiyun return NULL;
2371*4882a593Smuzhiyun
2372*4882a593Smuzhiyun return dev->ml_priv;
2373*4882a593Smuzhiyun }
2374*4882a593Smuzhiyun
2375*4882a593Smuzhiyun static inline void netdev_set_ml_priv(struct net_device *dev,
2376*4882a593Smuzhiyun void *ml_priv,
2377*4882a593Smuzhiyun enum netdev_ml_priv_type type)
2378*4882a593Smuzhiyun {
2379*4882a593Smuzhiyun WARN(dev->ml_priv_type && dev->ml_priv_type != type,
2380*4882a593Smuzhiyun "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
2381*4882a593Smuzhiyun dev->ml_priv_type, type);
2382*4882a593Smuzhiyun WARN(!dev->ml_priv_type && dev->ml_priv,
2383*4882a593Smuzhiyun "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");
2384*4882a593Smuzhiyun
2385*4882a593Smuzhiyun dev->ml_priv = ml_priv;
2386*4882a593Smuzhiyun dev->ml_priv_type = type;
2387*4882a593Smuzhiyun }
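
/*
 * Illustrative sketch (not part of this header), assuming the ML_PRIV_CAN
 * type and struct can_priv from the CAN subsystem; the typed accessors let
 * readers reject an ml_priv pointer that belongs to another mid-layer.
 *
 *	In the device setup path:
 *		netdev_set_ml_priv(dev, priv, ML_PRIV_CAN);
 *
 *	In a reader:
 *		struct can_priv *cp = netdev_get_ml_priv(dev, ML_PRIV_CAN);
 *
 *		if (!cp)
 *			return -ENODEV;
 */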
2388*4882a593Smuzhiyun
2389*4882a593Smuzhiyun /*
2390*4882a593Smuzhiyun * Net namespace inlines
2391*4882a593Smuzhiyun */
2392*4882a593Smuzhiyun static inline
2393*4882a593Smuzhiyun struct net *dev_net(const struct net_device *dev)
2394*4882a593Smuzhiyun {
2395*4882a593Smuzhiyun return read_pnet(&dev->nd_net);
2396*4882a593Smuzhiyun }
2397*4882a593Smuzhiyun
2398*4882a593Smuzhiyun static inline
2399*4882a593Smuzhiyun void dev_net_set(struct net_device *dev, struct net *net)
2400*4882a593Smuzhiyun {
2401*4882a593Smuzhiyun write_pnet(&dev->nd_net, net);
2402*4882a593Smuzhiyun }
2403*4882a593Smuzhiyun
2404*4882a593Smuzhiyun /**
2405*4882a593Smuzhiyun * netdev_priv - access network device private data
2406*4882a593Smuzhiyun * @dev: network device
2407*4882a593Smuzhiyun *
2408*4882a593Smuzhiyun * Get network device private data
2409*4882a593Smuzhiyun */
2410*4882a593Smuzhiyun static inline void *netdev_priv(const struct net_device *dev)
2411*4882a593Smuzhiyun {
2412*4882a593Smuzhiyun return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
2413*4882a593Smuzhiyun }
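
/*
 * Illustrative sketch (not part of this header): private space is reserved
 * at allocation time and reached through netdev_priv() afterwards.  Assumes
 * alloc_etherdev() from <linux/etherdevice.h>; struct foo_priv is made up.
 *
 *	struct foo_priv { int link; };
 *
 *	dev = alloc_etherdev(sizeof(struct foo_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);
 *	priv->link = 0;
 */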
2414*4882a593Smuzhiyun
2415*4882a593Smuzhiyun /* Set the sysfs physical device reference for the network logical device.
2416*4882a593Smuzhiyun * If set prior to registration, a symlink is created during initialization.
2417*4882a593Smuzhiyun */
2418*4882a593Smuzhiyun #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
2419*4882a593Smuzhiyun
2420*4882a593Smuzhiyun /* Set the sysfs device type for the network logical device to allow
2421*4882a593Smuzhiyun * fine-grained identification of different network device types. For
2422*4882a593Smuzhiyun * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
2423*4882a593Smuzhiyun */
2424*4882a593Smuzhiyun #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
2425*4882a593Smuzhiyun
2426*4882a593Smuzhiyun /* Default NAPI poll() weight.
2427*4882a593Smuzhiyun * Device drivers are strongly advised not to use a bigger value.
2428*4882a593Smuzhiyun */
2429*4882a593Smuzhiyun #define NAPI_POLL_WEIGHT 64
2430*4882a593Smuzhiyun
2431*4882a593Smuzhiyun /**
2432*4882a593Smuzhiyun * netif_napi_add - initialize a NAPI context
2433*4882a593Smuzhiyun * @dev: network device
2434*4882a593Smuzhiyun * @napi: NAPI context
2435*4882a593Smuzhiyun * @poll: polling function
2436*4882a593Smuzhiyun * @weight: default weight
2437*4882a593Smuzhiyun *
2438*4882a593Smuzhiyun * netif_napi_add() must be used to initialize a NAPI context prior to calling
2439*4882a593Smuzhiyun * *any* of the other NAPI-related functions.
2440*4882a593Smuzhiyun */
2441*4882a593Smuzhiyun void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2442*4882a593Smuzhiyun int (*poll)(struct napi_struct *, int), int weight);
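
/*
 * Illustrative sketch (not part of this header): a hypothetical driver
 * registering a NAPI context at probe time and completing it from its poll
 * callback.  foo_poll() and foo_clean_rx() are made up.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = foo_clean_rx(napi, budget);
 *
 *		if (work < budget)
 *			napi_complete_done(napi, work);
 *		return work;
 *	}
 *
 *	netif_napi_add(dev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
 */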
2443*4882a593Smuzhiyun
2444*4882a593Smuzhiyun /**
2445*4882a593Smuzhiyun * netif_tx_napi_add - initialize a NAPI context
2446*4882a593Smuzhiyun * @dev: network device
2447*4882a593Smuzhiyun * @napi: NAPI context
2448*4882a593Smuzhiyun * @poll: polling function
2449*4882a593Smuzhiyun * @weight: default weight
2450*4882a593Smuzhiyun *
2451*4882a593Smuzhiyun * This variant of netif_napi_add() should be used from drivers using NAPI
2452*4882a593Smuzhiyun * to exclusively poll a TX queue.
2453*4882a593Smuzhiyun * This avoids adding it to napi_hash[], thus keeping that hash table from being polluted.
2454*4882a593Smuzhiyun */
2455*4882a593Smuzhiyun static inline void netif_tx_napi_add(struct net_device *dev,
2456*4882a593Smuzhiyun struct napi_struct *napi,
2457*4882a593Smuzhiyun int (*poll)(struct napi_struct *, int),
2458*4882a593Smuzhiyun int weight)
2459*4882a593Smuzhiyun {
2460*4882a593Smuzhiyun set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
2461*4882a593Smuzhiyun netif_napi_add(dev, napi, poll, weight);
2462*4882a593Smuzhiyun }
2463*4882a593Smuzhiyun
2464*4882a593Smuzhiyun /**
2465*4882a593Smuzhiyun * __netif_napi_del - remove a NAPI context
2466*4882a593Smuzhiyun * @napi: NAPI context
2467*4882a593Smuzhiyun *
2468*4882a593Smuzhiyun * Warning: caller must observe RCU grace period before freeing memory
2469*4882a593Smuzhiyun * containing @napi. Drivers might want to call this helper to combine
2470*4882a593Smuzhiyun * all the needed RCU grace periods into a single one.
2471*4882a593Smuzhiyun */
2472*4882a593Smuzhiyun void __netif_napi_del(struct napi_struct *napi);
2473*4882a593Smuzhiyun
2474*4882a593Smuzhiyun /**
2475*4882a593Smuzhiyun * netif_napi_del - remove a NAPI context
2476*4882a593Smuzhiyun * @napi: NAPI context
2477*4882a593Smuzhiyun *
2478*4882a593Smuzhiyun * netif_napi_del() removes a NAPI context from the network device NAPI list
2479*4882a593Smuzhiyun */
2480*4882a593Smuzhiyun static inline void netif_napi_del(struct napi_struct *napi)
2481*4882a593Smuzhiyun {
2482*4882a593Smuzhiyun __netif_napi_del(napi);
2483*4882a593Smuzhiyun synchronize_net();
2484*4882a593Smuzhiyun }
2485*4882a593Smuzhiyun
2486*4882a593Smuzhiyun struct napi_gro_cb {
2487*4882a593Smuzhiyun /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
2488*4882a593Smuzhiyun void *frag0;
2489*4882a593Smuzhiyun
2490*4882a593Smuzhiyun /* Length of frag0. */
2491*4882a593Smuzhiyun unsigned int frag0_len;
2492*4882a593Smuzhiyun
2493*4882a593Smuzhiyun /* This indicates where we are processing relative to skb->data. */
2494*4882a593Smuzhiyun int data_offset;
2495*4882a593Smuzhiyun
2496*4882a593Smuzhiyun /* This is non-zero if the packet cannot be merged with the new skb. */
2497*4882a593Smuzhiyun u16 flush;
2498*4882a593Smuzhiyun
2499*4882a593Smuzhiyun /* Save the IP ID here and check when we get to the transport layer */
2500*4882a593Smuzhiyun u16 flush_id;
2501*4882a593Smuzhiyun
2502*4882a593Smuzhiyun /* Number of segments aggregated. */
2503*4882a593Smuzhiyun u16 count;
2504*4882a593Smuzhiyun
2505*4882a593Smuzhiyun /* Start offset for remote checksum offload */
2506*4882a593Smuzhiyun u16 gro_remcsum_start;
2507*4882a593Smuzhiyun
2508*4882a593Smuzhiyun /* jiffies when first packet was created/queued */
2509*4882a593Smuzhiyun unsigned long age;
2510*4882a593Smuzhiyun
2511*4882a593Smuzhiyun /* Used in ipv6_gro_receive() and foo-over-udp */
2512*4882a593Smuzhiyun u16 proto;
2513*4882a593Smuzhiyun
2514*4882a593Smuzhiyun /* This is non-zero if the packet may be of the same flow. */
2515*4882a593Smuzhiyun u8 same_flow:1;
2516*4882a593Smuzhiyun
2517*4882a593Smuzhiyun /* Used in tunnel GRO receive */
2518*4882a593Smuzhiyun u8 encap_mark:1;
2519*4882a593Smuzhiyun
2520*4882a593Smuzhiyun /* GRO checksum is valid */
2521*4882a593Smuzhiyun u8 csum_valid:1;
2522*4882a593Smuzhiyun
2523*4882a593Smuzhiyun /* Number of checksums via CHECKSUM_UNNECESSARY */
2524*4882a593Smuzhiyun u8 csum_cnt:3;
2525*4882a593Smuzhiyun
2526*4882a593Smuzhiyun /* Free the skb? */
2527*4882a593Smuzhiyun u8 free:2;
2528*4882a593Smuzhiyun #define NAPI_GRO_FREE 1
2529*4882a593Smuzhiyun #define NAPI_GRO_FREE_STOLEN_HEAD 2
2530*4882a593Smuzhiyun
2531*4882a593Smuzhiyun /* Used in foo-over-udp, set in udp[46]_gro_receive */
2532*4882a593Smuzhiyun u8 is_ipv6:1;
2533*4882a593Smuzhiyun
2534*4882a593Smuzhiyun /* Used in GRE, set in fou/gue_gro_receive */
2535*4882a593Smuzhiyun u8 is_fou:1;
2536*4882a593Smuzhiyun
2537*4882a593Smuzhiyun /* Used to determine if flush_id can be ignored */
2538*4882a593Smuzhiyun u8 is_atomic:1;
2539*4882a593Smuzhiyun
2540*4882a593Smuzhiyun /* Number of gro_receive callbacks this packet already went through */
2541*4882a593Smuzhiyun u8 recursion_counter:4;
2542*4882a593Smuzhiyun
2543*4882a593Smuzhiyun /* GRO is done by frag_list pointer chaining. */
2544*4882a593Smuzhiyun u8 is_flist:1;
2545*4882a593Smuzhiyun
2546*4882a593Smuzhiyun /* used to support CHECKSUM_COMPLETE for tunneling protocols */
2547*4882a593Smuzhiyun __wsum csum;
2548*4882a593Smuzhiyun
2549*4882a593Smuzhiyun /* used in skb_gro_receive() slow path */
2550*4882a593Smuzhiyun struct sk_buff *last;
2551*4882a593Smuzhiyun };
2552*4882a593Smuzhiyun
2553*4882a593Smuzhiyun #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
2554*4882a593Smuzhiyun
2555*4882a593Smuzhiyun #define GRO_RECURSION_LIMIT 15
2556*4882a593Smuzhiyun static inline int gro_recursion_inc_test(struct sk_buff *skb)
2557*4882a593Smuzhiyun {
2558*4882a593Smuzhiyun return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
2559*4882a593Smuzhiyun }
2560*4882a593Smuzhiyun
2561*4882a593Smuzhiyun typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
2562*4882a593Smuzhiyun static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
2563*4882a593Smuzhiyun struct list_head *head,
2564*4882a593Smuzhiyun struct sk_buff *skb)
2565*4882a593Smuzhiyun {
2566*4882a593Smuzhiyun if (unlikely(gro_recursion_inc_test(skb))) {
2567*4882a593Smuzhiyun NAPI_GRO_CB(skb)->flush |= 1;
2568*4882a593Smuzhiyun return NULL;
2569*4882a593Smuzhiyun }
2570*4882a593Smuzhiyun
2571*4882a593Smuzhiyun return cb(head, skb);
2572*4882a593Smuzhiyun }
2573*4882a593Smuzhiyun
2574*4882a593Smuzhiyun typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
2575*4882a593Smuzhiyun struct sk_buff *);
2576*4882a593Smuzhiyun static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
2577*4882a593Smuzhiyun struct sock *sk,
2578*4882a593Smuzhiyun struct list_head *head,
2579*4882a593Smuzhiyun struct sk_buff *skb)
2580*4882a593Smuzhiyun {
2581*4882a593Smuzhiyun if (unlikely(gro_recursion_inc_test(skb))) {
2582*4882a593Smuzhiyun NAPI_GRO_CB(skb)->flush |= 1;
2583*4882a593Smuzhiyun return NULL;
2584*4882a593Smuzhiyun }
2585*4882a593Smuzhiyun
2586*4882a593Smuzhiyun return cb(sk, head, skb);
2587*4882a593Smuzhiyun }
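
/*
 * Illustrative sketch (not part of this header): an encapsulation's
 * ->gro_receive hands the inner packet to the next layer through
 * call_gro_receive(), so deeply nested encapsulations get flushed once
 * GRO_RECURSION_LIMIT is reached instead of recursing without bound.
 * foo_gro_receive() is made up; "ptype" stands for the inner packet_offload.
 *
 *	static struct sk_buff *foo_gro_receive(struct list_head *head,
 *					       struct sk_buff *skb)
 *	{
 *		... locate the inner header and its ptype ...
 *
 *		return call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 *	}
 */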
2588*4882a593Smuzhiyun
2589*4882a593Smuzhiyun struct packet_type {
2590*4882a593Smuzhiyun __be16 type; /* This is really htons(ether_type). */
2591*4882a593Smuzhiyun bool ignore_outgoing;
2592*4882a593Smuzhiyun struct net_device *dev; /* NULL is wildcarded here */
2593*4882a593Smuzhiyun int (*func) (struct sk_buff *,
2594*4882a593Smuzhiyun struct net_device *,
2595*4882a593Smuzhiyun struct packet_type *,
2596*4882a593Smuzhiyun struct net_device *);
2597*4882a593Smuzhiyun void (*list_func) (struct list_head *,
2598*4882a593Smuzhiyun struct packet_type *,
2599*4882a593Smuzhiyun struct net_device *);
2600*4882a593Smuzhiyun bool (*id_match)(struct packet_type *ptype,
2601*4882a593Smuzhiyun struct sock *sk);
2602*4882a593Smuzhiyun struct net *af_packet_net;
2603*4882a593Smuzhiyun void *af_packet_priv;
2604*4882a593Smuzhiyun struct list_head list;
2605*4882a593Smuzhiyun
2606*4882a593Smuzhiyun ANDROID_KABI_RESERVE(1);
2607*4882a593Smuzhiyun ANDROID_KABI_RESERVE(2);
2608*4882a593Smuzhiyun ANDROID_KABI_RESERVE(3);
2609*4882a593Smuzhiyun ANDROID_KABI_RESERVE(4);
2610*4882a593Smuzhiyun };
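
/*
 * Illustrative sketch (not part of this header): registering a receive
 * handler for an ethertype with dev_add_pack(), declared later in this
 * file.  ETH_P_FOO and foo_rcv() are made up.
 *
 *	static struct packet_type foo_packet_type __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_FOO),
 *		.func = foo_rcv,
 *	};
 *
 *	dev_add_pack(&foo_packet_type);
 */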
2611*4882a593Smuzhiyun
2612*4882a593Smuzhiyun struct offload_callbacks {
2613*4882a593Smuzhiyun struct sk_buff *(*gso_segment)(struct sk_buff *skb,
2614*4882a593Smuzhiyun netdev_features_t features);
2615*4882a593Smuzhiyun struct sk_buff *(*gro_receive)(struct list_head *head,
2616*4882a593Smuzhiyun struct sk_buff *skb);
2617*4882a593Smuzhiyun int (*gro_complete)(struct sk_buff *skb, int nhoff);
2618*4882a593Smuzhiyun };
2619*4882a593Smuzhiyun
2620*4882a593Smuzhiyun struct packet_offload {
2621*4882a593Smuzhiyun __be16 type; /* This is really htons(ether_type). */
2622*4882a593Smuzhiyun u16 priority;
2623*4882a593Smuzhiyun struct offload_callbacks callbacks;
2624*4882a593Smuzhiyun struct list_head list;
2625*4882a593Smuzhiyun };
2626*4882a593Smuzhiyun
2627*4882a593Smuzhiyun /* Often-modified stats are per-CPU; others are shared (netdev->stats). */
2628*4882a593Smuzhiyun struct pcpu_sw_netstats {
2629*4882a593Smuzhiyun u64 rx_packets;
2630*4882a593Smuzhiyun u64 rx_bytes;
2631*4882a593Smuzhiyun u64 tx_packets;
2632*4882a593Smuzhiyun u64 tx_bytes;
2633*4882a593Smuzhiyun struct u64_stats_sync syncp;
2634*4882a593Smuzhiyun } __aligned(4 * sizeof(u64));
2635*4882a593Smuzhiyun
2636*4882a593Smuzhiyun struct pcpu_lstats {
2637*4882a593Smuzhiyun u64_stats_t packets;
2638*4882a593Smuzhiyun u64_stats_t bytes;
2639*4882a593Smuzhiyun struct u64_stats_sync syncp;
2640*4882a593Smuzhiyun } __aligned(2 * sizeof(u64));
2641*4882a593Smuzhiyun
2642*4882a593Smuzhiyun void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);
2643*4882a593Smuzhiyun
2644*4882a593Smuzhiyun static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len)
2645*4882a593Smuzhiyun {
2646*4882a593Smuzhiyun struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
2647*4882a593Smuzhiyun
2648*4882a593Smuzhiyun u64_stats_update_begin(&tstats->syncp);
2649*4882a593Smuzhiyun tstats->rx_bytes += len;
2650*4882a593Smuzhiyun tstats->rx_packets++;
2651*4882a593Smuzhiyun u64_stats_update_end(&tstats->syncp);
2652*4882a593Smuzhiyun }
2653*4882a593Smuzhiyun
2654*4882a593Smuzhiyun static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
2655*4882a593Smuzhiyun {
2656*4882a593Smuzhiyun struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
2657*4882a593Smuzhiyun
2658*4882a593Smuzhiyun u64_stats_update_begin(&lstats->syncp);
2659*4882a593Smuzhiyun u64_stats_add(&lstats->bytes, len);
2660*4882a593Smuzhiyun u64_stats_inc(&lstats->packets);
2661*4882a593Smuzhiyun u64_stats_update_end(&lstats->syncp);
2662*4882a593Smuzhiyun }
2663*4882a593Smuzhiyun
2664*4882a593Smuzhiyun #define __netdev_alloc_pcpu_stats(type, gfp) \
2665*4882a593Smuzhiyun ({ \
2666*4882a593Smuzhiyun typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
2667*4882a593Smuzhiyun if (pcpu_stats) { \
2668*4882a593Smuzhiyun int __cpu; \
2669*4882a593Smuzhiyun for_each_possible_cpu(__cpu) { \
2670*4882a593Smuzhiyun typeof(type) *stat; \
2671*4882a593Smuzhiyun stat = per_cpu_ptr(pcpu_stats, __cpu); \
2672*4882a593Smuzhiyun u64_stats_init(&stat->syncp); \
2673*4882a593Smuzhiyun } \
2674*4882a593Smuzhiyun } \
2675*4882a593Smuzhiyun pcpu_stats; \
2676*4882a593Smuzhiyun })
2677*4882a593Smuzhiyun
2678*4882a593Smuzhiyun #define netdev_alloc_pcpu_stats(type) \
2679*4882a593Smuzhiyun __netdev_alloc_pcpu_stats(type, GFP_KERNEL)
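
/*
 * Illustrative sketch (not part of this header): a tunnel-style driver
 * allocating per-CPU counters in ->ndo_init and bumping them per packet.
 * foo_dev_init() is made up.
 *
 *	static int foo_dev_init(struct net_device *dev)
 *	{
 *		dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 *		return dev->tstats ? 0 : -ENOMEM;
 *	}
 *
 *	On the receive path:
 *		dev_sw_netstats_rx_add(dev, skb->len);
 */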
2680*4882a593Smuzhiyun
2681*4882a593Smuzhiyun enum netdev_lag_tx_type {
2682*4882a593Smuzhiyun NETDEV_LAG_TX_TYPE_UNKNOWN,
2683*4882a593Smuzhiyun NETDEV_LAG_TX_TYPE_RANDOM,
2684*4882a593Smuzhiyun NETDEV_LAG_TX_TYPE_BROADCAST,
2685*4882a593Smuzhiyun NETDEV_LAG_TX_TYPE_ROUNDROBIN,
2686*4882a593Smuzhiyun NETDEV_LAG_TX_TYPE_ACTIVEBACKUP,
2687*4882a593Smuzhiyun NETDEV_LAG_TX_TYPE_HASH,
2688*4882a593Smuzhiyun };
2689*4882a593Smuzhiyun
2690*4882a593Smuzhiyun enum netdev_lag_hash {
2691*4882a593Smuzhiyun NETDEV_LAG_HASH_NONE,
2692*4882a593Smuzhiyun NETDEV_LAG_HASH_L2,
2693*4882a593Smuzhiyun NETDEV_LAG_HASH_L34,
2694*4882a593Smuzhiyun NETDEV_LAG_HASH_L23,
2695*4882a593Smuzhiyun NETDEV_LAG_HASH_E23,
2696*4882a593Smuzhiyun NETDEV_LAG_HASH_E34,
2697*4882a593Smuzhiyun NETDEV_LAG_HASH_UNKNOWN,
2698*4882a593Smuzhiyun };
2699*4882a593Smuzhiyun
2700*4882a593Smuzhiyun struct netdev_lag_upper_info {
2701*4882a593Smuzhiyun enum netdev_lag_tx_type tx_type;
2702*4882a593Smuzhiyun enum netdev_lag_hash hash_type;
2703*4882a593Smuzhiyun };
2704*4882a593Smuzhiyun
2705*4882a593Smuzhiyun struct netdev_lag_lower_state_info {
2706*4882a593Smuzhiyun u8 link_up : 1,
2707*4882a593Smuzhiyun tx_enabled : 1;
2708*4882a593Smuzhiyun };
2709*4882a593Smuzhiyun
2710*4882a593Smuzhiyun #include <linux/notifier.h>
2711*4882a593Smuzhiyun
2712*4882a593Smuzhiyun /* netdevice notifier chain. Please remember to update netdev_cmd_to_name()
2713*4882a593Smuzhiyun * and the rtnetlink notification exclusion list in rtnetlink_event() when
2714*4882a593Smuzhiyun * adding new types.
2715*4882a593Smuzhiyun */
2716*4882a593Smuzhiyun enum netdev_cmd {
2717*4882a593Smuzhiyun NETDEV_UP = 1, /* For now you can't veto a device up/down */
2718*4882a593Smuzhiyun NETDEV_DOWN,
2719*4882a593Smuzhiyun NETDEV_REBOOT, /* Tell a protocol stack a network interface
2720*4882a593Smuzhiyun detected a hardware crash and restarted
2721*4882a593Smuzhiyun - we can use this e.g. to kick TCP sessions
2722*4882a593Smuzhiyun once done */
2723*4882a593Smuzhiyun NETDEV_CHANGE, /* Notify device state change */
2724*4882a593Smuzhiyun NETDEV_REGISTER,
2725*4882a593Smuzhiyun NETDEV_UNREGISTER,
2726*4882a593Smuzhiyun NETDEV_CHANGEMTU, /* notify after mtu change happened */
2727*4882a593Smuzhiyun NETDEV_CHANGEADDR, /* notify after the address change */
2728*4882a593Smuzhiyun NETDEV_PRE_CHANGEADDR, /* notify before the address change */
2729*4882a593Smuzhiyun NETDEV_GOING_DOWN,
2730*4882a593Smuzhiyun NETDEV_CHANGENAME,
2731*4882a593Smuzhiyun NETDEV_FEAT_CHANGE,
2732*4882a593Smuzhiyun NETDEV_BONDING_FAILOVER,
2733*4882a593Smuzhiyun NETDEV_PRE_UP,
2734*4882a593Smuzhiyun NETDEV_PRE_TYPE_CHANGE,
2735*4882a593Smuzhiyun NETDEV_POST_TYPE_CHANGE,
2736*4882a593Smuzhiyun NETDEV_POST_INIT,
2737*4882a593Smuzhiyun NETDEV_RELEASE,
2738*4882a593Smuzhiyun NETDEV_NOTIFY_PEERS,
2739*4882a593Smuzhiyun NETDEV_JOIN,
2740*4882a593Smuzhiyun NETDEV_CHANGEUPPER,
2741*4882a593Smuzhiyun NETDEV_RESEND_IGMP,
2742*4882a593Smuzhiyun NETDEV_PRECHANGEMTU, /* notify before mtu change happened */
2743*4882a593Smuzhiyun NETDEV_CHANGEINFODATA,
2744*4882a593Smuzhiyun NETDEV_BONDING_INFO,
2745*4882a593Smuzhiyun NETDEV_PRECHANGEUPPER,
2746*4882a593Smuzhiyun NETDEV_CHANGELOWERSTATE,
2747*4882a593Smuzhiyun NETDEV_UDP_TUNNEL_PUSH_INFO,
2748*4882a593Smuzhiyun NETDEV_UDP_TUNNEL_DROP_INFO,
2749*4882a593Smuzhiyun NETDEV_CHANGE_TX_QUEUE_LEN,
2750*4882a593Smuzhiyun NETDEV_CVLAN_FILTER_PUSH_INFO,
2751*4882a593Smuzhiyun NETDEV_CVLAN_FILTER_DROP_INFO,
2752*4882a593Smuzhiyun NETDEV_SVLAN_FILTER_PUSH_INFO,
2753*4882a593Smuzhiyun NETDEV_SVLAN_FILTER_DROP_INFO,
2754*4882a593Smuzhiyun };
2755*4882a593Smuzhiyun const char *netdev_cmd_to_name(enum netdev_cmd cmd);
2756*4882a593Smuzhiyun
2757*4882a593Smuzhiyun int register_netdevice_notifier(struct notifier_block *nb);
2758*4882a593Smuzhiyun int unregister_netdevice_notifier(struct notifier_block *nb);
2759*4882a593Smuzhiyun int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb);
2760*4882a593Smuzhiyun int unregister_netdevice_notifier_net(struct net *net,
2761*4882a593Smuzhiyun struct notifier_block *nb);
2762*4882a593Smuzhiyun int register_netdevice_notifier_dev_net(struct net_device *dev,
2763*4882a593Smuzhiyun struct notifier_block *nb,
2764*4882a593Smuzhiyun struct netdev_net_notifier *nn);
2765*4882a593Smuzhiyun int unregister_netdevice_notifier_dev_net(struct net_device *dev,
2766*4882a593Smuzhiyun struct notifier_block *nb,
2767*4882a593Smuzhiyun struct netdev_net_notifier *nn);
2768*4882a593Smuzhiyun
2769*4882a593Smuzhiyun struct netdev_notifier_info {
2770*4882a593Smuzhiyun struct net_device *dev;
2771*4882a593Smuzhiyun struct netlink_ext_ack *extack;
2772*4882a593Smuzhiyun };
2773*4882a593Smuzhiyun
2774*4882a593Smuzhiyun struct netdev_notifier_info_ext {
2775*4882a593Smuzhiyun struct netdev_notifier_info info; /* must be first */
2776*4882a593Smuzhiyun union {
2777*4882a593Smuzhiyun u32 mtu;
2778*4882a593Smuzhiyun } ext;
2779*4882a593Smuzhiyun };
2780*4882a593Smuzhiyun
2781*4882a593Smuzhiyun struct netdev_notifier_change_info {
2782*4882a593Smuzhiyun struct netdev_notifier_info info; /* must be first */
2783*4882a593Smuzhiyun unsigned int flags_changed;
2784*4882a593Smuzhiyun };
2785*4882a593Smuzhiyun
2786*4882a593Smuzhiyun struct netdev_notifier_changeupper_info {
2787*4882a593Smuzhiyun struct netdev_notifier_info info; /* must be first */
2788*4882a593Smuzhiyun struct net_device *upper_dev; /* new upper dev */
2789*4882a593Smuzhiyun bool master; /* is upper dev master */
2790*4882a593Smuzhiyun bool linking; /* is the notification for link or unlink */
2791*4882a593Smuzhiyun void *upper_info; /* upper dev info */
2792*4882a593Smuzhiyun };
2793*4882a593Smuzhiyun
2794*4882a593Smuzhiyun struct netdev_notifier_changelowerstate_info {
2795*4882a593Smuzhiyun struct netdev_notifier_info info; /* must be first */
2796*4882a593Smuzhiyun void *lower_state_info; /* is lower dev state */
2797*4882a593Smuzhiyun };
2798*4882a593Smuzhiyun
2799*4882a593Smuzhiyun struct netdev_notifier_pre_changeaddr_info {
2800*4882a593Smuzhiyun struct netdev_notifier_info info; /* must be first */
2801*4882a593Smuzhiyun const unsigned char *dev_addr;
2802*4882a593Smuzhiyun };
2803*4882a593Smuzhiyun
2804*4882a593Smuzhiyun static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
2805*4882a593Smuzhiyun struct net_device *dev)
2806*4882a593Smuzhiyun {
2807*4882a593Smuzhiyun info->dev = dev;
2808*4882a593Smuzhiyun info->extack = NULL;
2809*4882a593Smuzhiyun }
2810*4882a593Smuzhiyun
2811*4882a593Smuzhiyun static inline struct net_device *
2812*4882a593Smuzhiyun netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
2813*4882a593Smuzhiyun {
2814*4882a593Smuzhiyun return info->dev;
2815*4882a593Smuzhiyun }
2816*4882a593Smuzhiyun
2817*4882a593Smuzhiyun static inline struct netlink_ext_ack *
2818*4882a593Smuzhiyun netdev_notifier_info_to_extack(const struct netdev_notifier_info *info)
2819*4882a593Smuzhiyun {
2820*4882a593Smuzhiyun return info->extack;
2821*4882a593Smuzhiyun }
2822*4882a593Smuzhiyun
2823*4882a593Smuzhiyun int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
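
/*
 * Illustrative sketch (not part of this header): a subsystem watching for
 * devices coming up.  foo_netdev_event() and foo_netdev_notifier are made up.
 *
 *	static int foo_netdev_event(struct notifier_block *nb,
 *				    unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_debug("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_netdev_notifier = {
 *		.notifier_call = foo_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&foo_netdev_notifier);
 */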
2824*4882a593Smuzhiyun
2825*4882a593Smuzhiyun
2826*4882a593Smuzhiyun extern rwlock_t dev_base_lock; /* Device list lock */
2827*4882a593Smuzhiyun
2828*4882a593Smuzhiyun #define for_each_netdev(net, d) \
2829*4882a593Smuzhiyun list_for_each_entry(d, &(net)->dev_base_head, dev_list)
2830*4882a593Smuzhiyun #define for_each_netdev_reverse(net, d) \
2831*4882a593Smuzhiyun list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
2832*4882a593Smuzhiyun #define for_each_netdev_rcu(net, d) \
2833*4882a593Smuzhiyun list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
2834*4882a593Smuzhiyun #define for_each_netdev_safe(net, d, n) \
2835*4882a593Smuzhiyun list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
2836*4882a593Smuzhiyun #define for_each_netdev_continue(net, d) \
2837*4882a593Smuzhiyun list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
2838*4882a593Smuzhiyun #define for_each_netdev_continue_reverse(net, d) \
2839*4882a593Smuzhiyun list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \
2840*4882a593Smuzhiyun dev_list)
2841*4882a593Smuzhiyun #define for_each_netdev_continue_rcu(net, d) \
2842*4882a593Smuzhiyun list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
2843*4882a593Smuzhiyun #define for_each_netdev_in_bond_rcu(bond, slave) \
2844*4882a593Smuzhiyun for_each_netdev_rcu(&init_net, slave) \
2845*4882a593Smuzhiyun if (netdev_master_upper_dev_get_rcu(slave) == (bond))
2846*4882a593Smuzhiyun #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
2847*4882a593Smuzhiyun
2848*4882a593Smuzhiyun static inline struct net_device *next_net_device(struct net_device *dev)
2849*4882a593Smuzhiyun {
2850*4882a593Smuzhiyun struct list_head *lh;
2851*4882a593Smuzhiyun struct net *net;
2852*4882a593Smuzhiyun
2853*4882a593Smuzhiyun net = dev_net(dev);
2854*4882a593Smuzhiyun lh = dev->dev_list.next;
2855*4882a593Smuzhiyun return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2856*4882a593Smuzhiyun }
2857*4882a593Smuzhiyun
2858*4882a593Smuzhiyun static inline struct net_device *next_net_device_rcu(struct net_device *dev)
2859*4882a593Smuzhiyun {
2860*4882a593Smuzhiyun struct list_head *lh;
2861*4882a593Smuzhiyun struct net *net;
2862*4882a593Smuzhiyun
2863*4882a593Smuzhiyun net = dev_net(dev);
2864*4882a593Smuzhiyun lh = rcu_dereference(list_next_rcu(&dev->dev_list));
2865*4882a593Smuzhiyun return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2866*4882a593Smuzhiyun }
2867*4882a593Smuzhiyun
2868*4882a593Smuzhiyun static inline struct net_device *first_net_device(struct net *net)
2869*4882a593Smuzhiyun {
2870*4882a593Smuzhiyun return list_empty(&net->dev_base_head) ? NULL :
2871*4882a593Smuzhiyun net_device_entry(net->dev_base_head.next);
2872*4882a593Smuzhiyun }
2873*4882a593Smuzhiyun
2874*4882a593Smuzhiyun static inline struct net_device *first_net_device_rcu(struct net *net)
2875*4882a593Smuzhiyun {
2876*4882a593Smuzhiyun struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
2877*4882a593Smuzhiyun
2878*4882a593Smuzhiyun return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
2879*4882a593Smuzhiyun }
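
/*
 * Illustrative sketch (not part of this header): walking every device in a
 * namespace.  The list may change unless the caller holds RTNL, hence the
 * RCU read-side protection around the _rcu iterator.
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(net, dev)
 *		pr_debug("%s: mtu %u\n", dev->name, READ_ONCE(dev->mtu));
 *	rcu_read_unlock();
 */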
2880*4882a593Smuzhiyun
2881*4882a593Smuzhiyun int netdev_boot_setup_check(struct net_device *dev);
2882*4882a593Smuzhiyun unsigned long netdev_boot_base(const char *prefix, int unit);
2883*4882a593Smuzhiyun struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
2884*4882a593Smuzhiyun const char *hwaddr);
2885*4882a593Smuzhiyun struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
2886*4882a593Smuzhiyun struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
2887*4882a593Smuzhiyun void dev_add_pack(struct packet_type *pt);
2888*4882a593Smuzhiyun void dev_remove_pack(struct packet_type *pt);
2889*4882a593Smuzhiyun void __dev_remove_pack(struct packet_type *pt);
2890*4882a593Smuzhiyun void dev_add_offload(struct packet_offload *po);
2891*4882a593Smuzhiyun void dev_remove_offload(struct packet_offload *po);
2892*4882a593Smuzhiyun
2893*4882a593Smuzhiyun int dev_get_iflink(const struct net_device *dev);
2894*4882a593Smuzhiyun int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
2895*4882a593Smuzhiyun struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
2896*4882a593Smuzhiyun unsigned short mask);
2897*4882a593Smuzhiyun struct net_device *dev_get_by_name(struct net *net, const char *name);
2898*4882a593Smuzhiyun struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
2899*4882a593Smuzhiyun struct net_device *__dev_get_by_name(struct net *net, const char *name);
2900*4882a593Smuzhiyun int dev_alloc_name(struct net_device *dev, const char *name);
2901*4882a593Smuzhiyun int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
2902*4882a593Smuzhiyun void dev_close(struct net_device *dev);
2903*4882a593Smuzhiyun void dev_close_many(struct list_head *head, bool unlink);
2904*4882a593Smuzhiyun void dev_disable_lro(struct net_device *dev);
2905*4882a593Smuzhiyun int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
2906*4882a593Smuzhiyun u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
2907*4882a593Smuzhiyun struct net_device *sb_dev);
2908*4882a593Smuzhiyun u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
2909*4882a593Smuzhiyun struct net_device *sb_dev);
2910*4882a593Smuzhiyun
2911*4882a593Smuzhiyun int dev_queue_xmit(struct sk_buff *skb);
2912*4882a593Smuzhiyun int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
2913*4882a593Smuzhiyun int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
2914*4882a593Smuzhiyun
2915*4882a593Smuzhiyun static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
2916*4882a593Smuzhiyun {
2917*4882a593Smuzhiyun int ret;
2918*4882a593Smuzhiyun
2919*4882a593Smuzhiyun ret = __dev_direct_xmit(skb, queue_id);
2920*4882a593Smuzhiyun if (!dev_xmit_complete(ret))
2921*4882a593Smuzhiyun kfree_skb(skb);
2922*4882a593Smuzhiyun return ret;
2923*4882a593Smuzhiyun }
2924*4882a593Smuzhiyun
2925*4882a593Smuzhiyun int register_netdevice(struct net_device *dev);
2926*4882a593Smuzhiyun void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
2927*4882a593Smuzhiyun void unregister_netdevice_many(struct list_head *head);
2928*4882a593Smuzhiyun static inline void unregister_netdevice(struct net_device *dev)
2929*4882a593Smuzhiyun {
2930*4882a593Smuzhiyun unregister_netdevice_queue(dev, NULL);
2931*4882a593Smuzhiyun }
2932*4882a593Smuzhiyun
2933*4882a593Smuzhiyun int netdev_refcnt_read(const struct net_device *dev);
2934*4882a593Smuzhiyun void free_netdev(struct net_device *dev);
2935*4882a593Smuzhiyun void netdev_freemem(struct net_device *dev);
2936*4882a593Smuzhiyun int init_dummy_netdev(struct net_device *dev);
2937*4882a593Smuzhiyun
2938*4882a593Smuzhiyun struct net_device *netdev_get_xmit_slave(struct net_device *dev,
2939*4882a593Smuzhiyun struct sk_buff *skb,
2940*4882a593Smuzhiyun bool all_slaves);
2941*4882a593Smuzhiyun struct net_device *dev_get_by_index(struct net *net, int ifindex);
2942*4882a593Smuzhiyun struct net_device *__dev_get_by_index(struct net *net, int ifindex);
2943*4882a593Smuzhiyun struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
2944*4882a593Smuzhiyun struct net_device *dev_get_by_napi_id(unsigned int napi_id);
2945*4882a593Smuzhiyun int netdev_get_name(struct net *net, char *name, int ifindex);
2946*4882a593Smuzhiyun int dev_restart(struct net_device *dev);
2947*4882a593Smuzhiyun int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
2948*4882a593Smuzhiyun int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
2949*4882a593Smuzhiyun
2950*4882a593Smuzhiyun static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
2951*4882a593Smuzhiyun {
2952*4882a593Smuzhiyun return NAPI_GRO_CB(skb)->data_offset;
2953*4882a593Smuzhiyun }
2954*4882a593Smuzhiyun
2955*4882a593Smuzhiyun static inline unsigned int skb_gro_len(const struct sk_buff *skb)
2956*4882a593Smuzhiyun {
2957*4882a593Smuzhiyun return skb->len - NAPI_GRO_CB(skb)->data_offset;
2958*4882a593Smuzhiyun }
2959*4882a593Smuzhiyun
2960*4882a593Smuzhiyun static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
2961*4882a593Smuzhiyun {
2962*4882a593Smuzhiyun NAPI_GRO_CB(skb)->data_offset += len;
2963*4882a593Smuzhiyun }
2964*4882a593Smuzhiyun
2965*4882a593Smuzhiyun static inline void *skb_gro_header_fast(struct sk_buff *skb,
2966*4882a593Smuzhiyun unsigned int offset)
2967*4882a593Smuzhiyun {
2968*4882a593Smuzhiyun return NAPI_GRO_CB(skb)->frag0 + offset;
2969*4882a593Smuzhiyun }
2970*4882a593Smuzhiyun
2971*4882a593Smuzhiyun static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
2972*4882a593Smuzhiyun {
2973*4882a593Smuzhiyun return NAPI_GRO_CB(skb)->frag0_len < hlen;
2974*4882a593Smuzhiyun }
2975*4882a593Smuzhiyun
2976*4882a593Smuzhiyun static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
2977*4882a593Smuzhiyun {
2978*4882a593Smuzhiyun NAPI_GRO_CB(skb)->frag0 = NULL;
2979*4882a593Smuzhiyun NAPI_GRO_CB(skb)->frag0_len = 0;
2980*4882a593Smuzhiyun }
2981*4882a593Smuzhiyun
2982*4882a593Smuzhiyun static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
2983*4882a593Smuzhiyun unsigned int offset)
2984*4882a593Smuzhiyun {
2985*4882a593Smuzhiyun if (!pskb_may_pull(skb, hlen))
2986*4882a593Smuzhiyun return NULL;
2987*4882a593Smuzhiyun
2988*4882a593Smuzhiyun skb_gro_frag0_invalidate(skb);
2989*4882a593Smuzhiyun return skb->data + offset;
2990*4882a593Smuzhiyun }
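
/*
 * Illustrative sketch (not part of this header): the usual pattern for a
 * ->gro_receive callback pulling its header, trying the frag0 fast path
 * first and falling back to the linearizing slow path.  struct foohdr is a
 * placeholder.
 *
 *	unsigned int off = skb_gro_offset(skb);
 *	unsigned int hlen = off + sizeof(struct foohdr);
 *	struct foohdr *fh = skb_gro_header_fast(skb, off);
 *
 *	if (skb_gro_header_hard(skb, hlen)) {
 *		fh = skb_gro_header_slow(skb, hlen, off);
 *		if (unlikely(!fh))
 *			goto out;
 *	}
 */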
2991*4882a593Smuzhiyun
2992*4882a593Smuzhiyun static inline void *skb_gro_network_header(struct sk_buff *skb)
2993*4882a593Smuzhiyun {
2994*4882a593Smuzhiyun return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
2995*4882a593Smuzhiyun skb_network_offset(skb);
2996*4882a593Smuzhiyun }
2997*4882a593Smuzhiyun
2998*4882a593Smuzhiyun static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
2999*4882a593Smuzhiyun const void *start, unsigned int len)
3000*4882a593Smuzhiyun {
3001*4882a593Smuzhiyun if (NAPI_GRO_CB(skb)->csum_valid)
3002*4882a593Smuzhiyun NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
3003*4882a593Smuzhiyun csum_partial(start, len, 0));
3004*4882a593Smuzhiyun }
3005*4882a593Smuzhiyun
3006*4882a593Smuzhiyun /* GRO checksum functions. These are logical equivalents of the normal
3007*4882a593Smuzhiyun * checksum functions (in skbuff.h) except that they operate on the GRO
3008*4882a593Smuzhiyun * offsets and fields in sk_buff.
3009*4882a593Smuzhiyun */
3010*4882a593Smuzhiyun
3011*4882a593Smuzhiyun __sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
3012*4882a593Smuzhiyun
3013*4882a593Smuzhiyun static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
3014*4882a593Smuzhiyun {
3015*4882a593Smuzhiyun return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
3016*4882a593Smuzhiyun }
3017*4882a593Smuzhiyun
3018*4882a593Smuzhiyun static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
3019*4882a593Smuzhiyun bool zero_okay,
3020*4882a593Smuzhiyun __sum16 check)
3021*4882a593Smuzhiyun {
3022*4882a593Smuzhiyun return ((skb->ip_summed != CHECKSUM_PARTIAL ||
3023*4882a593Smuzhiyun skb_checksum_start_offset(skb) <
3024*4882a593Smuzhiyun skb_gro_offset(skb)) &&
3025*4882a593Smuzhiyun !skb_at_gro_remcsum_start(skb) &&
3026*4882a593Smuzhiyun NAPI_GRO_CB(skb)->csum_cnt == 0 &&
3027*4882a593Smuzhiyun (!zero_okay || check));
3028*4882a593Smuzhiyun }
3029*4882a593Smuzhiyun
3030*4882a593Smuzhiyun static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
3031*4882a593Smuzhiyun __wsum psum)
3032*4882a593Smuzhiyun {
3033*4882a593Smuzhiyun if (NAPI_GRO_CB(skb)->csum_valid &&
3034*4882a593Smuzhiyun !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
3035*4882a593Smuzhiyun return 0;
3036*4882a593Smuzhiyun
3037*4882a593Smuzhiyun NAPI_GRO_CB(skb)->csum = psum;
3038*4882a593Smuzhiyun
3039*4882a593Smuzhiyun return __skb_gro_checksum_complete(skb);
3040*4882a593Smuzhiyun }
3041*4882a593Smuzhiyun
3042*4882a593Smuzhiyun static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
3043*4882a593Smuzhiyun {
3044*4882a593Smuzhiyun if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
3045*4882a593Smuzhiyun /* Consume a checksum from CHECKSUM_UNNECESSARY */
3046*4882a593Smuzhiyun NAPI_GRO_CB(skb)->csum_cnt--;
3047*4882a593Smuzhiyun } else {
3048*4882a593Smuzhiyun /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
3049*4882a593Smuzhiyun * have verified a new top-level checksum or an encapsulated one
3050*4882a593Smuzhiyun * during GRO. This saves work if we fall back to the normal path.
3051*4882a593Smuzhiyun */
3052*4882a593Smuzhiyun __skb_incr_checksum_unnecessary(skb);
3053*4882a593Smuzhiyun }
3054*4882a593Smuzhiyun }
3055*4882a593Smuzhiyun
3056*4882a593Smuzhiyun #define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
3057*4882a593Smuzhiyun compute_pseudo) \
3058*4882a593Smuzhiyun ({ \
3059*4882a593Smuzhiyun __sum16 __ret = 0; \
3060*4882a593Smuzhiyun if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
3061*4882a593Smuzhiyun __ret = __skb_gro_checksum_validate_complete(skb, \
3062*4882a593Smuzhiyun compute_pseudo(skb, proto)); \
3063*4882a593Smuzhiyun if (!__ret) \
3064*4882a593Smuzhiyun skb_gro_incr_csum_unnecessary(skb); \
3065*4882a593Smuzhiyun __ret; \
3066*4882a593Smuzhiyun })
3067*4882a593Smuzhiyun
3068*4882a593Smuzhiyun #define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
3069*4882a593Smuzhiyun __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
3070*4882a593Smuzhiyun
3071*4882a593Smuzhiyun #define skb_gro_checksum_validate_zero_check(skb, proto, check, \
3072*4882a593Smuzhiyun compute_pseudo) \
3073*4882a593Smuzhiyun __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
3074*4882a593Smuzhiyun
3075*4882a593Smuzhiyun #define skb_gro_checksum_simple_validate(skb) \
3076*4882a593Smuzhiyun __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
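
/*
 * Illustrative sketch (not part of this header): how a transport-layer
 * ->gro_receive typically validates the checksum, here TCP over IPv4 with
 * inet_gro_compute_pseudo() from <net/ip.h>:
 *
 *	if (skb_gro_checksum_validate(skb, IPPROTO_TCP,
 *				      inet_gro_compute_pseudo))
 *		goto flush;
 */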
3077*4882a593Smuzhiyun
3078*4882a593Smuzhiyun static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
3079*4882a593Smuzhiyun {
3080*4882a593Smuzhiyun return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
3081*4882a593Smuzhiyun !NAPI_GRO_CB(skb)->csum_valid);
3082*4882a593Smuzhiyun }
3083*4882a593Smuzhiyun
3084*4882a593Smuzhiyun static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
3085*4882a593Smuzhiyun __wsum pseudo)
3086*4882a593Smuzhiyun {
3087*4882a593Smuzhiyun NAPI_GRO_CB(skb)->csum = ~pseudo;
3088*4882a593Smuzhiyun NAPI_GRO_CB(skb)->csum_valid = 1;
3089*4882a593Smuzhiyun }
3090*4882a593Smuzhiyun
3091*4882a593Smuzhiyun #define skb_gro_checksum_try_convert(skb, proto, compute_pseudo) \
3092*4882a593Smuzhiyun do { \
3093*4882a593Smuzhiyun if (__skb_gro_checksum_convert_check(skb)) \
3094*4882a593Smuzhiyun __skb_gro_checksum_convert(skb, \
3095*4882a593Smuzhiyun compute_pseudo(skb, proto)); \
3096*4882a593Smuzhiyun } while (0)
3097*4882a593Smuzhiyun
3098*4882a593Smuzhiyun struct gro_remcsum {
3099*4882a593Smuzhiyun int offset;
3100*4882a593Smuzhiyun __wsum delta;
3101*4882a593Smuzhiyun };
3102*4882a593Smuzhiyun
3103*4882a593Smuzhiyun static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
3104*4882a593Smuzhiyun {
3105*4882a593Smuzhiyun grc->offset = 0;
3106*4882a593Smuzhiyun grc->delta = 0;
3107*4882a593Smuzhiyun }
3108*4882a593Smuzhiyun
3109*4882a593Smuzhiyun static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
3110*4882a593Smuzhiyun unsigned int off, size_t hdrlen,
3111*4882a593Smuzhiyun int start, int offset,
3112*4882a593Smuzhiyun struct gro_remcsum *grc,
3113*4882a593Smuzhiyun bool nopartial)
3114*4882a593Smuzhiyun {
3115*4882a593Smuzhiyun __wsum delta;
3116*4882a593Smuzhiyun size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
3117*4882a593Smuzhiyun
3118*4882a593Smuzhiyun BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
3119*4882a593Smuzhiyun
3120*4882a593Smuzhiyun if (!nopartial) {
3121*4882a593Smuzhiyun NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
3122*4882a593Smuzhiyun return ptr;
3123*4882a593Smuzhiyun }
3124*4882a593Smuzhiyun
3125*4882a593Smuzhiyun ptr = skb_gro_header_fast(skb, off);
3126*4882a593Smuzhiyun if (skb_gro_header_hard(skb, off + plen)) {
3127*4882a593Smuzhiyun ptr = skb_gro_header_slow(skb, off + plen, off);
3128*4882a593Smuzhiyun if (!ptr)
3129*4882a593Smuzhiyun return NULL;
3130*4882a593Smuzhiyun }
3131*4882a593Smuzhiyun
3132*4882a593Smuzhiyun delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
3133*4882a593Smuzhiyun start, offset);
3134*4882a593Smuzhiyun
3135*4882a593Smuzhiyun /* Adjust skb->csum since we changed the packet */
3136*4882a593Smuzhiyun NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
3137*4882a593Smuzhiyun
3138*4882a593Smuzhiyun grc->offset = off + hdrlen + offset;
3139*4882a593Smuzhiyun grc->delta = delta;
3140*4882a593Smuzhiyun
3141*4882a593Smuzhiyun return ptr;
3142*4882a593Smuzhiyun }
3143*4882a593Smuzhiyun
3144*4882a593Smuzhiyun static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
3145*4882a593Smuzhiyun struct gro_remcsum *grc)
3146*4882a593Smuzhiyun {
3147*4882a593Smuzhiyun void *ptr;
3148*4882a593Smuzhiyun size_t plen = grc->offset + sizeof(u16);
3149*4882a593Smuzhiyun
3150*4882a593Smuzhiyun if (!grc->delta)
3151*4882a593Smuzhiyun return;
3152*4882a593Smuzhiyun
3153*4882a593Smuzhiyun ptr = skb_gro_header_fast(skb, grc->offset);
3154*4882a593Smuzhiyun if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
3155*4882a593Smuzhiyun ptr = skb_gro_header_slow(skb, plen, grc->offset);
3156*4882a593Smuzhiyun if (!ptr)
3157*4882a593Smuzhiyun return;
3158*4882a593Smuzhiyun }
3159*4882a593Smuzhiyun
3160*4882a593Smuzhiyun remcsum_unadjust((__sum16 *)ptr, grc->delta);
3161*4882a593Smuzhiyun }
3162*4882a593Smuzhiyun
3163*4882a593Smuzhiyun #ifdef CONFIG_XFRM_OFFLOAD
3164*4882a593Smuzhiyun static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
3165*4882a593Smuzhiyun {
3166*4882a593Smuzhiyun if (PTR_ERR(pp) != -EINPROGRESS)
3167*4882a593Smuzhiyun NAPI_GRO_CB(skb)->flush |= flush;
3168*4882a593Smuzhiyun }
3169*4882a593Smuzhiyun static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
3170*4882a593Smuzhiyun struct sk_buff *pp,
3171*4882a593Smuzhiyun int flush,
3172*4882a593Smuzhiyun struct gro_remcsum *grc)
3173*4882a593Smuzhiyun {
3174*4882a593Smuzhiyun if (PTR_ERR(pp) != -EINPROGRESS) {
3175*4882a593Smuzhiyun NAPI_GRO_CB(skb)->flush |= flush;
3176*4882a593Smuzhiyun skb_gro_remcsum_cleanup(skb, grc);
3177*4882a593Smuzhiyun skb->remcsum_offload = 0;
3178*4882a593Smuzhiyun }
3179*4882a593Smuzhiyun }
3180*4882a593Smuzhiyun #else
3181*4882a593Smuzhiyun static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
3182*4882a593Smuzhiyun {
3183*4882a593Smuzhiyun NAPI_GRO_CB(skb)->flush |= flush;
3184*4882a593Smuzhiyun }
3185*4882a593Smuzhiyun static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
3186*4882a593Smuzhiyun struct sk_buff *pp,
3187*4882a593Smuzhiyun int flush,
3188*4882a593Smuzhiyun struct gro_remcsum *grc)
3189*4882a593Smuzhiyun {
3190*4882a593Smuzhiyun NAPI_GRO_CB(skb)->flush |= flush;
3191*4882a593Smuzhiyun skb_gro_remcsum_cleanup(skb, grc);
3192*4882a593Smuzhiyun skb->remcsum_offload = 0;
3193*4882a593Smuzhiyun }
3194*4882a593Smuzhiyun #endif
3195*4882a593Smuzhiyun
3196*4882a593Smuzhiyun static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
3197*4882a593Smuzhiyun unsigned short type,
3198*4882a593Smuzhiyun const void *daddr, const void *saddr,
3199*4882a593Smuzhiyun unsigned int len)
3200*4882a593Smuzhiyun {
3201*4882a593Smuzhiyun if (!dev->header_ops || !dev->header_ops->create)
3202*4882a593Smuzhiyun return 0;
3203*4882a593Smuzhiyun
3204*4882a593Smuzhiyun return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
3205*4882a593Smuzhiyun }
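
/*
 * Illustrative sketch: callers that build the link layer header themselves
 * (e.g. neighbour output or raw sockets) reserve LL_RESERVED_SPACE(dev)
 * in the skb and then might call:
 *
 *	if (dev_hard_header(skb, dev, ntohs(skb->protocol),
 *			    daddr, NULL, skb->len) < 0)
 *		goto drop;
 *
 * Passing a NULL source address lets the device fill in its own address.
 */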
3206*4882a593Smuzhiyun
3207*4882a593Smuzhiyun static inline int dev_parse_header(const struct sk_buff *skb,
3208*4882a593Smuzhiyun unsigned char *haddr)
3209*4882a593Smuzhiyun {
3210*4882a593Smuzhiyun const struct net_device *dev = skb->dev;
3211*4882a593Smuzhiyun
3212*4882a593Smuzhiyun if (!dev->header_ops || !dev->header_ops->parse)
3213*4882a593Smuzhiyun return 0;
3214*4882a593Smuzhiyun return dev->header_ops->parse(skb, haddr);
3215*4882a593Smuzhiyun }
3216*4882a593Smuzhiyun
3217*4882a593Smuzhiyun static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
3218*4882a593Smuzhiyun {
3219*4882a593Smuzhiyun const struct net_device *dev = skb->dev;
3220*4882a593Smuzhiyun
3221*4882a593Smuzhiyun if (!dev->header_ops || !dev->header_ops->parse_protocol)
3222*4882a593Smuzhiyun return 0;
3223*4882a593Smuzhiyun return dev->header_ops->parse_protocol(skb);
3224*4882a593Smuzhiyun }
3225*4882a593Smuzhiyun
3226*4882a593Smuzhiyun /* ll_header must have at least hard_header_len allocated */
3227*4882a593Smuzhiyun static inline bool dev_validate_header(const struct net_device *dev,
3228*4882a593Smuzhiyun char *ll_header, int len)
3229*4882a593Smuzhiyun {
3230*4882a593Smuzhiyun if (likely(len >= dev->hard_header_len))
3231*4882a593Smuzhiyun return true;
3232*4882a593Smuzhiyun if (len < dev->min_header_len)
3233*4882a593Smuzhiyun return false;
3234*4882a593Smuzhiyun
3235*4882a593Smuzhiyun if (capable(CAP_SYS_RAWIO)) {
3236*4882a593Smuzhiyun memset(ll_header + len, 0, dev->hard_header_len - len);
3237*4882a593Smuzhiyun return true;
3238*4882a593Smuzhiyun }
3239*4882a593Smuzhiyun
3240*4882a593Smuzhiyun if (dev->header_ops && dev->header_ops->validate)
3241*4882a593Smuzhiyun return dev->header_ops->validate(ll_header, len);
3242*4882a593Smuzhiyun
3243*4882a593Smuzhiyun return false;
3244*4882a593Smuzhiyun }
3245*4882a593Smuzhiyun
3246*4882a593Smuzhiyun static inline bool dev_has_header(const struct net_device *dev)
3247*4882a593Smuzhiyun {
3248*4882a593Smuzhiyun return dev->header_ops && dev->header_ops->create;
3249*4882a593Smuzhiyun }
3250*4882a593Smuzhiyun
3251*4882a593Smuzhiyun #ifdef CONFIG_NET_FLOW_LIMIT
3252*4882a593Smuzhiyun #define FLOW_LIMIT_HISTORY (1 << 7) /* must be a power of 2 and must not overflow the buckets */
3253*4882a593Smuzhiyun struct sd_flow_limit {
3254*4882a593Smuzhiyun u64 count;
3255*4882a593Smuzhiyun unsigned int num_buckets;
3256*4882a593Smuzhiyun unsigned int history_head;
3257*4882a593Smuzhiyun u16 history[FLOW_LIMIT_HISTORY];
3258*4882a593Smuzhiyun u8 buckets[];
3259*4882a593Smuzhiyun };
3260*4882a593Smuzhiyun
3261*4882a593Smuzhiyun extern int netdev_flow_limit_table_len;
3262*4882a593Smuzhiyun #endif /* CONFIG_NET_FLOW_LIMIT */
3263*4882a593Smuzhiyun
3264*4882a593Smuzhiyun /*
3265*4882a593Smuzhiyun * Incoming packets are placed on per-CPU queues
3266*4882a593Smuzhiyun */
3267*4882a593Smuzhiyun struct softnet_data {
3268*4882a593Smuzhiyun struct list_head poll_list;
3269*4882a593Smuzhiyun struct sk_buff_head process_queue;
3270*4882a593Smuzhiyun
3271*4882a593Smuzhiyun /* stats */
3272*4882a593Smuzhiyun unsigned int processed;
3273*4882a593Smuzhiyun unsigned int time_squeeze;
3274*4882a593Smuzhiyun unsigned int received_rps;
3275*4882a593Smuzhiyun #ifdef CONFIG_RPS
3276*4882a593Smuzhiyun struct softnet_data *rps_ipi_list;
3277*4882a593Smuzhiyun #endif
3278*4882a593Smuzhiyun #ifdef CONFIG_NET_FLOW_LIMIT
3279*4882a593Smuzhiyun struct sd_flow_limit __rcu *flow_limit;
3280*4882a593Smuzhiyun #endif
3281*4882a593Smuzhiyun struct Qdisc *output_queue;
3282*4882a593Smuzhiyun struct Qdisc **output_queue_tailp;
3283*4882a593Smuzhiyun struct sk_buff *completion_queue;
3284*4882a593Smuzhiyun #ifdef CONFIG_XFRM_OFFLOAD
3285*4882a593Smuzhiyun struct sk_buff_head xfrm_backlog;
3286*4882a593Smuzhiyun #endif
3287*4882a593Smuzhiyun /* written and read only by owning cpu: */
3288*4882a593Smuzhiyun struct {
3289*4882a593Smuzhiyun u16 recursion;
3290*4882a593Smuzhiyun u8 more;
3291*4882a593Smuzhiyun } xmit;
3292*4882a593Smuzhiyun #ifdef CONFIG_RPS
3293*4882a593Smuzhiyun /* input_queue_head should be written by cpu owning this struct,
3294*4882a593Smuzhiyun * and only read by other cpus. Worth using a cache line.
3295*4882a593Smuzhiyun */
3296*4882a593Smuzhiyun unsigned int input_queue_head ____cacheline_aligned_in_smp;
3297*4882a593Smuzhiyun
3298*4882a593Smuzhiyun /* Elements below can be accessed between CPUs for RPS/RFS */
3299*4882a593Smuzhiyun call_single_data_t csd ____cacheline_aligned_in_smp;
3300*4882a593Smuzhiyun struct softnet_data *rps_ipi_next;
3301*4882a593Smuzhiyun unsigned int cpu;
3302*4882a593Smuzhiyun unsigned int input_queue_tail;
3303*4882a593Smuzhiyun #endif
3304*4882a593Smuzhiyun unsigned int dropped;
3305*4882a593Smuzhiyun struct sk_buff_head input_pkt_queue;
3306*4882a593Smuzhiyun struct napi_struct backlog;
3307*4882a593Smuzhiyun
3308*4882a593Smuzhiyun };
3309*4882a593Smuzhiyun
3310*4882a593Smuzhiyun static inline void input_queue_head_incr(struct softnet_data *sd)
3311*4882a593Smuzhiyun {
3312*4882a593Smuzhiyun #ifdef CONFIG_RPS
3313*4882a593Smuzhiyun sd->input_queue_head++;
3314*4882a593Smuzhiyun #endif
3315*4882a593Smuzhiyun }
3316*4882a593Smuzhiyun
3317*4882a593Smuzhiyun static inline void input_queue_tail_incr_save(struct softnet_data *sd,
3318*4882a593Smuzhiyun unsigned int *qtail)
3319*4882a593Smuzhiyun {
3320*4882a593Smuzhiyun #ifdef CONFIG_RPS
3321*4882a593Smuzhiyun *qtail = ++sd->input_queue_tail;
3322*4882a593Smuzhiyun #endif
3323*4882a593Smuzhiyun }
3324*4882a593Smuzhiyun
3325*4882a593Smuzhiyun DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
3326*4882a593Smuzhiyun
3327*4882a593Smuzhiyun static inline int dev_recursion_level(void)
3328*4882a593Smuzhiyun {
3329*4882a593Smuzhiyun return this_cpu_read(softnet_data.xmit.recursion);
3330*4882a593Smuzhiyun }
3331*4882a593Smuzhiyun
3332*4882a593Smuzhiyun #define XMIT_RECURSION_LIMIT 8
3333*4882a593Smuzhiyun static inline bool dev_xmit_recursion(void)
3334*4882a593Smuzhiyun {
3335*4882a593Smuzhiyun return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
3336*4882a593Smuzhiyun XMIT_RECURSION_LIMIT);
3337*4882a593Smuzhiyun }
3338*4882a593Smuzhiyun
3339*4882a593Smuzhiyun static inline void dev_xmit_recursion_inc(void)
3340*4882a593Smuzhiyun {
3341*4882a593Smuzhiyun __this_cpu_inc(softnet_data.xmit.recursion);
3342*4882a593Smuzhiyun }
3343*4882a593Smuzhiyun
3344*4882a593Smuzhiyun static inline void dev_xmit_recursion_dec(void)
3345*4882a593Smuzhiyun {
3346*4882a593Smuzhiyun __this_cpu_dec(softnet_data.xmit.recursion);
3347*4882a593Smuzhiyun }
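
/*
 * Illustrative sketch: code that re-enters the transmit path from within
 * it (for example a redirect helper) guards against unbounded recursion
 * roughly like this:
 *
 *	if (dev_xmit_recursion())
 *		return -ENETDOWN;	// give up instead of recursing further
 *	dev_xmit_recursion_inc();
 *	ret = dev_queue_xmit(skb);
 *	dev_xmit_recursion_dec();
 */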
3348*4882a593Smuzhiyun
3349*4882a593Smuzhiyun void __netif_schedule(struct Qdisc *q);
3350*4882a593Smuzhiyun void netif_schedule_queue(struct netdev_queue *txq);
3351*4882a593Smuzhiyun
3352*4882a593Smuzhiyun static inline void netif_tx_schedule_all(struct net_device *dev)
3353*4882a593Smuzhiyun {
3354*4882a593Smuzhiyun unsigned int i;
3355*4882a593Smuzhiyun
3356*4882a593Smuzhiyun for (i = 0; i < dev->num_tx_queues; i++)
3357*4882a593Smuzhiyun netif_schedule_queue(netdev_get_tx_queue(dev, i));
3358*4882a593Smuzhiyun }
3359*4882a593Smuzhiyun
3360*4882a593Smuzhiyun static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
3361*4882a593Smuzhiyun {
3362*4882a593Smuzhiyun clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3363*4882a593Smuzhiyun }
3364*4882a593Smuzhiyun
3365*4882a593Smuzhiyun /**
3366*4882a593Smuzhiyun * netif_start_queue - allow transmit
3367*4882a593Smuzhiyun * @dev: network device
3368*4882a593Smuzhiyun *
3369*4882a593Smuzhiyun * Allow upper layers to call the device hard_start_xmit routine.
3370*4882a593Smuzhiyun */
3371*4882a593Smuzhiyun static inline void netif_start_queue(struct net_device *dev)
3372*4882a593Smuzhiyun {
3373*4882a593Smuzhiyun netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
3374*4882a593Smuzhiyun }
3375*4882a593Smuzhiyun
3376*4882a593Smuzhiyun static inline void netif_tx_start_all_queues(struct net_device *dev)
3377*4882a593Smuzhiyun {
3378*4882a593Smuzhiyun unsigned int i;
3379*4882a593Smuzhiyun
3380*4882a593Smuzhiyun for (i = 0; i < dev->num_tx_queues; i++) {
3381*4882a593Smuzhiyun struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3382*4882a593Smuzhiyun netif_tx_start_queue(txq);
3383*4882a593Smuzhiyun }
3384*4882a593Smuzhiyun }
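
/*
 * Illustrative sketch: a driver's ndo_open() typically enables transmission
 * only once its rings and interrupts are ready. "example_open" is a
 * hypothetical driver callback, not something defined in this header:
 *
 *	static int example_open(struct net_device *dev)
 *	{
 *		...
 *		netif_tx_start_all_queues(dev);	// or netif_start_queue()
 *		return 0;
 *	}
 */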
3385*4882a593Smuzhiyun
3386*4882a593Smuzhiyun void netif_tx_wake_queue(struct netdev_queue *dev_queue);
3387*4882a593Smuzhiyun
3388*4882a593Smuzhiyun /**
3389*4882a593Smuzhiyun * netif_wake_queue - restart transmit
3390*4882a593Smuzhiyun * @dev: network device
3391*4882a593Smuzhiyun *
3392*4882a593Smuzhiyun * Allow upper layers to call the device hard_start_xmit routine.
3393*4882a593Smuzhiyun * Used for flow control when transmit resources are available.
3394*4882a593Smuzhiyun */
3395*4882a593Smuzhiyun static inline void netif_wake_queue(struct net_device *dev)
3396*4882a593Smuzhiyun {
3397*4882a593Smuzhiyun netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
3398*4882a593Smuzhiyun }
3399*4882a593Smuzhiyun
3400*4882a593Smuzhiyun static inline void netif_tx_wake_all_queues(struct net_device *dev)
3401*4882a593Smuzhiyun {
3402*4882a593Smuzhiyun unsigned int i;
3403*4882a593Smuzhiyun
3404*4882a593Smuzhiyun for (i = 0; i < dev->num_tx_queues; i++) {
3405*4882a593Smuzhiyun struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
3406*4882a593Smuzhiyun netif_tx_wake_queue(txq);
3407*4882a593Smuzhiyun }
3408*4882a593Smuzhiyun }
3409*4882a593Smuzhiyun
3410*4882a593Smuzhiyun static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
3411*4882a593Smuzhiyun {
3412*4882a593Smuzhiyun set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3413*4882a593Smuzhiyun }
3414*4882a593Smuzhiyun
3415*4882a593Smuzhiyun /**
3416*4882a593Smuzhiyun * netif_stop_queue - stop the transmit queue
3417*4882a593Smuzhiyun * @dev: network device
3418*4882a593Smuzhiyun *
3419*4882a593Smuzhiyun * Stop upper layers calling the device hard_start_xmit routine.
3420*4882a593Smuzhiyun * Used for flow control when transmit resources are unavailable.
3421*4882a593Smuzhiyun */
3422*4882a593Smuzhiyun static inline void netif_stop_queue(struct net_device *dev)
3423*4882a593Smuzhiyun {
3424*4882a593Smuzhiyun netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
3425*4882a593Smuzhiyun }
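
/*
 * Illustrative sketch: single-queue drivers commonly stop the queue from
 * ndo_start_xmit() when the TX ring is nearly full and wake it again from
 * the TX completion handler. "tx_ring_free", "priv" and "wake_thresh" are
 * hypothetical driver-private names used only for this example:
 *
 *	if (unlikely(tx_ring_free(priv) < MAX_SKB_FRAGS + 1))
 *		netif_stop_queue(dev);
 *	...
 *	// in the completion handler, once space is available again:
 *	if (netif_queue_stopped(dev) && tx_ring_free(priv) > wake_thresh)
 *		netif_wake_queue(dev);
 */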
3426*4882a593Smuzhiyun
3427*4882a593Smuzhiyun void netif_tx_stop_all_queues(struct net_device *dev);
3428*4882a593Smuzhiyun
3429*4882a593Smuzhiyun static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
3430*4882a593Smuzhiyun {
3431*4882a593Smuzhiyun return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3432*4882a593Smuzhiyun }
3433*4882a593Smuzhiyun
3434*4882a593Smuzhiyun /**
3435*4882a593Smuzhiyun * netif_queue_stopped - test if transmit queue is flow-blocked
3436*4882a593Smuzhiyun * @dev: network device
3437*4882a593Smuzhiyun *
3438*4882a593Smuzhiyun * Test if transmit queue on device is currently unable to send.
3439*4882a593Smuzhiyun */
3440*4882a593Smuzhiyun static inline bool netif_queue_stopped(const struct net_device *dev)
3441*4882a593Smuzhiyun {
3442*4882a593Smuzhiyun return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
3443*4882a593Smuzhiyun }
3444*4882a593Smuzhiyun
3445*4882a593Smuzhiyun static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
3446*4882a593Smuzhiyun {
3447*4882a593Smuzhiyun return dev_queue->state & QUEUE_STATE_ANY_XOFF;
3448*4882a593Smuzhiyun }
3449*4882a593Smuzhiyun
3450*4882a593Smuzhiyun static inline bool
3451*4882a593Smuzhiyun netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
3452*4882a593Smuzhiyun {
3453*4882a593Smuzhiyun return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
3454*4882a593Smuzhiyun }
3455*4882a593Smuzhiyun
3456*4882a593Smuzhiyun static inline bool
3457*4882a593Smuzhiyun netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
3458*4882a593Smuzhiyun {
3459*4882a593Smuzhiyun return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
3460*4882a593Smuzhiyun }
3461*4882a593Smuzhiyun
3462*4882a593Smuzhiyun /**
3463*4882a593Smuzhiyun * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
3464*4882a593Smuzhiyun * @dev_queue: pointer to transmit queue
3465*4882a593Smuzhiyun *
3466*4882a593Smuzhiyun * BQL enabled drivers might use this helper in their ndo_start_xmit(),
3467*4882a593Smuzhiyun * to give appropriate hint to the CPU.
3468*4882a593Smuzhiyun */
3469*4882a593Smuzhiyun static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
3470*4882a593Smuzhiyun {
3471*4882a593Smuzhiyun #ifdef CONFIG_BQL
3472*4882a593Smuzhiyun prefetchw(&dev_queue->dql.num_queued);
3473*4882a593Smuzhiyun #endif
3474*4882a593Smuzhiyun }
3475*4882a593Smuzhiyun
3476*4882a593Smuzhiyun /**
3477*4882a593Smuzhiyun * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
3478*4882a593Smuzhiyun * @dev_queue: pointer to transmit queue
3479*4882a593Smuzhiyun *
3480*4882a593Smuzhiyun * BQL enabled drivers might use this helper in their TX completion path,
3481*4882a593Smuzhiyun * to give appropriate hint to the CPU.
3482*4882a593Smuzhiyun */
3483*4882a593Smuzhiyun static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
3484*4882a593Smuzhiyun {
3485*4882a593Smuzhiyun #ifdef CONFIG_BQL
3486*4882a593Smuzhiyun prefetchw(&dev_queue->dql.limit);
3487*4882a593Smuzhiyun #endif
3488*4882a593Smuzhiyun }
3489*4882a593Smuzhiyun
3490*4882a593Smuzhiyun static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3491*4882a593Smuzhiyun unsigned int bytes)
3492*4882a593Smuzhiyun {
3493*4882a593Smuzhiyun #ifdef CONFIG_BQL
3494*4882a593Smuzhiyun dql_queued(&dev_queue->dql, bytes);
3495*4882a593Smuzhiyun
3496*4882a593Smuzhiyun if (likely(dql_avail(&dev_queue->dql) >= 0))
3497*4882a593Smuzhiyun return;
3498*4882a593Smuzhiyun
3499*4882a593Smuzhiyun set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
3500*4882a593Smuzhiyun
3501*4882a593Smuzhiyun /*
3502*4882a593Smuzhiyun * The XOFF flag must be set before checking the dql_avail below,
3503*4882a593Smuzhiyun * because in netdev_tx_completed_queue we update the dql_completed
3504*4882a593Smuzhiyun * before checking the XOFF flag.
3505*4882a593Smuzhiyun */
3506*4882a593Smuzhiyun smp_mb();
3507*4882a593Smuzhiyun
3508*4882a593Smuzhiyun /* check again in case another CPU has just made room avail */
3509*4882a593Smuzhiyun if (unlikely(dql_avail(&dev_queue->dql) >= 0))
3510*4882a593Smuzhiyun clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
3511*4882a593Smuzhiyun #endif
3512*4882a593Smuzhiyun }
3513*4882a593Smuzhiyun
3514*4882a593Smuzhiyun /* Variant of netdev_tx_sent_queue() for drivers that are aware
3515*4882a593Smuzhiyun * that they should not test BQL status themselves.
3516*4882a593Smuzhiyun * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
3517*4882a593Smuzhiyun * skb of a batch.
3518*4882a593Smuzhiyun * Returns true if the doorbell must be used to kick the NIC.
3519*4882a593Smuzhiyun */
3520*4882a593Smuzhiyun static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
3521*4882a593Smuzhiyun unsigned int bytes,
3522*4882a593Smuzhiyun bool xmit_more)
3523*4882a593Smuzhiyun {
3524*4882a593Smuzhiyun if (xmit_more) {
3525*4882a593Smuzhiyun #ifdef CONFIG_BQL
3526*4882a593Smuzhiyun dql_queued(&dev_queue->dql, bytes);
3527*4882a593Smuzhiyun #endif
3528*4882a593Smuzhiyun return netif_tx_queue_stopped(dev_queue);
3529*4882a593Smuzhiyun }
3530*4882a593Smuzhiyun netdev_tx_sent_queue(dev_queue, bytes);
3531*4882a593Smuzhiyun return true;
3532*4882a593Smuzhiyun }
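
/*
 * Illustrative sketch: a BQL-aware driver can use the return value to
 * decide whether to ring the hardware doorbell for a batch of skbs.
 * "example_hw_kick" and "priv" are hypothetical driver-private names:
 *
 *	doorbell = __netdev_tx_sent_queue(txq, skb->len,
 *					  netdev_xmit_more());
 *	if (doorbell)
 *		example_hw_kick(priv);
 */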
3533*4882a593Smuzhiyun
3534*4882a593Smuzhiyun /**
3535*4882a593Smuzhiyun * netdev_sent_queue - report the number of bytes queued to hardware
3536*4882a593Smuzhiyun * @dev: network device
3537*4882a593Smuzhiyun * @bytes: number of bytes queued to the hardware device queue
3538*4882a593Smuzhiyun *
3539*4882a593Smuzhiyun * Report the number of bytes queued for sending/completion to the network
3540*4882a593Smuzhiyun * device hardware queue. @bytes should be a good approximation and should
3541*4882a593Smuzhiyun * exactly match netdev_completed_queue() @bytes
3542*4882a593Smuzhiyun */
3543*4882a593Smuzhiyun static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
3544*4882a593Smuzhiyun {
3545*4882a593Smuzhiyun netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
3546*4882a593Smuzhiyun }
3547*4882a593Smuzhiyun
3548*4882a593Smuzhiyun static inline bool __netdev_sent_queue(struct net_device *dev,
3549*4882a593Smuzhiyun unsigned int bytes,
3550*4882a593Smuzhiyun bool xmit_more)
3551*4882a593Smuzhiyun {
3552*4882a593Smuzhiyun return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
3553*4882a593Smuzhiyun xmit_more);
3554*4882a593Smuzhiyun }
3555*4882a593Smuzhiyun
3556*4882a593Smuzhiyun static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
3557*4882a593Smuzhiyun unsigned int pkts, unsigned int bytes)
3558*4882a593Smuzhiyun {
3559*4882a593Smuzhiyun #ifdef CONFIG_BQL
3560*4882a593Smuzhiyun if (unlikely(!bytes))
3561*4882a593Smuzhiyun return;
3562*4882a593Smuzhiyun
3563*4882a593Smuzhiyun dql_completed(&dev_queue->dql, bytes);
3564*4882a593Smuzhiyun
3565*4882a593Smuzhiyun /*
3566*4882a593Smuzhiyun * Without the memory barrier there is a small possibility that
3567*4882a593Smuzhiyun * netdev_tx_sent_queue will miss the update and cause the queue to
3568*4882a593Smuzhiyun * be stopped forever
3569*4882a593Smuzhiyun */
3570*4882a593Smuzhiyun smp_mb();
3571*4882a593Smuzhiyun
3572*4882a593Smuzhiyun if (unlikely(dql_avail(&dev_queue->dql) < 0))
3573*4882a593Smuzhiyun return;
3574*4882a593Smuzhiyun
3575*4882a593Smuzhiyun if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
3576*4882a593Smuzhiyun netif_schedule_queue(dev_queue);
3577*4882a593Smuzhiyun #endif
3578*4882a593Smuzhiyun }
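
/*
 * Illustrative sketch: the TX completion path reports what was actually
 * sent so BQL can adjust its limit; the byte count must mirror what was
 * previously fed to netdev_tx_sent_queue() for the same queue:
 *
 *	netdev_tx_completed_queue(txq, pkts_cleaned, bytes_cleaned);
 */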
3579*4882a593Smuzhiyun
3580*4882a593Smuzhiyun /**
3581*4882a593Smuzhiyun * netdev_completed_queue - report bytes and packets completed by device
3582*4882a593Smuzhiyun * @dev: network device
3583*4882a593Smuzhiyun * @pkts: actual number of packets sent over the medium
3584*4882a593Smuzhiyun * @bytes: actual number of bytes sent over the medium
3585*4882a593Smuzhiyun *
3586*4882a593Smuzhiyun * Report the number of bytes and packets transmitted by the network device
3587*4882a593Smuzhiyun * hardware queue over the physical medium, @bytes must exactly match the
3588*4882a593Smuzhiyun * @bytes amount passed to netdev_sent_queue()
3589*4882a593Smuzhiyun */
3590*4882a593Smuzhiyun static inline void netdev_completed_queue(struct net_device *dev,
3591*4882a593Smuzhiyun unsigned int pkts, unsigned int bytes)
3592*4882a593Smuzhiyun {
3593*4882a593Smuzhiyun netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
3594*4882a593Smuzhiyun }
3595*4882a593Smuzhiyun
3596*4882a593Smuzhiyun static inline void netdev_tx_reset_queue(struct netdev_queue *q)
3597*4882a593Smuzhiyun {
3598*4882a593Smuzhiyun #ifdef CONFIG_BQL
3599*4882a593Smuzhiyun clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
3600*4882a593Smuzhiyun dql_reset(&q->dql);
3601*4882a593Smuzhiyun #endif
3602*4882a593Smuzhiyun }
3603*4882a593Smuzhiyun
3604*4882a593Smuzhiyun /**
3605*4882a593Smuzhiyun * netdev_reset_queue - reset the packets and bytes count of a network device
3606*4882a593Smuzhiyun * @dev_queue: network device
3607*4882a593Smuzhiyun *
3608*4882a593Smuzhiyun * Reset the bytes and packet count of a network device and clear the
3609*4882a593Smuzhiyun * software flow control OFF bit for this network device
3610*4882a593Smuzhiyun */
3611*4882a593Smuzhiyun static inline void netdev_reset_queue(struct net_device *dev_queue)
3612*4882a593Smuzhiyun {
3613*4882a593Smuzhiyun netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
3614*4882a593Smuzhiyun }
3615*4882a593Smuzhiyun
3616*4882a593Smuzhiyun /**
3617*4882a593Smuzhiyun * netdev_cap_txqueue - check if selected tx queue exceeds device queues
3618*4882a593Smuzhiyun * @dev: network device
3619*4882a593Smuzhiyun * @queue_index: given tx queue index
3620*4882a593Smuzhiyun *
3621*4882a593Smuzhiyun * Returns 0 if given tx queue index >= number of device tx queues,
3622*4882a593Smuzhiyun * otherwise returns the originally passed tx queue index.
3623*4882a593Smuzhiyun */
3624*4882a593Smuzhiyun static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
3625*4882a593Smuzhiyun {
3626*4882a593Smuzhiyun if (unlikely(queue_index >= dev->real_num_tx_queues)) {
3627*4882a593Smuzhiyun net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
3628*4882a593Smuzhiyun dev->name, queue_index,
3629*4882a593Smuzhiyun dev->real_num_tx_queues);
3630*4882a593Smuzhiyun return 0;
3631*4882a593Smuzhiyun }
3632*4882a593Smuzhiyun
3633*4882a593Smuzhiyun return queue_index;
3634*4882a593Smuzhiyun }
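
/*
 * Illustrative sketch: an ndo_select_queue() implementation that derives a
 * queue from its own hash can clamp the result before returning it.
 * "example_hash_to_queue" is a hypothetical mapping function:
 *
 *	u16 qid = example_hash_to_queue(skb);
 *
 *	return netdev_cap_txqueue(dev, qid);
 */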
3635*4882a593Smuzhiyun
3636*4882a593Smuzhiyun /**
3637*4882a593Smuzhiyun * netif_running - test if up
3638*4882a593Smuzhiyun * @dev: network device
3639*4882a593Smuzhiyun *
3640*4882a593Smuzhiyun * Test if the device has been brought up.
3641*4882a593Smuzhiyun */
3642*4882a593Smuzhiyun static inline bool netif_running(const struct net_device *dev)
3643*4882a593Smuzhiyun {
3644*4882a593Smuzhiyun return test_bit(__LINK_STATE_START, &dev->state);
3645*4882a593Smuzhiyun }
3646*4882a593Smuzhiyun
3647*4882a593Smuzhiyun /*
3648*4882a593Smuzhiyun * Routines to manage the subqueues on a device. We only need start,
3649*4882a593Smuzhiyun * stop, and a check if it's stopped. All other device management is
3650*4882a593Smuzhiyun * done at the overall netdevice level.
3651*4882a593Smuzhiyun * Also test the device if we're multiqueue.
3652*4882a593Smuzhiyun */
3653*4882a593Smuzhiyun
3654*4882a593Smuzhiyun /**
3655*4882a593Smuzhiyun * netif_start_subqueue - allow sending packets on subqueue
3656*4882a593Smuzhiyun * @dev: network device
3657*4882a593Smuzhiyun * @queue_index: sub queue index
3658*4882a593Smuzhiyun *
3659*4882a593Smuzhiyun * Start individual transmit queue of a device with multiple transmit queues.
3660*4882a593Smuzhiyun */
3661*4882a593Smuzhiyun static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
3662*4882a593Smuzhiyun {
3663*4882a593Smuzhiyun struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3664*4882a593Smuzhiyun
3665*4882a593Smuzhiyun netif_tx_start_queue(txq);
3666*4882a593Smuzhiyun }
3667*4882a593Smuzhiyun
3668*4882a593Smuzhiyun /**
3669*4882a593Smuzhiyun * netif_stop_subqueue - stop sending packets on subqueue
3670*4882a593Smuzhiyun * @dev: network device
3671*4882a593Smuzhiyun * @queue_index: sub queue index
3672*4882a593Smuzhiyun *
3673*4882a593Smuzhiyun * Stop individual transmit queue of a device with multiple transmit queues.
3674*4882a593Smuzhiyun */
3675*4882a593Smuzhiyun static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
3676*4882a593Smuzhiyun {
3677*4882a593Smuzhiyun struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3678*4882a593Smuzhiyun netif_tx_stop_queue(txq);
3679*4882a593Smuzhiyun }
3680*4882a593Smuzhiyun
3681*4882a593Smuzhiyun /**
3682*4882a593Smuzhiyun * netif_subqueue_stopped - test status of subqueue
3683*4882a593Smuzhiyun * @dev: network device
3684*4882a593Smuzhiyun * @queue_index: sub queue index
3685*4882a593Smuzhiyun *
3686*4882a593Smuzhiyun * Check individual transmit queue of a device with multiple transmit queues.
3687*4882a593Smuzhiyun */
3688*4882a593Smuzhiyun static inline bool __netif_subqueue_stopped(const struct net_device *dev,
3689*4882a593Smuzhiyun u16 queue_index)
3690*4882a593Smuzhiyun {
3691*4882a593Smuzhiyun struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3692*4882a593Smuzhiyun
3693*4882a593Smuzhiyun return netif_tx_queue_stopped(txq);
3694*4882a593Smuzhiyun }
3695*4882a593Smuzhiyun
3696*4882a593Smuzhiyun static inline bool netif_subqueue_stopped(const struct net_device *dev,
3697*4882a593Smuzhiyun struct sk_buff *skb)
3698*4882a593Smuzhiyun {
3699*4882a593Smuzhiyun return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
3700*4882a593Smuzhiyun }
3701*4882a593Smuzhiyun
3702*4882a593Smuzhiyun /**
3703*4882a593Smuzhiyun * netif_wake_subqueue - allow sending packets on subqueue
3704*4882a593Smuzhiyun * @dev: network device
3705*4882a593Smuzhiyun * @queue_index: sub queue index
3706*4882a593Smuzhiyun *
3707*4882a593Smuzhiyun * Resume individual transmit queue of a device with multiple transmit queues.
3708*4882a593Smuzhiyun */
3709*4882a593Smuzhiyun static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
3710*4882a593Smuzhiyun {
3711*4882a593Smuzhiyun struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
3712*4882a593Smuzhiyun
3713*4882a593Smuzhiyun netif_tx_wake_queue(txq);
3714*4882a593Smuzhiyun }
3715*4882a593Smuzhiyun
3716*4882a593Smuzhiyun #ifdef CONFIG_XPS
3717*4882a593Smuzhiyun int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
3718*4882a593Smuzhiyun u16 index);
3719*4882a593Smuzhiyun int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
3720*4882a593Smuzhiyun u16 index, bool is_rxqs_map);
3721*4882a593Smuzhiyun
3722*4882a593Smuzhiyun /**
3723*4882a593Smuzhiyun * netif_attr_test_mask - Test a CPU or Rx queue set in a mask
3724*4882a593Smuzhiyun * @j: CPU/Rx queue index
3725*4882a593Smuzhiyun * @mask: bitmask of all cpus/rx queues
3726*4882a593Smuzhiyun * @nr_bits: number of bits in the bitmask
3727*4882a593Smuzhiyun *
3728*4882a593Smuzhiyun * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
3729*4882a593Smuzhiyun */
3730*4882a593Smuzhiyun static inline bool netif_attr_test_mask(unsigned long j,
3731*4882a593Smuzhiyun const unsigned long *mask,
3732*4882a593Smuzhiyun unsigned int nr_bits)
3733*4882a593Smuzhiyun {
3734*4882a593Smuzhiyun cpu_max_bits_warn(j, nr_bits);
3735*4882a593Smuzhiyun return test_bit(j, mask);
3736*4882a593Smuzhiyun }
3737*4882a593Smuzhiyun
3738*4882a593Smuzhiyun /**
3739*4882a593Smuzhiyun * netif_attr_test_online - Test for online CPU/Rx queue
3740*4882a593Smuzhiyun * @j: CPU/Rx queue index
3741*4882a593Smuzhiyun * @online_mask: bitmask for CPUs/Rx queues that are online
3742*4882a593Smuzhiyun * @nr_bits: number of bits in the bitmask
3743*4882a593Smuzhiyun *
3744*4882a593Smuzhiyun * Returns true if a CPU/Rx queue is online.
3745*4882a593Smuzhiyun */
3746*4882a593Smuzhiyun static inline bool netif_attr_test_online(unsigned long j,
3747*4882a593Smuzhiyun const unsigned long *online_mask,
3748*4882a593Smuzhiyun unsigned int nr_bits)
3749*4882a593Smuzhiyun {
3750*4882a593Smuzhiyun cpu_max_bits_warn(j, nr_bits);
3751*4882a593Smuzhiyun
3752*4882a593Smuzhiyun if (online_mask)
3753*4882a593Smuzhiyun return test_bit(j, online_mask);
3754*4882a593Smuzhiyun
3755*4882a593Smuzhiyun return (j < nr_bits);
3756*4882a593Smuzhiyun }
3757*4882a593Smuzhiyun
3758*4882a593Smuzhiyun /**
3759*4882a593Smuzhiyun * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask
3760*4882a593Smuzhiyun * @n: CPU/Rx queue index
3761*4882a593Smuzhiyun * @srcp: the cpumask/Rx queue mask pointer
3762*4882a593Smuzhiyun * @nr_bits: number of bits in the bitmask
3763*4882a593Smuzhiyun *
3764*4882a593Smuzhiyun * Returns >= nr_bits if no further CPUs/Rx queues set.
3765*4882a593Smuzhiyun */
3766*4882a593Smuzhiyun static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
3767*4882a593Smuzhiyun unsigned int nr_bits)
3768*4882a593Smuzhiyun {
3769*4882a593Smuzhiyun /* -1 is a legal arg here. */
3770*4882a593Smuzhiyun if (n != -1)
3771*4882a593Smuzhiyun cpu_max_bits_warn(n, nr_bits);
3772*4882a593Smuzhiyun
3773*4882a593Smuzhiyun if (srcp)
3774*4882a593Smuzhiyun return find_next_bit(srcp, nr_bits, n + 1);
3775*4882a593Smuzhiyun
3776*4882a593Smuzhiyun return n + 1;
3777*4882a593Smuzhiyun }
3778*4882a593Smuzhiyun
3779*4882a593Smuzhiyun /**
3780*4882a593Smuzhiyun * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p
3781*4882a593Smuzhiyun * @n: CPU/Rx queue index
3782*4882a593Smuzhiyun * @src1p: the first CPUs/Rx queues mask pointer
3783*4882a593Smuzhiyun * @src2p: the second CPUs/Rx queues mask pointer
3784*4882a593Smuzhiyun * @nr_bits: number of bits in the bitmask
3785*4882a593Smuzhiyun *
3786*4882a593Smuzhiyun * Returns >= nr_bits if no further CPUs/Rx queues set in both.
3787*4882a593Smuzhiyun */
3788*4882a593Smuzhiyun static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
3789*4882a593Smuzhiyun const unsigned long *src2p,
3790*4882a593Smuzhiyun unsigned int nr_bits)
3791*4882a593Smuzhiyun {
3792*4882a593Smuzhiyun /* -1 is a legal arg here. */
3793*4882a593Smuzhiyun if (n != -1)
3794*4882a593Smuzhiyun cpu_max_bits_warn(n, nr_bits);
3795*4882a593Smuzhiyun
3796*4882a593Smuzhiyun if (src1p && src2p)
3797*4882a593Smuzhiyun return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
3798*4882a593Smuzhiyun else if (src1p)
3799*4882a593Smuzhiyun return find_next_bit(src1p, nr_bits, n + 1);
3800*4882a593Smuzhiyun else if (src2p)
3801*4882a593Smuzhiyun return find_next_bit(src2p, nr_bits, n + 1);
3802*4882a593Smuzhiyun
3803*4882a593Smuzhiyun return n + 1;
3804*4882a593Smuzhiyun }
3805*4882a593Smuzhiyun #else
3806*4882a593Smuzhiyun static inline int netif_set_xps_queue(struct net_device *dev,
3807*4882a593Smuzhiyun const struct cpumask *mask,
3808*4882a593Smuzhiyun u16 index)
3809*4882a593Smuzhiyun {
3810*4882a593Smuzhiyun return 0;
3811*4882a593Smuzhiyun }
3812*4882a593Smuzhiyun
3813*4882a593Smuzhiyun static inline int __netif_set_xps_queue(struct net_device *dev,
3814*4882a593Smuzhiyun const unsigned long *mask,
3815*4882a593Smuzhiyun u16 index, bool is_rxqs_map)
3816*4882a593Smuzhiyun {
3817*4882a593Smuzhiyun return 0;
3818*4882a593Smuzhiyun }
3819*4882a593Smuzhiyun #endif
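
/*
 * Illustrative sketch: a driver that wants transmit work to stay on the
 * CPUs servicing a queue's interrupt can program XPS per TX queue; "mask"
 * and the CPU selection are driver policy, not mandated by this header:
 *
 *	cpumask_clear(&mask);
 *	cpumask_set_cpu(cpu, &mask);
 *	netif_set_xps_queue(dev, &mask, queue_index);
 */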
3820*4882a593Smuzhiyun
3821*4882a593Smuzhiyun /**
3822*4882a593Smuzhiyun * netif_is_multiqueue - test if device has multiple transmit queues
3823*4882a593Smuzhiyun * @dev: network device
3824*4882a593Smuzhiyun *
3825*4882a593Smuzhiyun * Check if device has multiple transmit queues
3826*4882a593Smuzhiyun */
3827*4882a593Smuzhiyun static inline bool netif_is_multiqueue(const struct net_device *dev)
3828*4882a593Smuzhiyun {
3829*4882a593Smuzhiyun return dev->num_tx_queues > 1;
3830*4882a593Smuzhiyun }
3831*4882a593Smuzhiyun
3832*4882a593Smuzhiyun int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
3833*4882a593Smuzhiyun
3834*4882a593Smuzhiyun #ifdef CONFIG_SYSFS
3835*4882a593Smuzhiyun int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
3836*4882a593Smuzhiyun #else
3837*4882a593Smuzhiyun static inline int netif_set_real_num_rx_queues(struct net_device *dev,
3838*4882a593Smuzhiyun unsigned int rxqs)
3839*4882a593Smuzhiyun {
3840*4882a593Smuzhiyun dev->real_num_rx_queues = rxqs;
3841*4882a593Smuzhiyun return 0;
3842*4882a593Smuzhiyun }
3843*4882a593Smuzhiyun #endif
3844*4882a593Smuzhiyun
3845*4882a593Smuzhiyun static inline struct netdev_rx_queue *
3846*4882a593Smuzhiyun __netif_get_rx_queue(struct net_device *dev, unsigned int rxq)
3847*4882a593Smuzhiyun {
3848*4882a593Smuzhiyun return dev->_rx + rxq;
3849*4882a593Smuzhiyun }
3850*4882a593Smuzhiyun
3851*4882a593Smuzhiyun #ifdef CONFIG_SYSFS
3852*4882a593Smuzhiyun static inline unsigned int get_netdev_rx_queue_index(
3853*4882a593Smuzhiyun struct netdev_rx_queue *queue)
3854*4882a593Smuzhiyun {
3855*4882a593Smuzhiyun struct net_device *dev = queue->dev;
3856*4882a593Smuzhiyun int index = queue - dev->_rx;
3857*4882a593Smuzhiyun
3858*4882a593Smuzhiyun BUG_ON(index >= dev->num_rx_queues);
3859*4882a593Smuzhiyun return index;
3860*4882a593Smuzhiyun }
3861*4882a593Smuzhiyun #endif
3862*4882a593Smuzhiyun
3863*4882a593Smuzhiyun #define DEFAULT_MAX_NUM_RSS_QUEUES (8)
3864*4882a593Smuzhiyun int netif_get_num_default_rss_queues(void);
3865*4882a593Smuzhiyun
3866*4882a593Smuzhiyun enum skb_free_reason {
3867*4882a593Smuzhiyun SKB_REASON_CONSUMED,
3868*4882a593Smuzhiyun SKB_REASON_DROPPED,
3869*4882a593Smuzhiyun };
3870*4882a593Smuzhiyun
3871*4882a593Smuzhiyun void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
3872*4882a593Smuzhiyun void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
3873*4882a593Smuzhiyun
3874*4882a593Smuzhiyun /*
3875*4882a593Smuzhiyun * It is not allowed to call kfree_skb() or consume_skb() from hardware
3876*4882a593Smuzhiyun * interrupt context or with hardware interrupts being disabled.
3877*4882a593Smuzhiyun * (in_irq() || irqs_disabled())
3878*4882a593Smuzhiyun *
3879*4882a593Smuzhiyun * We provide four helpers that can be used in the following contexts:
3880*4882a593Smuzhiyun *
3881*4882a593Smuzhiyun * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
3882*4882a593Smuzhiyun * replacing kfree_skb(skb)
3883*4882a593Smuzhiyun *
3884*4882a593Smuzhiyun * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
3885*4882a593Smuzhiyun * Typically used in place of consume_skb(skb) in TX completion path
3886*4882a593Smuzhiyun *
3887*4882a593Smuzhiyun * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
3888*4882a593Smuzhiyun * replacing kfree_skb(skb)
3889*4882a593Smuzhiyun *
3890*4882a593Smuzhiyun * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
3891*4882a593Smuzhiyun * and consumed a packet. Used in place of consume_skb(skb)
3892*4882a593Smuzhiyun */
3893*4882a593Smuzhiyun static inline void dev_kfree_skb_irq(struct sk_buff *skb)
3894*4882a593Smuzhiyun {
3895*4882a593Smuzhiyun __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
3896*4882a593Smuzhiyun }
3897*4882a593Smuzhiyun
3898*4882a593Smuzhiyun static inline void dev_consume_skb_irq(struct sk_buff *skb)
3899*4882a593Smuzhiyun {
3900*4882a593Smuzhiyun __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
3901*4882a593Smuzhiyun }
3902*4882a593Smuzhiyun
3903*4882a593Smuzhiyun static inline void dev_kfree_skb_any(struct sk_buff *skb)
3904*4882a593Smuzhiyun {
3905*4882a593Smuzhiyun __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
3906*4882a593Smuzhiyun }
3907*4882a593Smuzhiyun
3908*4882a593Smuzhiyun static inline void dev_consume_skb_any(struct sk_buff *skb)
3909*4882a593Smuzhiyun {
3910*4882a593Smuzhiyun __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
3911*4882a593Smuzhiyun }
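
/*
 * Illustrative sketch: a TX completion handler that may run in hard IRQ
 * context frees successfully transmitted skbs with the "consume" variants
 * so drop monitors do not count them as losses:
 *
 *	if (likely(tx_ok))
 *		dev_consume_skb_any(skb);
 *	else
 *		dev_kfree_skb_any(skb);
 */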
3912*4882a593Smuzhiyun
3913*4882a593Smuzhiyun void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
3914*4882a593Smuzhiyun int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
3915*4882a593Smuzhiyun int netif_rx(struct sk_buff *skb);
3916*4882a593Smuzhiyun int netif_rx_ni(struct sk_buff *skb);
3917*4882a593Smuzhiyun int netif_rx_any_context(struct sk_buff *skb);
3918*4882a593Smuzhiyun int netif_receive_skb(struct sk_buff *skb);
3919*4882a593Smuzhiyun int netif_receive_skb_core(struct sk_buff *skb);
3920*4882a593Smuzhiyun void netif_receive_skb_list(struct list_head *head);
3921*4882a593Smuzhiyun gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
3922*4882a593Smuzhiyun void napi_gro_flush(struct napi_struct *napi, bool flush_old);
3923*4882a593Smuzhiyun struct sk_buff *napi_get_frags(struct napi_struct *napi);
3924*4882a593Smuzhiyun gro_result_t napi_gro_frags(struct napi_struct *napi);
3925*4882a593Smuzhiyun struct packet_offload *gro_find_receive_by_type(__be16 type);
3926*4882a593Smuzhiyun struct packet_offload *gro_find_complete_by_type(__be16 type);
3927*4882a593Smuzhiyun
3928*4882a593Smuzhiyun static inline void napi_free_frags(struct napi_struct *napi)
3929*4882a593Smuzhiyun {
3930*4882a593Smuzhiyun kfree_skb(napi->skb);
3931*4882a593Smuzhiyun napi->skb = NULL;
3932*4882a593Smuzhiyun }
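
/*
 * Illustrative sketch: a NAPI poll routine hands received frames to GRO
 * instead of calling netif_receive_skb() directly. "example_rx_next" and
 * "priv" are hypothetical driver-private names:
 *
 *	while (done < budget && (skb = example_rx_next(priv))) {
 *		napi_gro_receive(napi, skb);
 *		done++;
 *	}
 *	if (done < budget)
 *		napi_complete_done(napi, done);
 *	return done;
 */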
3933*4882a593Smuzhiyun
3934*4882a593Smuzhiyun bool netdev_is_rx_handler_busy(struct net_device *dev);
3935*4882a593Smuzhiyun int netdev_rx_handler_register(struct net_device *dev,
3936*4882a593Smuzhiyun rx_handler_func_t *rx_handler,
3937*4882a593Smuzhiyun void *rx_handler_data);
3938*4882a593Smuzhiyun void netdev_rx_handler_unregister(struct net_device *dev);
3939*4882a593Smuzhiyun
3940*4882a593Smuzhiyun bool dev_valid_name(const char *name);
3941*4882a593Smuzhiyun static inline bool is_socket_ioctl_cmd(unsigned int cmd)
3942*4882a593Smuzhiyun {
3943*4882a593Smuzhiyun return _IOC_TYPE(cmd) == SOCK_IOC_TYPE;
3944*4882a593Smuzhiyun }
3945*4882a593Smuzhiyun int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
3946*4882a593Smuzhiyun bool *need_copyout);
3947*4882a593Smuzhiyun int dev_ifconf(struct net *net, struct ifconf *, int);
3948*4882a593Smuzhiyun int dev_ethtool(struct net *net, struct ifreq *);
3949*4882a593Smuzhiyun unsigned int dev_get_flags(const struct net_device *);
3950*4882a593Smuzhiyun int __dev_change_flags(struct net_device *dev, unsigned int flags,
3951*4882a593Smuzhiyun struct netlink_ext_ack *extack);
3952*4882a593Smuzhiyun int dev_change_flags(struct net_device *dev, unsigned int flags,
3953*4882a593Smuzhiyun struct netlink_ext_ack *extack);
3954*4882a593Smuzhiyun void __dev_notify_flags(struct net_device *, unsigned int old_flags,
3955*4882a593Smuzhiyun unsigned int gchanges);
3956*4882a593Smuzhiyun int dev_change_name(struct net_device *, const char *);
3957*4882a593Smuzhiyun int dev_set_alias(struct net_device *, const char *, size_t);
3958*4882a593Smuzhiyun int dev_get_alias(const struct net_device *, char *, size_t);
3959*4882a593Smuzhiyun int dev_change_net_namespace(struct net_device *, struct net *, const char *);
3960*4882a593Smuzhiyun int __dev_set_mtu(struct net_device *, int);
3961*4882a593Smuzhiyun int dev_validate_mtu(struct net_device *dev, int mtu,
3962*4882a593Smuzhiyun struct netlink_ext_ack *extack);
3963*4882a593Smuzhiyun int dev_set_mtu_ext(struct net_device *dev, int mtu,
3964*4882a593Smuzhiyun struct netlink_ext_ack *extack);
3965*4882a593Smuzhiyun int dev_set_mtu(struct net_device *, int);
3966*4882a593Smuzhiyun int dev_change_tx_queue_len(struct net_device *, unsigned long);
3967*4882a593Smuzhiyun void dev_set_group(struct net_device *, int);
3968*4882a593Smuzhiyun int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
3969*4882a593Smuzhiyun struct netlink_ext_ack *extack);
3970*4882a593Smuzhiyun int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
3971*4882a593Smuzhiyun struct netlink_ext_ack *extack);
3972*4882a593Smuzhiyun int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
3973*4882a593Smuzhiyun struct netlink_ext_ack *extack);
3974*4882a593Smuzhiyun int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
3975*4882a593Smuzhiyun int dev_change_carrier(struct net_device *, bool new_carrier);
3976*4882a593Smuzhiyun int dev_get_phys_port_id(struct net_device *dev,
3977*4882a593Smuzhiyun struct netdev_phys_item_id *ppid);
3978*4882a593Smuzhiyun int dev_get_phys_port_name(struct net_device *dev,
3979*4882a593Smuzhiyun char *name, size_t len);
3980*4882a593Smuzhiyun int dev_get_port_parent_id(struct net_device *dev,
3981*4882a593Smuzhiyun struct netdev_phys_item_id *ppid, bool recurse);
3982*4882a593Smuzhiyun bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
3983*4882a593Smuzhiyun int dev_change_proto_down(struct net_device *dev, bool proto_down);
3984*4882a593Smuzhiyun int dev_change_proto_down_generic(struct net_device *dev, bool proto_down);
3985*4882a593Smuzhiyun void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
3986*4882a593Smuzhiyun u32 value);
3987*4882a593Smuzhiyun struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
3988*4882a593Smuzhiyun struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
3989*4882a593Smuzhiyun struct netdev_queue *txq, int *ret);
3990*4882a593Smuzhiyun
3991*4882a593Smuzhiyun typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
3992*4882a593Smuzhiyun int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
3993*4882a593Smuzhiyun int fd, int expected_fd, u32 flags);
3994*4882a593Smuzhiyun int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
3995*4882a593Smuzhiyun u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode);
3996*4882a593Smuzhiyun
3997*4882a593Smuzhiyun int xdp_umem_query(struct net_device *dev, u16 queue_id);
3998*4882a593Smuzhiyun
3999*4882a593Smuzhiyun int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
4000*4882a593Smuzhiyun int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
4001*4882a593Smuzhiyun bool is_skb_forwardable(const struct net_device *dev,
4002*4882a593Smuzhiyun const struct sk_buff *skb);
4003*4882a593Smuzhiyun
4004*4882a593Smuzhiyun static __always_inline int ____dev_forward_skb(struct net_device *dev,
4005*4882a593Smuzhiyun struct sk_buff *skb)
4006*4882a593Smuzhiyun {
4007*4882a593Smuzhiyun if (skb_orphan_frags(skb, GFP_ATOMIC) ||
4008*4882a593Smuzhiyun unlikely(!is_skb_forwardable(dev, skb))) {
4009*4882a593Smuzhiyun atomic_long_inc(&dev->rx_dropped);
4010*4882a593Smuzhiyun kfree_skb(skb);
4011*4882a593Smuzhiyun return NET_RX_DROP;
4012*4882a593Smuzhiyun }
4013*4882a593Smuzhiyun
4014*4882a593Smuzhiyun skb_scrub_packet(skb, true);
4015*4882a593Smuzhiyun skb->priority = 0;
4016*4882a593Smuzhiyun return 0;
4017*4882a593Smuzhiyun }
4018*4882a593Smuzhiyun
4019*4882a593Smuzhiyun bool dev_nit_active(struct net_device *dev);
4020*4882a593Smuzhiyun void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
4021*4882a593Smuzhiyun
4022*4882a593Smuzhiyun extern int netdev_budget;
4023*4882a593Smuzhiyun extern unsigned int netdev_budget_usecs;
4024*4882a593Smuzhiyun
4025*4882a593Smuzhiyun /* Called by rtnetlink.c:rtnl_unlock() */
4026*4882a593Smuzhiyun void netdev_run_todo(void);
4027*4882a593Smuzhiyun
4028*4882a593Smuzhiyun /**
4029*4882a593Smuzhiyun * dev_put - release reference to device
4030*4882a593Smuzhiyun * @dev: network device
4031*4882a593Smuzhiyun *
4032*4882a593Smuzhiyun * Release reference to device to allow it to be freed.
4033*4882a593Smuzhiyun */
4034*4882a593Smuzhiyun static inline void dev_put(struct net_device *dev)
4035*4882a593Smuzhiyun {
4036*4882a593Smuzhiyun if (dev)
4037*4882a593Smuzhiyun this_cpu_dec(*dev->pcpu_refcnt);
4038*4882a593Smuzhiyun }
4039*4882a593Smuzhiyun
4040*4882a593Smuzhiyun /**
4041*4882a593Smuzhiyun * dev_hold - get reference to device
4042*4882a593Smuzhiyun * @dev: network device
4043*4882a593Smuzhiyun *
4044*4882a593Smuzhiyun * Hold reference to device to keep it from being freed.
4045*4882a593Smuzhiyun */
4046*4882a593Smuzhiyun static inline void dev_hold(struct net_device *dev)
4047*4882a593Smuzhiyun {
4048*4882a593Smuzhiyun if (dev)
4049*4882a593Smuzhiyun this_cpu_inc(*dev->pcpu_refcnt);
4050*4882a593Smuzhiyun }
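
/*
 * Illustrative sketch: any code that stashes a struct net_device pointer
 * beyond the current RCU read side must balance dev_hold() with dev_put().
 * "priv->peer" is a hypothetical long-lived reference:
 *
 *	dev_hold(dev);
 *	priv->peer = dev;
 *	...
 *	dev_put(priv->peer);
 *	priv->peer = NULL;
 */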
4051*4882a593Smuzhiyun
4052*4882a593Smuzhiyun /* Carrier loss detection, dial on demand. The functions netif_carrier_on
4053*4882a593Smuzhiyun * and _off may be called from IRQ context, but it is the caller
4054*4882a593Smuzhiyun * who is responsible for serialization of these calls.
4055*4882a593Smuzhiyun *
4056*4882a593Smuzhiyun * The name 'carrier' is inappropriate; these functions should really be
4057*4882a593Smuzhiyun * called netif_lowerlayer_*() because they represent the state of any
4058*4882a593Smuzhiyun * kind of lower layer not just hardware media.
4059*4882a593Smuzhiyun */
4060*4882a593Smuzhiyun
4061*4882a593Smuzhiyun void linkwatch_init_dev(struct net_device *dev);
4062*4882a593Smuzhiyun void linkwatch_fire_event(struct net_device *dev);
4063*4882a593Smuzhiyun void linkwatch_forget_dev(struct net_device *dev);
4064*4882a593Smuzhiyun
4065*4882a593Smuzhiyun /**
4066*4882a593Smuzhiyun * netif_carrier_ok - test if carrier present
4067*4882a593Smuzhiyun * @dev: network device
4068*4882a593Smuzhiyun *
4069*4882a593Smuzhiyun * Check if carrier is present on device
4070*4882a593Smuzhiyun */
4071*4882a593Smuzhiyun static inline bool netif_carrier_ok(const struct net_device *dev)
4072*4882a593Smuzhiyun {
4073*4882a593Smuzhiyun return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
4074*4882a593Smuzhiyun }
4075*4882a593Smuzhiyun
4076*4882a593Smuzhiyun unsigned long dev_trans_start(struct net_device *dev);
4077*4882a593Smuzhiyun
4078*4882a593Smuzhiyun void __netdev_watchdog_up(struct net_device *dev);
4079*4882a593Smuzhiyun
4080*4882a593Smuzhiyun void netif_carrier_on(struct net_device *dev);
4081*4882a593Smuzhiyun
4082*4882a593Smuzhiyun void netif_carrier_off(struct net_device *dev);
4083*4882a593Smuzhiyun
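/*
 * Illustrative sketch: a driver's link interrupt or PHY state callback
 * reflects the physical link into the stack's carrier state:
 *
 *	if (link_up)
 *		netif_carrier_on(dev);
 *	else
 *		netif_carrier_off(dev);
 */
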
4084*4882a593Smuzhiyun /**
4085*4882a593Smuzhiyun * netif_dormant_on - mark device as dormant.
4086*4882a593Smuzhiyun * @dev: network device
4087*4882a593Smuzhiyun *
4088*4882a593Smuzhiyun * Mark device as dormant (as per RFC2863).
4089*4882a593Smuzhiyun *
4090*4882a593Smuzhiyun * The dormant state indicates that the relevant interface is not
4091*4882a593Smuzhiyun * actually in a condition to pass packets (i.e., it is not 'up') but is
4092*4882a593Smuzhiyun * in a "pending" state, waiting for some external event. For "on-
4093*4882a593Smuzhiyun * demand" interfaces, this new state identifies the situation where the
4094*4882a593Smuzhiyun * interface is waiting for events to place it in the up state.
4095*4882a593Smuzhiyun */
4096*4882a593Smuzhiyun static inline void netif_dormant_on(struct net_device *dev)
4097*4882a593Smuzhiyun {
4098*4882a593Smuzhiyun if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
4099*4882a593Smuzhiyun linkwatch_fire_event(dev);
4100*4882a593Smuzhiyun }
4101*4882a593Smuzhiyun
4102*4882a593Smuzhiyun /**
4103*4882a593Smuzhiyun * netif_dormant_off - set device as not dormant.
4104*4882a593Smuzhiyun * @dev: network device
4105*4882a593Smuzhiyun *
4106*4882a593Smuzhiyun * Device is not in dormant state.
4107*4882a593Smuzhiyun */
4108*4882a593Smuzhiyun static inline void netif_dormant_off(struct net_device *dev)
4109*4882a593Smuzhiyun {
4110*4882a593Smuzhiyun if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
4111*4882a593Smuzhiyun linkwatch_fire_event(dev);
4112*4882a593Smuzhiyun }
4113*4882a593Smuzhiyun
4114*4882a593Smuzhiyun /**
4115*4882a593Smuzhiyun * netif_dormant - test if device is dormant
4116*4882a593Smuzhiyun * @dev: network device
4117*4882a593Smuzhiyun *
4118*4882a593Smuzhiyun * Check if device is dormant.
4119*4882a593Smuzhiyun */
4120*4882a593Smuzhiyun static inline bool netif_dormant(const struct net_device *dev)
4121*4882a593Smuzhiyun {
4122*4882a593Smuzhiyun return test_bit(__LINK_STATE_DORMANT, &dev->state);
4123*4882a593Smuzhiyun }
4124*4882a593Smuzhiyun
4125*4882a593Smuzhiyun
4126*4882a593Smuzhiyun /**
4127*4882a593Smuzhiyun * netif_testing_on - mark device as under test.
4128*4882a593Smuzhiyun * @dev: network device
4129*4882a593Smuzhiyun *
4130*4882a593Smuzhiyun * Mark device as under test (as per RFC2863).
4131*4882a593Smuzhiyun *
4132*4882a593Smuzhiyun * The testing state indicates that some test(s) must be performed on
4133*4882a593Smuzhiyun * the interface. After completion of the test, the interface state
4134*4882a593Smuzhiyun * will change to up, dormant, or down, as appropriate.
4135*4882a593Smuzhiyun */
4136*4882a593Smuzhiyun static inline void netif_testing_on(struct net_device *dev)
4137*4882a593Smuzhiyun {
4138*4882a593Smuzhiyun if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state))
4139*4882a593Smuzhiyun linkwatch_fire_event(dev);
4140*4882a593Smuzhiyun }
4141*4882a593Smuzhiyun
4142*4882a593Smuzhiyun /**
4143*4882a593Smuzhiyun * netif_testing_off - set device as not under test.
4144*4882a593Smuzhiyun * @dev: network device
4145*4882a593Smuzhiyun *
4146*4882a593Smuzhiyun * Device is not in testing state.
4147*4882a593Smuzhiyun */
4148*4882a593Smuzhiyun static inline void netif_testing_off(struct net_device *dev)
4149*4882a593Smuzhiyun {
4150*4882a593Smuzhiyun if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state))
4151*4882a593Smuzhiyun linkwatch_fire_event(dev);
4152*4882a593Smuzhiyun }
4153*4882a593Smuzhiyun
4154*4882a593Smuzhiyun /**
4155*4882a593Smuzhiyun * netif_testing - test if device is under test
4156*4882a593Smuzhiyun * @dev: network device
4157*4882a593Smuzhiyun *
4158*4882a593Smuzhiyun * Check if device is under test
4159*4882a593Smuzhiyun */
4160*4882a593Smuzhiyun static inline bool netif_testing(const struct net_device *dev)
4161*4882a593Smuzhiyun {
4162*4882a593Smuzhiyun return test_bit(__LINK_STATE_TESTING, &dev->state);
4163*4882a593Smuzhiyun }
4164*4882a593Smuzhiyun
4165*4882a593Smuzhiyun
4166*4882a593Smuzhiyun /**
4167*4882a593Smuzhiyun * netif_oper_up - test if device is operational
4168*4882a593Smuzhiyun * @dev: network device
4169*4882a593Smuzhiyun *
4170*4882a593Smuzhiyun * Check if carrier is operational
4171*4882a593Smuzhiyun */
4172*4882a593Smuzhiyun static inline bool netif_oper_up(const struct net_device *dev)
4173*4882a593Smuzhiyun {
4174*4882a593Smuzhiyun return (dev->operstate == IF_OPER_UP ||
4175*4882a593Smuzhiyun dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
4176*4882a593Smuzhiyun }
4177*4882a593Smuzhiyun
4178*4882a593Smuzhiyun /**
4179*4882a593Smuzhiyun * netif_device_present - is device available or removed
4180*4882a593Smuzhiyun * @dev: network device
4181*4882a593Smuzhiyun *
4182*4882a593Smuzhiyun * Check if device has not been removed from system.
4183*4882a593Smuzhiyun */
4184*4882a593Smuzhiyun static inline bool netif_device_present(struct net_device *dev)
4185*4882a593Smuzhiyun {
4186*4882a593Smuzhiyun return test_bit(__LINK_STATE_PRESENT, &dev->state);
4187*4882a593Smuzhiyun }
4188*4882a593Smuzhiyun
4189*4882a593Smuzhiyun void netif_device_detach(struct net_device *dev);
4190*4882a593Smuzhiyun
4191*4882a593Smuzhiyun void netif_device_attach(struct net_device *dev);
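
/*
 * Illustrative sketch: suspend/resume handlers typically bracket the
 * hardware teardown with these helpers so the stack stops touching the
 * device while it is powered down:
 *
 *	// suspend path
 *	netif_device_detach(dev);
 *	...
 *	// resume path, after the hardware is reinitialized
 *	netif_device_attach(dev);
 */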
4192*4882a593Smuzhiyun
4193*4882a593Smuzhiyun /*
4194*4882a593Smuzhiyun * Network interface message level settings
4195*4882a593Smuzhiyun */
4196*4882a593Smuzhiyun
4197*4882a593Smuzhiyun enum {
4198*4882a593Smuzhiyun NETIF_MSG_DRV_BIT,
4199*4882a593Smuzhiyun NETIF_MSG_PROBE_BIT,
4200*4882a593Smuzhiyun NETIF_MSG_LINK_BIT,
4201*4882a593Smuzhiyun NETIF_MSG_TIMER_BIT,
4202*4882a593Smuzhiyun NETIF_MSG_IFDOWN_BIT,
4203*4882a593Smuzhiyun NETIF_MSG_IFUP_BIT,
4204*4882a593Smuzhiyun NETIF_MSG_RX_ERR_BIT,
4205*4882a593Smuzhiyun NETIF_MSG_TX_ERR_BIT,
4206*4882a593Smuzhiyun NETIF_MSG_TX_QUEUED_BIT,
4207*4882a593Smuzhiyun NETIF_MSG_INTR_BIT,
4208*4882a593Smuzhiyun NETIF_MSG_TX_DONE_BIT,
4209*4882a593Smuzhiyun NETIF_MSG_RX_STATUS_BIT,
4210*4882a593Smuzhiyun NETIF_MSG_PKTDATA_BIT,
4211*4882a593Smuzhiyun NETIF_MSG_HW_BIT,
4212*4882a593Smuzhiyun NETIF_MSG_WOL_BIT,
4213*4882a593Smuzhiyun
4214*4882a593Smuzhiyun /* When you add a new bit above, update netif_msg_class_names array
4215*4882a593Smuzhiyun * in net/ethtool/common.c
4216*4882a593Smuzhiyun */
4217*4882a593Smuzhiyun NETIF_MSG_CLASS_COUNT,
4218*4882a593Smuzhiyun };
4219*4882a593Smuzhiyun /* Both ethtool_ops interface and internal driver implementation use u32 */
4220*4882a593Smuzhiyun static_assert(NETIF_MSG_CLASS_COUNT <= 32);
4221*4882a593Smuzhiyun
4222*4882a593Smuzhiyun #define __NETIF_MSG_BIT(bit) ((u32)1 << (bit))
4223*4882a593Smuzhiyun #define __NETIF_MSG(name) __NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT)
4224*4882a593Smuzhiyun
4225*4882a593Smuzhiyun #define NETIF_MSG_DRV __NETIF_MSG(DRV)
4226*4882a593Smuzhiyun #define NETIF_MSG_PROBE __NETIF_MSG(PROBE)
4227*4882a593Smuzhiyun #define NETIF_MSG_LINK __NETIF_MSG(LINK)
4228*4882a593Smuzhiyun #define NETIF_MSG_TIMER __NETIF_MSG(TIMER)
4229*4882a593Smuzhiyun #define NETIF_MSG_IFDOWN __NETIF_MSG(IFDOWN)
4230*4882a593Smuzhiyun #define NETIF_MSG_IFUP __NETIF_MSG(IFUP)
4231*4882a593Smuzhiyun #define NETIF_MSG_RX_ERR __NETIF_MSG(RX_ERR)
4232*4882a593Smuzhiyun #define NETIF_MSG_TX_ERR __NETIF_MSG(TX_ERR)
4233*4882a593Smuzhiyun #define NETIF_MSG_TX_QUEUED __NETIF_MSG(TX_QUEUED)
4234*4882a593Smuzhiyun #define NETIF_MSG_INTR __NETIF_MSG(INTR)
4235*4882a593Smuzhiyun #define NETIF_MSG_TX_DONE __NETIF_MSG(TX_DONE)
4236*4882a593Smuzhiyun #define NETIF_MSG_RX_STATUS __NETIF_MSG(RX_STATUS)
4237*4882a593Smuzhiyun #define NETIF_MSG_PKTDATA __NETIF_MSG(PKTDATA)
4238*4882a593Smuzhiyun #define NETIF_MSG_HW __NETIF_MSG(HW)
4239*4882a593Smuzhiyun #define NETIF_MSG_WOL __NETIF_MSG(WOL)
4240*4882a593Smuzhiyun
4241*4882a593Smuzhiyun #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
4242*4882a593Smuzhiyun #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
4243*4882a593Smuzhiyun #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
4244*4882a593Smuzhiyun #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
4245*4882a593Smuzhiyun #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
4246*4882a593Smuzhiyun #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
4247*4882a593Smuzhiyun #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
4248*4882a593Smuzhiyun #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
4249*4882a593Smuzhiyun #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
4250*4882a593Smuzhiyun #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
4251*4882a593Smuzhiyun #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
4252*4882a593Smuzhiyun #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
4253*4882a593Smuzhiyun #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
4254*4882a593Smuzhiyun #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
4255*4882a593Smuzhiyun #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
4256*4882a593Smuzhiyun
4257*4882a593Smuzhiyun static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
4258*4882a593Smuzhiyun {
4259*4882a593Smuzhiyun /* use default */
4260*4882a593Smuzhiyun if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
4261*4882a593Smuzhiyun return default_msg_enable_bits;
4262*4882a593Smuzhiyun if (debug_value == 0) /* no output */
4263*4882a593Smuzhiyun return 0;
4264*4882a593Smuzhiyun /* set low N bits */
4265*4882a593Smuzhiyun return (1U << debug_value) - 1;
4266*4882a593Smuzhiyun }
4267*4882a593Smuzhiyun
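/*
 * Illustrative sketch, not part of this header: a driver usually exposes a
 * "debug" module parameter and converts it with netif_msg_init() at probe
 * time; the netif_msg_*() tests above then gate its logging. The names
 * debug, DEFAULT_MSG, priv and priv->msg_enable are hypothetical driver
 * state.
 *
 *	static int debug = -1;		(-1 means "use DEFAULT_MSG")
 *	module_param(debug, int, 0644);
 *
 *	#define DEFAULT_MSG (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
 *
 *	In the probe routine:
 *		priv->msg_enable = netif_msg_init(debug, DEFAULT_MSG);
 *		if (netif_msg_probe(priv))
 *			netdev_info(priv->netdev, "probe complete\n");
 */
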
4268*4882a593Smuzhiyun static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
4269*4882a593Smuzhiyun {
4270*4882a593Smuzhiyun spin_lock(&txq->_xmit_lock);
4271*4882a593Smuzhiyun /* Pairs with READ_ONCE() in __dev_queue_xmit() */
4272*4882a593Smuzhiyun WRITE_ONCE(txq->xmit_lock_owner, cpu);
4273*4882a593Smuzhiyun }
4274*4882a593Smuzhiyun
4275*4882a593Smuzhiyun static inline bool __netif_tx_acquire(struct netdev_queue *txq)
4276*4882a593Smuzhiyun {
4277*4882a593Smuzhiyun __acquire(&txq->_xmit_lock);
4278*4882a593Smuzhiyun return true;
4279*4882a593Smuzhiyun }
4280*4882a593Smuzhiyun
4281*4882a593Smuzhiyun static inline void __netif_tx_release(struct netdev_queue *txq)
4282*4882a593Smuzhiyun {
4283*4882a593Smuzhiyun __release(&txq->_xmit_lock);
4284*4882a593Smuzhiyun }
4285*4882a593Smuzhiyun
4286*4882a593Smuzhiyun static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
4287*4882a593Smuzhiyun {
4288*4882a593Smuzhiyun spin_lock_bh(&txq->_xmit_lock);
4289*4882a593Smuzhiyun /* Pairs with READ_ONCE() in __dev_queue_xmit() */
4290*4882a593Smuzhiyun WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
4291*4882a593Smuzhiyun }
4292*4882a593Smuzhiyun
4293*4882a593Smuzhiyun static inline bool __netif_tx_trylock(struct netdev_queue *txq)
4294*4882a593Smuzhiyun {
4295*4882a593Smuzhiyun bool ok = spin_trylock(&txq->_xmit_lock);
4296*4882a593Smuzhiyun
4297*4882a593Smuzhiyun if (likely(ok)) {
4298*4882a593Smuzhiyun /* Pairs with READ_ONCE() in __dev_queue_xmit() */
4299*4882a593Smuzhiyun WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
4300*4882a593Smuzhiyun }
4301*4882a593Smuzhiyun return ok;
4302*4882a593Smuzhiyun }
4303*4882a593Smuzhiyun
4304*4882a593Smuzhiyun static inline void __netif_tx_unlock(struct netdev_queue *txq)
4305*4882a593Smuzhiyun {
4306*4882a593Smuzhiyun /* Pairs with READ_ONCE() in __dev_queue_xmit() */
4307*4882a593Smuzhiyun WRITE_ONCE(txq->xmit_lock_owner, -1);
4308*4882a593Smuzhiyun spin_unlock(&txq->_xmit_lock);
4309*4882a593Smuzhiyun }
4310*4882a593Smuzhiyun
4311*4882a593Smuzhiyun static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
4312*4882a593Smuzhiyun {
4313*4882a593Smuzhiyun /* Pairs with READ_ONCE() in __dev_queue_xmit() */
4314*4882a593Smuzhiyun WRITE_ONCE(txq->xmit_lock_owner, -1);
4315*4882a593Smuzhiyun spin_unlock_bh(&txq->_xmit_lock);
4316*4882a593Smuzhiyun }
4317*4882a593Smuzhiyun
4318*4882a593Smuzhiyun static inline void txq_trans_update(struct netdev_queue *txq)
4319*4882a593Smuzhiyun {
4320*4882a593Smuzhiyun if (txq->xmit_lock_owner != -1)
4321*4882a593Smuzhiyun txq->trans_start = jiffies;
4322*4882a593Smuzhiyun }
4323*4882a593Smuzhiyun
4324*4882a593Smuzhiyun /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */
4325*4882a593Smuzhiyun static inline void netif_trans_update(struct net_device *dev)
4326*4882a593Smuzhiyun {
4327*4882a593Smuzhiyun struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
4328*4882a593Smuzhiyun
4329*4882a593Smuzhiyun if (txq->trans_start != jiffies)
4330*4882a593Smuzhiyun txq->trans_start = jiffies;
4331*4882a593Smuzhiyun }
4332*4882a593Smuzhiyun
4333*4882a593Smuzhiyun /**
4334*4882a593Smuzhiyun * netif_tx_lock - grab network device transmit lock
4335*4882a593Smuzhiyun * @dev: network device
4336*4882a593Smuzhiyun *
4337*4882a593Smuzhiyun * Get network device transmit lock
4338*4882a593Smuzhiyun */
4339*4882a593Smuzhiyun static inline void netif_tx_lock(struct net_device *dev)
4340*4882a593Smuzhiyun {
4341*4882a593Smuzhiyun unsigned int i;
4342*4882a593Smuzhiyun int cpu;
4343*4882a593Smuzhiyun
4344*4882a593Smuzhiyun spin_lock(&dev->tx_global_lock);
4345*4882a593Smuzhiyun cpu = smp_processor_id();
4346*4882a593Smuzhiyun for (i = 0; i < dev->num_tx_queues; i++) {
4347*4882a593Smuzhiyun struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
4348*4882a593Smuzhiyun
4349*4882a593Smuzhiyun /* We are the only thread of execution doing a
4350*4882a593Smuzhiyun * freeze, but we have to grab the _xmit_lock in
4351*4882a593Smuzhiyun * order to synchronize with threads which are in
4352*4882a593Smuzhiyun * the ->hard_start_xmit() handler and already
4353*4882a593Smuzhiyun * checked the frozen bit.
4354*4882a593Smuzhiyun */
4355*4882a593Smuzhiyun __netif_tx_lock(txq, cpu);
4356*4882a593Smuzhiyun set_bit(__QUEUE_STATE_FROZEN, &txq->state);
4357*4882a593Smuzhiyun __netif_tx_unlock(txq);
4358*4882a593Smuzhiyun }
4359*4882a593Smuzhiyun }
4360*4882a593Smuzhiyun
4361*4882a593Smuzhiyun static inline void netif_tx_lock_bh(struct net_device *dev)
4362*4882a593Smuzhiyun {
4363*4882a593Smuzhiyun local_bh_disable();
4364*4882a593Smuzhiyun netif_tx_lock(dev);
4365*4882a593Smuzhiyun }
4366*4882a593Smuzhiyun
4367*4882a593Smuzhiyun static inline void netif_tx_unlock(struct net_device *dev)
4368*4882a593Smuzhiyun {
4369*4882a593Smuzhiyun unsigned int i;
4370*4882a593Smuzhiyun
4371*4882a593Smuzhiyun for (i = 0; i < dev->num_tx_queues; i++) {
4372*4882a593Smuzhiyun struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
4373*4882a593Smuzhiyun
4374*4882a593Smuzhiyun /* No need to grab the _xmit_lock here. If the
4375*4882a593Smuzhiyun * queue is not stopped for another reason, we
4376*4882a593Smuzhiyun * force a schedule.
4377*4882a593Smuzhiyun */
4378*4882a593Smuzhiyun clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
4379*4882a593Smuzhiyun netif_schedule_queue(txq);
4380*4882a593Smuzhiyun }
4381*4882a593Smuzhiyun spin_unlock(&dev->tx_global_lock);
4382*4882a593Smuzhiyun }
4383*4882a593Smuzhiyun
4384*4882a593Smuzhiyun static inline void netif_tx_unlock_bh(struct net_device *dev)
4385*4882a593Smuzhiyun {
4386*4882a593Smuzhiyun netif_tx_unlock(dev);
4387*4882a593Smuzhiyun local_bh_enable();
4388*4882a593Smuzhiyun }
4389*4882a593Smuzhiyun
4390*4882a593Smuzhiyun #define HARD_TX_LOCK(dev, txq, cpu) { \
4391*4882a593Smuzhiyun if ((dev->features & NETIF_F_LLTX) == 0) { \
4392*4882a593Smuzhiyun __netif_tx_lock(txq, cpu); \
4393*4882a593Smuzhiyun } else { \
4394*4882a593Smuzhiyun __netif_tx_acquire(txq); \
4395*4882a593Smuzhiyun } \
4396*4882a593Smuzhiyun }
4397*4882a593Smuzhiyun
4398*4882a593Smuzhiyun #define HARD_TX_TRYLOCK(dev, txq) \
4399*4882a593Smuzhiyun (((dev->features & NETIF_F_LLTX) == 0) ? \
4400*4882a593Smuzhiyun __netif_tx_trylock(txq) : \
4401*4882a593Smuzhiyun __netif_tx_acquire(txq))
4402*4882a593Smuzhiyun
4403*4882a593Smuzhiyun #define HARD_TX_UNLOCK(dev, txq) { \
4404*4882a593Smuzhiyun if ((dev->features & NETIF_F_LLTX) == 0) { \
4405*4882a593Smuzhiyun __netif_tx_unlock(txq); \
4406*4882a593Smuzhiyun } else { \
4407*4882a593Smuzhiyun __netif_tx_release(txq); \
4408*4882a593Smuzhiyun } \
4409*4882a593Smuzhiyun }
4410*4882a593Smuzhiyun
4411*4882a593Smuzhiyun static inline void netif_tx_disable(struct net_device *dev)
4412*4882a593Smuzhiyun {
4413*4882a593Smuzhiyun unsigned int i;
4414*4882a593Smuzhiyun int cpu;
4415*4882a593Smuzhiyun
4416*4882a593Smuzhiyun local_bh_disable();
4417*4882a593Smuzhiyun cpu = smp_processor_id();
4418*4882a593Smuzhiyun spin_lock(&dev->tx_global_lock);
4419*4882a593Smuzhiyun for (i = 0; i < dev->num_tx_queues; i++) {
4420*4882a593Smuzhiyun struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
4421*4882a593Smuzhiyun
4422*4882a593Smuzhiyun __netif_tx_lock(txq, cpu);
4423*4882a593Smuzhiyun netif_tx_stop_queue(txq);
4424*4882a593Smuzhiyun __netif_tx_unlock(txq);
4425*4882a593Smuzhiyun }
4426*4882a593Smuzhiyun spin_unlock(&dev->tx_global_lock);
4427*4882a593Smuzhiyun local_bh_enable();
4428*4882a593Smuzhiyun }
4429*4882a593Smuzhiyun
4430*4882a593Smuzhiyun static inline void netif_addr_lock(struct net_device *dev)
4431*4882a593Smuzhiyun {
4432*4882a593Smuzhiyun unsigned char nest_level = 0;
4433*4882a593Smuzhiyun
4434*4882a593Smuzhiyun #ifdef CONFIG_LOCKDEP
4435*4882a593Smuzhiyun nest_level = dev->nested_level;
4436*4882a593Smuzhiyun #endif
4437*4882a593Smuzhiyun spin_lock_nested(&dev->addr_list_lock, nest_level);
4438*4882a593Smuzhiyun }
4439*4882a593Smuzhiyun
4440*4882a593Smuzhiyun static inline void netif_addr_lock_bh(struct net_device *dev)
4441*4882a593Smuzhiyun {
4442*4882a593Smuzhiyun unsigned char nest_level = 0;
4443*4882a593Smuzhiyun
4444*4882a593Smuzhiyun #ifdef CONFIG_LOCKDEP
4445*4882a593Smuzhiyun nest_level = dev->nested_level;
4446*4882a593Smuzhiyun #endif
4447*4882a593Smuzhiyun local_bh_disable();
4448*4882a593Smuzhiyun spin_lock_nested(&dev->addr_list_lock, nest_level);
4449*4882a593Smuzhiyun }
4450*4882a593Smuzhiyun
4451*4882a593Smuzhiyun static inline void netif_addr_unlock(struct net_device *dev)
4452*4882a593Smuzhiyun {
4453*4882a593Smuzhiyun spin_unlock(&dev->addr_list_lock);
4454*4882a593Smuzhiyun }
4455*4882a593Smuzhiyun
4456*4882a593Smuzhiyun static inline void netif_addr_unlock_bh(struct net_device *dev)
4457*4882a593Smuzhiyun {
4458*4882a593Smuzhiyun spin_unlock_bh(&dev->addr_list_lock);
4459*4882a593Smuzhiyun }
4460*4882a593Smuzhiyun
4461*4882a593Smuzhiyun /*
4462*4882a593Smuzhiyun * dev_addrs walker. Should be used only for read access. Call with
4463*4882a593Smuzhiyun * rcu_read_lock held.
4464*4882a593Smuzhiyun */
4465*4882a593Smuzhiyun #define for_each_dev_addr(dev, ha) \
4466*4882a593Smuzhiyun list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
4467*4882a593Smuzhiyun
4468*4882a593Smuzhiyun /* These functions live elsewhere (drivers/net/net_init.c, but related) */
4469*4882a593Smuzhiyun
4470*4882a593Smuzhiyun void ether_setup(struct net_device *dev);
4471*4882a593Smuzhiyun
4472*4882a593Smuzhiyun /* Support for loadable net-drivers */
4473*4882a593Smuzhiyun struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
4474*4882a593Smuzhiyun unsigned char name_assign_type,
4475*4882a593Smuzhiyun void (*setup)(struct net_device *),
4476*4882a593Smuzhiyun unsigned int txqs, unsigned int rxqs);
4477*4882a593Smuzhiyun #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
4478*4882a593Smuzhiyun alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
4479*4882a593Smuzhiyun
4480*4882a593Smuzhiyun #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
4481*4882a593Smuzhiyun alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
4482*4882a593Smuzhiyun count)
4483*4882a593Smuzhiyun
4484*4882a593Smuzhiyun int register_netdev(struct net_device *dev);
4485*4882a593Smuzhiyun void unregister_netdev(struct net_device *dev);
4486*4882a593Smuzhiyun
4487*4882a593Smuzhiyun int devm_register_netdev(struct device *dev, struct net_device *ndev);
4488*4882a593Smuzhiyun
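/*
 * Illustrative sketch, not part of this header: allocating and registering a
 * simple Ethernet-style device with the helpers above. Error handling is
 * trimmed and struct my_priv / my_netdev_ops are hypothetical.
 *
 *	struct net_device *ndev;
 *
 *	ndev = alloc_netdev(sizeof(struct my_priv), "myeth%d",
 *			    NET_NAME_UNKNOWN, ether_setup);
 *	if (!ndev)
 *		return -ENOMEM;
 *	ndev->netdev_ops = &my_netdev_ops;
 *	if (register_netdev(ndev)) {
 *		free_netdev(ndev);
 *		return -ENODEV;
 *	}
 *
 *	Teardown mirrors it: unregister_netdev(ndev); free_netdev(ndev);
 */
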
4489*4882a593Smuzhiyun /* General hardware address lists handling functions */
4490*4882a593Smuzhiyun int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
4491*4882a593Smuzhiyun struct netdev_hw_addr_list *from_list, int addr_len);
4492*4882a593Smuzhiyun void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
4493*4882a593Smuzhiyun struct netdev_hw_addr_list *from_list, int addr_len);
4494*4882a593Smuzhiyun int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
4495*4882a593Smuzhiyun struct net_device *dev,
4496*4882a593Smuzhiyun int (*sync)(struct net_device *, const unsigned char *),
4497*4882a593Smuzhiyun int (*unsync)(struct net_device *,
4498*4882a593Smuzhiyun const unsigned char *));
4499*4882a593Smuzhiyun int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
4500*4882a593Smuzhiyun struct net_device *dev,
4501*4882a593Smuzhiyun int (*sync)(struct net_device *,
4502*4882a593Smuzhiyun const unsigned char *, int),
4503*4882a593Smuzhiyun int (*unsync)(struct net_device *,
4504*4882a593Smuzhiyun const unsigned char *, int));
4505*4882a593Smuzhiyun void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
4506*4882a593Smuzhiyun struct net_device *dev,
4507*4882a593Smuzhiyun int (*unsync)(struct net_device *,
4508*4882a593Smuzhiyun const unsigned char *, int));
4509*4882a593Smuzhiyun void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
4510*4882a593Smuzhiyun struct net_device *dev,
4511*4882a593Smuzhiyun int (*unsync)(struct net_device *,
4512*4882a593Smuzhiyun const unsigned char *));
4513*4882a593Smuzhiyun void __hw_addr_init(struct netdev_hw_addr_list *list);
4514*4882a593Smuzhiyun
4515*4882a593Smuzhiyun /* Functions used for device addresses handling */
4516*4882a593Smuzhiyun int dev_addr_add(struct net_device *dev, const unsigned char *addr,
4517*4882a593Smuzhiyun unsigned char addr_type);
4518*4882a593Smuzhiyun int dev_addr_del(struct net_device *dev, const unsigned char *addr,
4519*4882a593Smuzhiyun unsigned char addr_type);
4520*4882a593Smuzhiyun void dev_addr_flush(struct net_device *dev);
4521*4882a593Smuzhiyun int dev_addr_init(struct net_device *dev);
4522*4882a593Smuzhiyun
4523*4882a593Smuzhiyun /* Functions used for unicast addresses handling */
4524*4882a593Smuzhiyun int dev_uc_add(struct net_device *dev, const unsigned char *addr);
4525*4882a593Smuzhiyun int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
4526*4882a593Smuzhiyun int dev_uc_del(struct net_device *dev, const unsigned char *addr);
4527*4882a593Smuzhiyun int dev_uc_sync(struct net_device *to, struct net_device *from);
4528*4882a593Smuzhiyun int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
4529*4882a593Smuzhiyun void dev_uc_unsync(struct net_device *to, struct net_device *from);
4530*4882a593Smuzhiyun void dev_uc_flush(struct net_device *dev);
4531*4882a593Smuzhiyun void dev_uc_init(struct net_device *dev);
4532*4882a593Smuzhiyun
4533*4882a593Smuzhiyun /**
4534*4882a593Smuzhiyun * __dev_uc_sync - Synchronize device's unicast list
4535*4882a593Smuzhiyun * @dev: device to sync
4536*4882a593Smuzhiyun * @sync: function to call if address should be added
4537*4882a593Smuzhiyun * @unsync: function to call if address should be removed
4538*4882a593Smuzhiyun *
4539*4882a593Smuzhiyun * Add newly added addresses to the interface, and release
4540*4882a593Smuzhiyun * addresses that have been deleted.
4541*4882a593Smuzhiyun */
4542*4882a593Smuzhiyun static inline int __dev_uc_sync(struct net_device *dev,
4543*4882a593Smuzhiyun int (*sync)(struct net_device *,
4544*4882a593Smuzhiyun const unsigned char *),
4545*4882a593Smuzhiyun int (*unsync)(struct net_device *,
4546*4882a593Smuzhiyun const unsigned char *))
4547*4882a593Smuzhiyun {
4548*4882a593Smuzhiyun return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
4549*4882a593Smuzhiyun }
4550*4882a593Smuzhiyun
4551*4882a593Smuzhiyun /**
4552*4882a593Smuzhiyun * __dev_uc_unsync - Remove synchronized addresses from device
4553*4882a593Smuzhiyun * @dev: device to sync
4554*4882a593Smuzhiyun * @unsync: function to call if address should be removed
4555*4882a593Smuzhiyun *
4556*4882a593Smuzhiyun * Remove all addresses that were added to the device by dev_uc_sync().
4557*4882a593Smuzhiyun */
4558*4882a593Smuzhiyun static inline void __dev_uc_unsync(struct net_device *dev,
4559*4882a593Smuzhiyun int (*unsync)(struct net_device *,
4560*4882a593Smuzhiyun const unsigned char *))
4561*4882a593Smuzhiyun {
4562*4882a593Smuzhiyun __hw_addr_unsync_dev(&dev->uc, dev, unsync);
4563*4882a593Smuzhiyun }
4564*4882a593Smuzhiyun
4565*4882a593Smuzhiyun /* Functions used for multicast addresses handling */
4566*4882a593Smuzhiyun int dev_mc_add(struct net_device *dev, const unsigned char *addr);
4567*4882a593Smuzhiyun int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
4568*4882a593Smuzhiyun int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
4569*4882a593Smuzhiyun int dev_mc_del(struct net_device *dev, const unsigned char *addr);
4570*4882a593Smuzhiyun int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
4571*4882a593Smuzhiyun int dev_mc_sync(struct net_device *to, struct net_device *from);
4572*4882a593Smuzhiyun int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
4573*4882a593Smuzhiyun void dev_mc_unsync(struct net_device *to, struct net_device *from);
4574*4882a593Smuzhiyun void dev_mc_flush(struct net_device *dev);
4575*4882a593Smuzhiyun void dev_mc_init(struct net_device *dev);
4576*4882a593Smuzhiyun
4577*4882a593Smuzhiyun /**
4578*4882a593Smuzhiyun * __dev_mc_sync - Synchronize device's multicast list
4579*4882a593Smuzhiyun * @dev: device to sync
4580*4882a593Smuzhiyun * @sync: function to call if address should be added
4581*4882a593Smuzhiyun * @unsync: function to call if address should be removed
4582*4882a593Smuzhiyun *
4583*4882a593Smuzhiyun * Add newly added addresses to the interface, and release
4584*4882a593Smuzhiyun * addresses that have been deleted.
4585*4882a593Smuzhiyun */
4586*4882a593Smuzhiyun static inline int __dev_mc_sync(struct net_device *dev,
4587*4882a593Smuzhiyun int (*sync)(struct net_device *,
4588*4882a593Smuzhiyun const unsigned char *),
4589*4882a593Smuzhiyun int (*unsync)(struct net_device *,
4590*4882a593Smuzhiyun const unsigned char *))
4591*4882a593Smuzhiyun {
4592*4882a593Smuzhiyun return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
4593*4882a593Smuzhiyun }
4594*4882a593Smuzhiyun
4595*4882a593Smuzhiyun /**
4596*4882a593Smuzhiyun * __dev_mc_unsync - Remove synchronized addresses from device
4597*4882a593Smuzhiyun * @dev: device to sync
4598*4882a593Smuzhiyun * @unsync: function to call if address should be removed
4599*4882a593Smuzhiyun *
4600*4882a593Smuzhiyun * Remove all addresses that were added to the device by dev_mc_sync().
4601*4882a593Smuzhiyun */
4602*4882a593Smuzhiyun static inline void __dev_mc_unsync(struct net_device *dev,
4603*4882a593Smuzhiyun int (*unsync)(struct net_device *,
4604*4882a593Smuzhiyun const unsigned char *))
4605*4882a593Smuzhiyun {
4606*4882a593Smuzhiyun __hw_addr_unsync_dev(&dev->mc, dev, unsync);
4607*4882a593Smuzhiyun }
4608*4882a593Smuzhiyun
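/*
 * Illustrative sketch, not part of this header: a driver's
 * .ndo_set_rx_mode() typically funnels address list updates into its
 * hardware filter through __dev_uc_sync()/__dev_mc_sync(). The my_*
 * callbacks below are hypothetical; each programs or removes one filter
 * entry and has the form
 *	int my_uc_add(struct net_device *ndev, const unsigned char *addr);
 *
 *	static void my_set_rx_mode(struct net_device *ndev)
 *	{
 *		__dev_uc_sync(ndev, my_uc_add, my_uc_del);
 *		__dev_mc_sync(ndev, my_mc_add, my_mc_del);
 *	}
 */
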
4609*4882a593Smuzhiyun /* Functions used for secondary unicast and multicast support */
4610*4882a593Smuzhiyun void dev_set_rx_mode(struct net_device *dev);
4611*4882a593Smuzhiyun void __dev_set_rx_mode(struct net_device *dev);
4612*4882a593Smuzhiyun int dev_set_promiscuity(struct net_device *dev, int inc);
4613*4882a593Smuzhiyun int dev_set_allmulti(struct net_device *dev, int inc);
4614*4882a593Smuzhiyun void netdev_state_change(struct net_device *dev);
4615*4882a593Smuzhiyun void netdev_notify_peers(struct net_device *dev);
4616*4882a593Smuzhiyun void netdev_features_change(struct net_device *dev);
4617*4882a593Smuzhiyun /* Load a device via the kmod */
4618*4882a593Smuzhiyun void dev_load(struct net *net, const char *name);
4619*4882a593Smuzhiyun struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
4620*4882a593Smuzhiyun struct rtnl_link_stats64 *storage);
4621*4882a593Smuzhiyun void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
4622*4882a593Smuzhiyun const struct net_device_stats *netdev_stats);
4623*4882a593Smuzhiyun void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
4624*4882a593Smuzhiyun const struct pcpu_sw_netstats __percpu *netstats);
4625*4882a593Smuzhiyun
4626*4882a593Smuzhiyun extern int netdev_max_backlog;
4627*4882a593Smuzhiyun extern int netdev_tstamp_prequeue;
4628*4882a593Smuzhiyun extern int weight_p;
4629*4882a593Smuzhiyun extern int dev_weight_rx_bias;
4630*4882a593Smuzhiyun extern int dev_weight_tx_bias;
4631*4882a593Smuzhiyun extern int dev_rx_weight;
4632*4882a593Smuzhiyun extern int dev_tx_weight;
4633*4882a593Smuzhiyun extern int gro_normal_batch;
4634*4882a593Smuzhiyun
4635*4882a593Smuzhiyun enum {
4636*4882a593Smuzhiyun NESTED_SYNC_IMM_BIT,
4637*4882a593Smuzhiyun NESTED_SYNC_TODO_BIT,
4638*4882a593Smuzhiyun };
4639*4882a593Smuzhiyun
4640*4882a593Smuzhiyun #define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit))
4641*4882a593Smuzhiyun #define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT)
4642*4882a593Smuzhiyun
4643*4882a593Smuzhiyun #define NESTED_SYNC_IMM __NESTED_SYNC(IMM)
4644*4882a593Smuzhiyun #define NESTED_SYNC_TODO __NESTED_SYNC(TODO)
4645*4882a593Smuzhiyun
4646*4882a593Smuzhiyun struct netdev_nested_priv {
4647*4882a593Smuzhiyun unsigned char flags;
4648*4882a593Smuzhiyun void *data;
4649*4882a593Smuzhiyun };
4650*4882a593Smuzhiyun
4651*4882a593Smuzhiyun bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
4652*4882a593Smuzhiyun struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4653*4882a593Smuzhiyun struct list_head **iter);
4654*4882a593Smuzhiyun struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4655*4882a593Smuzhiyun struct list_head **iter);
4656*4882a593Smuzhiyun
4657*4882a593Smuzhiyun #ifdef CONFIG_LOCKDEP
4658*4882a593Smuzhiyun static LIST_HEAD(net_unlink_list);
4659*4882a593Smuzhiyun
4660*4882a593Smuzhiyun static inline void net_unlink_todo(struct net_device *dev)
4661*4882a593Smuzhiyun {
4662*4882a593Smuzhiyun if (list_empty(&dev->unlink_list))
4663*4882a593Smuzhiyun list_add_tail(&dev->unlink_list, &net_unlink_list);
4664*4882a593Smuzhiyun }
4665*4882a593Smuzhiyun #endif
4666*4882a593Smuzhiyun
4667*4882a593Smuzhiyun /* iterate through upper list, must be called under RCU read lock */
4668*4882a593Smuzhiyun #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
4669*4882a593Smuzhiyun for (iter = &(dev)->adj_list.upper, \
4670*4882a593Smuzhiyun updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
4671*4882a593Smuzhiyun updev; \
4672*4882a593Smuzhiyun updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
4673*4882a593Smuzhiyun
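/*
 * Illustrative sketch, not part of this header: walking the upper devices
 * (e.g. a bond or bridge stacked on top of this port) must be done under
 * the RCU read lock:
 *
 *	struct net_device *upper;
 *	struct list_head *iter;
 *
 *	rcu_read_lock();
 *	netdev_for_each_upper_dev_rcu(dev, upper, iter)
 *		netdev_info(dev, "upper device: %s\n", upper->name);
 *	rcu_read_unlock();
 */
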
4674*4882a593Smuzhiyun int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
4675*4882a593Smuzhiyun int (*fn)(struct net_device *upper_dev,
4676*4882a593Smuzhiyun struct netdev_nested_priv *priv),
4677*4882a593Smuzhiyun struct netdev_nested_priv *priv);
4678*4882a593Smuzhiyun
4679*4882a593Smuzhiyun bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
4680*4882a593Smuzhiyun struct net_device *upper_dev);
4681*4882a593Smuzhiyun
4682*4882a593Smuzhiyun bool netdev_has_any_upper_dev(struct net_device *dev);
4683*4882a593Smuzhiyun
4684*4882a593Smuzhiyun void *netdev_lower_get_next_private(struct net_device *dev,
4685*4882a593Smuzhiyun struct list_head **iter);
4686*4882a593Smuzhiyun void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4687*4882a593Smuzhiyun struct list_head **iter);
4688*4882a593Smuzhiyun
4689*4882a593Smuzhiyun #define netdev_for_each_lower_private(dev, priv, iter) \
4690*4882a593Smuzhiyun for (iter = (dev)->adj_list.lower.next, \
4691*4882a593Smuzhiyun priv = netdev_lower_get_next_private(dev, &(iter)); \
4692*4882a593Smuzhiyun priv; \
4693*4882a593Smuzhiyun priv = netdev_lower_get_next_private(dev, &(iter)))
4694*4882a593Smuzhiyun
4695*4882a593Smuzhiyun #define netdev_for_each_lower_private_rcu(dev, priv, iter) \
4696*4882a593Smuzhiyun for (iter = &(dev)->adj_list.lower, \
4697*4882a593Smuzhiyun priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
4698*4882a593Smuzhiyun priv; \
4699*4882a593Smuzhiyun priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
4700*4882a593Smuzhiyun
4701*4882a593Smuzhiyun void *netdev_lower_get_next(struct net_device *dev,
4702*4882a593Smuzhiyun struct list_head **iter);
4703*4882a593Smuzhiyun
4704*4882a593Smuzhiyun #define netdev_for_each_lower_dev(dev, ldev, iter) \
4705*4882a593Smuzhiyun for (iter = (dev)->adj_list.lower.next, \
4706*4882a593Smuzhiyun ldev = netdev_lower_get_next(dev, &(iter)); \
4707*4882a593Smuzhiyun ldev; \
4708*4882a593Smuzhiyun ldev = netdev_lower_get_next(dev, &(iter)))
4709*4882a593Smuzhiyun
4710*4882a593Smuzhiyun struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
4711*4882a593Smuzhiyun struct list_head **iter);
4712*4882a593Smuzhiyun int netdev_walk_all_lower_dev(struct net_device *dev,
4713*4882a593Smuzhiyun int (*fn)(struct net_device *lower_dev,
4714*4882a593Smuzhiyun struct netdev_nested_priv *priv),
4715*4882a593Smuzhiyun struct netdev_nested_priv *priv);
4716*4882a593Smuzhiyun int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
4717*4882a593Smuzhiyun int (*fn)(struct net_device *lower_dev,
4718*4882a593Smuzhiyun struct netdev_nested_priv *priv),
4719*4882a593Smuzhiyun struct netdev_nested_priv *priv);
4720*4882a593Smuzhiyun
4721*4882a593Smuzhiyun void *netdev_adjacent_get_private(struct list_head *adj_list);
4722*4882a593Smuzhiyun void *netdev_lower_get_first_private_rcu(struct net_device *dev);
4723*4882a593Smuzhiyun struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
4724*4882a593Smuzhiyun struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
4725*4882a593Smuzhiyun int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev,
4726*4882a593Smuzhiyun struct netlink_ext_ack *extack);
4727*4882a593Smuzhiyun int netdev_master_upper_dev_link(struct net_device *dev,
4728*4882a593Smuzhiyun struct net_device *upper_dev,
4729*4882a593Smuzhiyun void *upper_priv, void *upper_info,
4730*4882a593Smuzhiyun struct netlink_ext_ack *extack);
4731*4882a593Smuzhiyun void netdev_upper_dev_unlink(struct net_device *dev,
4732*4882a593Smuzhiyun struct net_device *upper_dev);
4733*4882a593Smuzhiyun int netdev_adjacent_change_prepare(struct net_device *old_dev,
4734*4882a593Smuzhiyun struct net_device *new_dev,
4735*4882a593Smuzhiyun struct net_device *dev,
4736*4882a593Smuzhiyun struct netlink_ext_ack *extack);
4737*4882a593Smuzhiyun void netdev_adjacent_change_commit(struct net_device *old_dev,
4738*4882a593Smuzhiyun struct net_device *new_dev,
4739*4882a593Smuzhiyun struct net_device *dev);
4740*4882a593Smuzhiyun void netdev_adjacent_change_abort(struct net_device *old_dev,
4741*4882a593Smuzhiyun struct net_device *new_dev,
4742*4882a593Smuzhiyun struct net_device *dev);
4743*4882a593Smuzhiyun void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
4744*4882a593Smuzhiyun void *netdev_lower_dev_get_private(struct net_device *dev,
4745*4882a593Smuzhiyun struct net_device *lower_dev);
4746*4882a593Smuzhiyun void netdev_lower_state_changed(struct net_device *lower_dev,
4747*4882a593Smuzhiyun void *lower_state_info);
4748*4882a593Smuzhiyun
4749*4882a593Smuzhiyun /* RSS keys are 40 or 52 bytes long */
4750*4882a593Smuzhiyun #define NETDEV_RSS_KEY_LEN 52
4751*4882a593Smuzhiyun extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
4752*4882a593Smuzhiyun void netdev_rss_key_fill(void *buffer, size_t len);
4753*4882a593Smuzhiyun
4754*4882a593Smuzhiyun int skb_checksum_help(struct sk_buff *skb);
4755*4882a593Smuzhiyun int skb_crc32c_csum_help(struct sk_buff *skb);
4756*4882a593Smuzhiyun int skb_csum_hwoffload_help(struct sk_buff *skb,
4757*4882a593Smuzhiyun const netdev_features_t features);
4758*4882a593Smuzhiyun
4759*4882a593Smuzhiyun struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
4760*4882a593Smuzhiyun netdev_features_t features, bool tx_path);
4761*4882a593Smuzhiyun struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
4762*4882a593Smuzhiyun netdev_features_t features);
4763*4882a593Smuzhiyun
4764*4882a593Smuzhiyun struct netdev_bonding_info {
4765*4882a593Smuzhiyun ifslave slave;
4766*4882a593Smuzhiyun ifbond master;
4767*4882a593Smuzhiyun };
4768*4882a593Smuzhiyun
4769*4882a593Smuzhiyun struct netdev_notifier_bonding_info {
4770*4882a593Smuzhiyun struct netdev_notifier_info info; /* must be first */
4771*4882a593Smuzhiyun struct netdev_bonding_info bonding_info;
4772*4882a593Smuzhiyun };
4773*4882a593Smuzhiyun
4774*4882a593Smuzhiyun void netdev_bonding_info_change(struct net_device *dev,
4775*4882a593Smuzhiyun struct netdev_bonding_info *bonding_info);
4776*4882a593Smuzhiyun
4777*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
4778*4882a593Smuzhiyun void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data);
4779*4882a593Smuzhiyun #else
4780*4882a593Smuzhiyun static inline void ethtool_notify(struct net_device *dev, unsigned int cmd,
4781*4882a593Smuzhiyun const void *data)
4782*4882a593Smuzhiyun {
4783*4882a593Smuzhiyun }
4784*4882a593Smuzhiyun #endif
4785*4882a593Smuzhiyun
4786*4882a593Smuzhiyun static inline
4787*4882a593Smuzhiyun struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
4788*4882a593Smuzhiyun {
4789*4882a593Smuzhiyun return __skb_gso_segment(skb, features, true);
4790*4882a593Smuzhiyun }
4791*4882a593Smuzhiyun __be16 skb_network_protocol(struct sk_buff *skb, int *depth);
4792*4882a593Smuzhiyun
4793*4882a593Smuzhiyun static inline bool can_checksum_protocol(netdev_features_t features,
4794*4882a593Smuzhiyun __be16 protocol)
4795*4882a593Smuzhiyun {
4796*4882a593Smuzhiyun if (protocol == htons(ETH_P_FCOE))
4797*4882a593Smuzhiyun return !!(features & NETIF_F_FCOE_CRC);
4798*4882a593Smuzhiyun
4799*4882a593Smuzhiyun /* Assume this is an IP checksum (not SCTP CRC) */
4800*4882a593Smuzhiyun
4801*4882a593Smuzhiyun if (features & NETIF_F_HW_CSUM) {
4802*4882a593Smuzhiyun /* Can checksum everything */
4803*4882a593Smuzhiyun return true;
4804*4882a593Smuzhiyun }
4805*4882a593Smuzhiyun
4806*4882a593Smuzhiyun switch (protocol) {
4807*4882a593Smuzhiyun case htons(ETH_P_IP):
4808*4882a593Smuzhiyun return !!(features & NETIF_F_IP_CSUM);
4809*4882a593Smuzhiyun case htons(ETH_P_IPV6):
4810*4882a593Smuzhiyun return !!(features & NETIF_F_IPV6_CSUM);
4811*4882a593Smuzhiyun default:
4812*4882a593Smuzhiyun return false;
4813*4882a593Smuzhiyun }
4814*4882a593Smuzhiyun }
4815*4882a593Smuzhiyun
4816*4882a593Smuzhiyun #ifdef CONFIG_BUG
4817*4882a593Smuzhiyun void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
4818*4882a593Smuzhiyun #else
4819*4882a593Smuzhiyun static inline void netdev_rx_csum_fault(struct net_device *dev,
4820*4882a593Smuzhiyun struct sk_buff *skb)
4821*4882a593Smuzhiyun {
4822*4882a593Smuzhiyun }
4823*4882a593Smuzhiyun #endif
4824*4882a593Smuzhiyun /* rx skb timestamps */
4825*4882a593Smuzhiyun void net_enable_timestamp(void);
4826*4882a593Smuzhiyun void net_disable_timestamp(void);
4827*4882a593Smuzhiyun
4828*4882a593Smuzhiyun #ifdef CONFIG_PROC_FS
4829*4882a593Smuzhiyun int __init dev_proc_init(void);
4830*4882a593Smuzhiyun #else
4831*4882a593Smuzhiyun #define dev_proc_init() 0
4832*4882a593Smuzhiyun #endif
4833*4882a593Smuzhiyun
4834*4882a593Smuzhiyun static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
4835*4882a593Smuzhiyun struct sk_buff *skb, struct net_device *dev,
4836*4882a593Smuzhiyun bool more)
4837*4882a593Smuzhiyun {
4838*4882a593Smuzhiyun __this_cpu_write(softnet_data.xmit.more, more);
4839*4882a593Smuzhiyun return ops->ndo_start_xmit(skb, dev);
4840*4882a593Smuzhiyun }
4841*4882a593Smuzhiyun
4842*4882a593Smuzhiyun static inline bool netdev_xmit_more(void)
4843*4882a593Smuzhiyun {
4844*4882a593Smuzhiyun return __this_cpu_read(softnet_data.xmit.more);
4845*4882a593Smuzhiyun }
4846*4882a593Smuzhiyun
4847*4882a593Smuzhiyun static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
4848*4882a593Smuzhiyun struct netdev_queue *txq, bool more)
4849*4882a593Smuzhiyun {
4850*4882a593Smuzhiyun const struct net_device_ops *ops = dev->netdev_ops;
4851*4882a593Smuzhiyun netdev_tx_t rc;
4852*4882a593Smuzhiyun
4853*4882a593Smuzhiyun rc = __netdev_start_xmit(ops, skb, dev, more);
4854*4882a593Smuzhiyun if (rc == NETDEV_TX_OK)
4855*4882a593Smuzhiyun txq_trans_update(txq);
4856*4882a593Smuzhiyun
4857*4882a593Smuzhiyun return rc;
4858*4882a593Smuzhiyun }
4859*4882a593Smuzhiyun
4860*4882a593Smuzhiyun int netdev_class_create_file_ns(const struct class_attribute *class_attr,
4861*4882a593Smuzhiyun const void *ns);
4862*4882a593Smuzhiyun void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
4863*4882a593Smuzhiyun const void *ns);
4864*4882a593Smuzhiyun
4865*4882a593Smuzhiyun extern const struct kobj_ns_type_operations net_ns_type_operations;
4866*4882a593Smuzhiyun
4867*4882a593Smuzhiyun const char *netdev_drivername(const struct net_device *dev);
4868*4882a593Smuzhiyun
4869*4882a593Smuzhiyun void linkwatch_run_queue(void);
4870*4882a593Smuzhiyun
4871*4882a593Smuzhiyun static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
4872*4882a593Smuzhiyun netdev_features_t f2)
4873*4882a593Smuzhiyun {
4874*4882a593Smuzhiyun if ((f1 ^ f2) & NETIF_F_HW_CSUM) {
4875*4882a593Smuzhiyun if (f1 & NETIF_F_HW_CSUM)
4876*4882a593Smuzhiyun f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4877*4882a593Smuzhiyun else
4878*4882a593Smuzhiyun f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4879*4882a593Smuzhiyun }
4880*4882a593Smuzhiyun
4881*4882a593Smuzhiyun return f1 & f2;
4882*4882a593Smuzhiyun }
4883*4882a593Smuzhiyun
4884*4882a593Smuzhiyun static inline netdev_features_t netdev_get_wanted_features(
4885*4882a593Smuzhiyun struct net_device *dev)
4886*4882a593Smuzhiyun {
4887*4882a593Smuzhiyun return (dev->features & ~dev->hw_features) | dev->wanted_features;
4888*4882a593Smuzhiyun }
4889*4882a593Smuzhiyun netdev_features_t netdev_increment_features(netdev_features_t all,
4890*4882a593Smuzhiyun netdev_features_t one, netdev_features_t mask);
4891*4882a593Smuzhiyun
4892*4882a593Smuzhiyun /* Allow TSO to be used on stacked devices:
4893*4882a593Smuzhiyun * performing the GSO segmentation before the last device
4894*4882a593Smuzhiyun * is a performance improvement.
4895*4882a593Smuzhiyun */
4896*4882a593Smuzhiyun static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
4897*4882a593Smuzhiyun netdev_features_t mask)
4898*4882a593Smuzhiyun {
4899*4882a593Smuzhiyun return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
4900*4882a593Smuzhiyun }
4901*4882a593Smuzhiyun
4902*4882a593Smuzhiyun int __netdev_update_features(struct net_device *dev);
4903*4882a593Smuzhiyun void netdev_update_features(struct net_device *dev);
4904*4882a593Smuzhiyun void netdev_change_features(struct net_device *dev);
4905*4882a593Smuzhiyun
4906*4882a593Smuzhiyun void netif_stacked_transfer_operstate(const struct net_device *rootdev,
4907*4882a593Smuzhiyun struct net_device *dev);
4908*4882a593Smuzhiyun
4909*4882a593Smuzhiyun netdev_features_t passthru_features_check(struct sk_buff *skb,
4910*4882a593Smuzhiyun struct net_device *dev,
4911*4882a593Smuzhiyun netdev_features_t features);
4912*4882a593Smuzhiyun netdev_features_t netif_skb_features(struct sk_buff *skb);
4913*4882a593Smuzhiyun
4914*4882a593Smuzhiyun static inline bool net_gso_ok(netdev_features_t features, int gso_type)
4915*4882a593Smuzhiyun {
4916*4882a593Smuzhiyun netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
4917*4882a593Smuzhiyun
4918*4882a593Smuzhiyun /* check flags correspondence */
4919*4882a593Smuzhiyun BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
4920*4882a593Smuzhiyun BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
4921*4882a593Smuzhiyun BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
4922*4882a593Smuzhiyun BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT));
4923*4882a593Smuzhiyun BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
4924*4882a593Smuzhiyun BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
4925*4882a593Smuzhiyun BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
4926*4882a593Smuzhiyun BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
4927*4882a593Smuzhiyun BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT));
4928*4882a593Smuzhiyun BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT));
4929*4882a593Smuzhiyun BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
4930*4882a593Smuzhiyun BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
4931*4882a593Smuzhiyun BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
4932*4882a593Smuzhiyun BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
4933*4882a593Smuzhiyun BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
4934*4882a593Smuzhiyun BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
4935*4882a593Smuzhiyun BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
4936*4882a593Smuzhiyun BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
4937*4882a593Smuzhiyun BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT));
4938*4882a593Smuzhiyun
4939*4882a593Smuzhiyun return (features & feature) == feature;
4940*4882a593Smuzhiyun }
4941*4882a593Smuzhiyun
4942*4882a593Smuzhiyun static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
4943*4882a593Smuzhiyun {
4944*4882a593Smuzhiyun return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
4945*4882a593Smuzhiyun (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
4946*4882a593Smuzhiyun }
4947*4882a593Smuzhiyun
4948*4882a593Smuzhiyun static inline bool netif_needs_gso(struct sk_buff *skb,
4949*4882a593Smuzhiyun netdev_features_t features)
4950*4882a593Smuzhiyun {
4951*4882a593Smuzhiyun return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
4952*4882a593Smuzhiyun unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
4953*4882a593Smuzhiyun (skb->ip_summed != CHECKSUM_UNNECESSARY)));
4954*4882a593Smuzhiyun }
4955*4882a593Smuzhiyun
4956*4882a593Smuzhiyun static inline void netif_set_gso_max_size(struct net_device *dev,
4957*4882a593Smuzhiyun unsigned int size)
4958*4882a593Smuzhiyun {
4959*4882a593Smuzhiyun dev->gso_max_size = size;
4960*4882a593Smuzhiyun }
4961*4882a593Smuzhiyun
4962*4882a593Smuzhiyun static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
4963*4882a593Smuzhiyun int pulled_hlen, u16 mac_offset,
4964*4882a593Smuzhiyun int mac_len)
4965*4882a593Smuzhiyun {
4966*4882a593Smuzhiyun skb->protocol = protocol;
4967*4882a593Smuzhiyun skb->encapsulation = 1;
4968*4882a593Smuzhiyun skb_push(skb, pulled_hlen);
4969*4882a593Smuzhiyun skb_reset_transport_header(skb);
4970*4882a593Smuzhiyun skb->mac_header = mac_offset;
4971*4882a593Smuzhiyun skb->network_header = skb->mac_header + mac_len;
4972*4882a593Smuzhiyun skb->mac_len = mac_len;
4973*4882a593Smuzhiyun }
4974*4882a593Smuzhiyun
4975*4882a593Smuzhiyun static inline bool netif_is_macsec(const struct net_device *dev)
4976*4882a593Smuzhiyun {
4977*4882a593Smuzhiyun return dev->priv_flags & IFF_MACSEC;
4978*4882a593Smuzhiyun }
4979*4882a593Smuzhiyun
4980*4882a593Smuzhiyun static inline bool netif_is_macvlan(const struct net_device *dev)
4981*4882a593Smuzhiyun {
4982*4882a593Smuzhiyun return dev->priv_flags & IFF_MACVLAN;
4983*4882a593Smuzhiyun }
4984*4882a593Smuzhiyun
4985*4882a593Smuzhiyun static inline bool netif_is_macvlan_port(const struct net_device *dev)
4986*4882a593Smuzhiyun {
4987*4882a593Smuzhiyun return dev->priv_flags & IFF_MACVLAN_PORT;
4988*4882a593Smuzhiyun }
4989*4882a593Smuzhiyun
4990*4882a593Smuzhiyun static inline bool netif_is_bond_master(const struct net_device *dev)
4991*4882a593Smuzhiyun {
4992*4882a593Smuzhiyun return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
4993*4882a593Smuzhiyun }
4994*4882a593Smuzhiyun
4995*4882a593Smuzhiyun static inline bool netif_is_bond_slave(const struct net_device *dev)
4996*4882a593Smuzhiyun {
4997*4882a593Smuzhiyun return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
4998*4882a593Smuzhiyun }
4999*4882a593Smuzhiyun
5000*4882a593Smuzhiyun static inline bool netif_supports_nofcs(struct net_device *dev)
5001*4882a593Smuzhiyun {
5002*4882a593Smuzhiyun return dev->priv_flags & IFF_SUPP_NOFCS;
5003*4882a593Smuzhiyun }
5004*4882a593Smuzhiyun
5005*4882a593Smuzhiyun static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
5006*4882a593Smuzhiyun {
5007*4882a593Smuzhiyun return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
5008*4882a593Smuzhiyun }
5009*4882a593Smuzhiyun
5010*4882a593Smuzhiyun static inline bool netif_is_l3_master(const struct net_device *dev)
5011*4882a593Smuzhiyun {
5012*4882a593Smuzhiyun return dev->priv_flags & IFF_L3MDEV_MASTER;
5013*4882a593Smuzhiyun }
5014*4882a593Smuzhiyun
5015*4882a593Smuzhiyun static inline bool netif_is_l3_slave(const struct net_device *dev)
5016*4882a593Smuzhiyun {
5017*4882a593Smuzhiyun return dev->priv_flags & IFF_L3MDEV_SLAVE;
5018*4882a593Smuzhiyun }
5019*4882a593Smuzhiyun
5020*4882a593Smuzhiyun static inline bool netif_is_bridge_master(const struct net_device *dev)
5021*4882a593Smuzhiyun {
5022*4882a593Smuzhiyun return dev->priv_flags & IFF_EBRIDGE;
5023*4882a593Smuzhiyun }
5024*4882a593Smuzhiyun
5025*4882a593Smuzhiyun static inline bool netif_is_bridge_port(const struct net_device *dev)
5026*4882a593Smuzhiyun {
5027*4882a593Smuzhiyun return dev->priv_flags & IFF_BRIDGE_PORT;
5028*4882a593Smuzhiyun }
5029*4882a593Smuzhiyun
5030*4882a593Smuzhiyun static inline bool netif_is_ovs_master(const struct net_device *dev)
5031*4882a593Smuzhiyun {
5032*4882a593Smuzhiyun return dev->priv_flags & IFF_OPENVSWITCH;
5033*4882a593Smuzhiyun }
5034*4882a593Smuzhiyun
5035*4882a593Smuzhiyun static inline bool netif_is_ovs_port(const struct net_device *dev)
5036*4882a593Smuzhiyun {
5037*4882a593Smuzhiyun return dev->priv_flags & IFF_OVS_DATAPATH;
5038*4882a593Smuzhiyun }
5039*4882a593Smuzhiyun
5040*4882a593Smuzhiyun static inline bool netif_is_any_bridge_port(const struct net_device *dev)
5041*4882a593Smuzhiyun {
5042*4882a593Smuzhiyun return netif_is_bridge_port(dev) || netif_is_ovs_port(dev);
5043*4882a593Smuzhiyun }
5044*4882a593Smuzhiyun
5045*4882a593Smuzhiyun static inline bool netif_is_team_master(const struct net_device *dev)
5046*4882a593Smuzhiyun {
5047*4882a593Smuzhiyun return dev->priv_flags & IFF_TEAM;
5048*4882a593Smuzhiyun }
5049*4882a593Smuzhiyun
5050*4882a593Smuzhiyun static inline bool netif_is_team_port(const struct net_device *dev)
5051*4882a593Smuzhiyun {
5052*4882a593Smuzhiyun return dev->priv_flags & IFF_TEAM_PORT;
5053*4882a593Smuzhiyun }
5054*4882a593Smuzhiyun
5055*4882a593Smuzhiyun static inline bool netif_is_lag_master(const struct net_device *dev)
5056*4882a593Smuzhiyun {
5057*4882a593Smuzhiyun return netif_is_bond_master(dev) || netif_is_team_master(dev);
5058*4882a593Smuzhiyun }
5059*4882a593Smuzhiyun
5060*4882a593Smuzhiyun static inline bool netif_is_lag_port(const struct net_device *dev)
5061*4882a593Smuzhiyun {
5062*4882a593Smuzhiyun return netif_is_bond_slave(dev) || netif_is_team_port(dev);
5063*4882a593Smuzhiyun }
5064*4882a593Smuzhiyun
5065*4882a593Smuzhiyun static inline bool netif_is_rxfh_configured(const struct net_device *dev)
5066*4882a593Smuzhiyun {
5067*4882a593Smuzhiyun return dev->priv_flags & IFF_RXFH_CONFIGURED;
5068*4882a593Smuzhiyun }
5069*4882a593Smuzhiyun
5070*4882a593Smuzhiyun static inline bool netif_is_failover(const struct net_device *dev)
5071*4882a593Smuzhiyun {
5072*4882a593Smuzhiyun return dev->priv_flags & IFF_FAILOVER;
5073*4882a593Smuzhiyun }
5074*4882a593Smuzhiyun
5075*4882a593Smuzhiyun static inline bool netif_is_failover_slave(const struct net_device *dev)
5076*4882a593Smuzhiyun {
5077*4882a593Smuzhiyun return dev->priv_flags & IFF_FAILOVER_SLAVE;
5078*4882a593Smuzhiyun }
5079*4882a593Smuzhiyun
5080*4882a593Smuzhiyun /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
5081*4882a593Smuzhiyun static inline void netif_keep_dst(struct net_device *dev)
5082*4882a593Smuzhiyun {
5083*4882a593Smuzhiyun dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
5084*4882a593Smuzhiyun }
5085*4882a593Smuzhiyun
5086*4882a593Smuzhiyun /* return true if dev can't cope with mtu frames that need vlan tag insertion */
5087*4882a593Smuzhiyun static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
5088*4882a593Smuzhiyun {
5089*4882a593Smuzhiyun /* TODO: reserve and use an additional IFF bit, if we get more users */
5090*4882a593Smuzhiyun return dev->priv_flags & IFF_MACSEC;
5091*4882a593Smuzhiyun }
5092*4882a593Smuzhiyun
5093*4882a593Smuzhiyun extern struct pernet_operations __net_initdata loopback_net_ops;
5094*4882a593Smuzhiyun
5095*4882a593Smuzhiyun /* Logging, debugging and troubleshooting/diagnostic helpers. */
5096*4882a593Smuzhiyun
5097*4882a593Smuzhiyun /* netdev_printk helpers, similar to dev_printk */
5098*4882a593Smuzhiyun
5099*4882a593Smuzhiyun static inline const char *netdev_name(const struct net_device *dev)
5100*4882a593Smuzhiyun {
5101*4882a593Smuzhiyun if (!dev->name[0] || strchr(dev->name, '%'))
5102*4882a593Smuzhiyun return "(unnamed net_device)";
5103*4882a593Smuzhiyun return dev->name;
5104*4882a593Smuzhiyun }
5105*4882a593Smuzhiyun
5106*4882a593Smuzhiyun static inline bool netdev_unregistering(const struct net_device *dev)
5107*4882a593Smuzhiyun {
5108*4882a593Smuzhiyun return dev->reg_state == NETREG_UNREGISTERING;
5109*4882a593Smuzhiyun }
5110*4882a593Smuzhiyun
5111*4882a593Smuzhiyun static inline const char *netdev_reg_state(const struct net_device *dev)
5112*4882a593Smuzhiyun {
5113*4882a593Smuzhiyun switch (dev->reg_state) {
5114*4882a593Smuzhiyun case NETREG_UNINITIALIZED: return " (uninitialized)";
5115*4882a593Smuzhiyun case NETREG_REGISTERED: return "";
5116*4882a593Smuzhiyun case NETREG_UNREGISTERING: return " (unregistering)";
5117*4882a593Smuzhiyun case NETREG_UNREGISTERED: return " (unregistered)";
5118*4882a593Smuzhiyun case NETREG_RELEASED: return " (released)";
5119*4882a593Smuzhiyun case NETREG_DUMMY: return " (dummy)";
5120*4882a593Smuzhiyun }
5121*4882a593Smuzhiyun
5122*4882a593Smuzhiyun WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
5123*4882a593Smuzhiyun return " (unknown)";
5124*4882a593Smuzhiyun }
5125*4882a593Smuzhiyun
5126*4882a593Smuzhiyun __printf(3, 4) __cold
5127*4882a593Smuzhiyun void netdev_printk(const char *level, const struct net_device *dev,
5128*4882a593Smuzhiyun const char *format, ...);
5129*4882a593Smuzhiyun __printf(2, 3) __cold
5130*4882a593Smuzhiyun void netdev_emerg(const struct net_device *dev, const char *format, ...);
5131*4882a593Smuzhiyun __printf(2, 3) __cold
5132*4882a593Smuzhiyun void netdev_alert(const struct net_device *dev, const char *format, ...);
5133*4882a593Smuzhiyun __printf(2, 3) __cold
5134*4882a593Smuzhiyun void netdev_crit(const struct net_device *dev, const char *format, ...);
5135*4882a593Smuzhiyun __printf(2, 3) __cold
5136*4882a593Smuzhiyun void netdev_err(const struct net_device *dev, const char *format, ...);
5137*4882a593Smuzhiyun __printf(2, 3) __cold
5138*4882a593Smuzhiyun void netdev_warn(const struct net_device *dev, const char *format, ...);
5139*4882a593Smuzhiyun __printf(2, 3) __cold
5140*4882a593Smuzhiyun void netdev_notice(const struct net_device *dev, const char *format, ...);
5141*4882a593Smuzhiyun __printf(2, 3) __cold
5142*4882a593Smuzhiyun void netdev_info(const struct net_device *dev, const char *format, ...);
5143*4882a593Smuzhiyun
5144*4882a593Smuzhiyun #define netdev_level_once(level, dev, fmt, ...) \
5145*4882a593Smuzhiyun do { \
5146*4882a593Smuzhiyun static bool __print_once __read_mostly; \
5147*4882a593Smuzhiyun \
5148*4882a593Smuzhiyun if (!__print_once) { \
5149*4882a593Smuzhiyun __print_once = true; \
5150*4882a593Smuzhiyun netdev_printk(level, dev, fmt, ##__VA_ARGS__); \
5151*4882a593Smuzhiyun } \
5152*4882a593Smuzhiyun } while (0)
5153*4882a593Smuzhiyun
5154*4882a593Smuzhiyun #define netdev_emerg_once(dev, fmt, ...) \
5155*4882a593Smuzhiyun netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__)
5156*4882a593Smuzhiyun #define netdev_alert_once(dev, fmt, ...) \
5157*4882a593Smuzhiyun netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__)
5158*4882a593Smuzhiyun #define netdev_crit_once(dev, fmt, ...) \
5159*4882a593Smuzhiyun netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__)
5160*4882a593Smuzhiyun #define netdev_err_once(dev, fmt, ...) \
5161*4882a593Smuzhiyun netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__)
5162*4882a593Smuzhiyun #define netdev_warn_once(dev, fmt, ...) \
5163*4882a593Smuzhiyun netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
5164*4882a593Smuzhiyun #define netdev_notice_once(dev, fmt, ...) \
5165*4882a593Smuzhiyun netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__)
5166*4882a593Smuzhiyun #define netdev_info_once(dev, fmt, ...) \
5167*4882a593Smuzhiyun netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__)
5168*4882a593Smuzhiyun
5169*4882a593Smuzhiyun #define MODULE_ALIAS_NETDEV(device) \
5170*4882a593Smuzhiyun MODULE_ALIAS("netdev-" device)
5171*4882a593Smuzhiyun
5172*4882a593Smuzhiyun #if defined(CONFIG_DYNAMIC_DEBUG) || \
5173*4882a593Smuzhiyun (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
5174*4882a593Smuzhiyun #define netdev_dbg(__dev, format, args...) \
5175*4882a593Smuzhiyun do { \
5176*4882a593Smuzhiyun dynamic_netdev_dbg(__dev, format, ##args); \
5177*4882a593Smuzhiyun } while (0)
5178*4882a593Smuzhiyun #elif defined(DEBUG)
5179*4882a593Smuzhiyun #define netdev_dbg(__dev, format, args...) \
5180*4882a593Smuzhiyun netdev_printk(KERN_DEBUG, __dev, format, ##args)
5181*4882a593Smuzhiyun #else
5182*4882a593Smuzhiyun #define netdev_dbg(__dev, format, args...) \
5183*4882a593Smuzhiyun ({ \
5184*4882a593Smuzhiyun if (0) \
5185*4882a593Smuzhiyun netdev_printk(KERN_DEBUG, __dev, format, ##args); \
5186*4882a593Smuzhiyun })
5187*4882a593Smuzhiyun #endif
5188*4882a593Smuzhiyun
5189*4882a593Smuzhiyun #if defined(VERBOSE_DEBUG)
5190*4882a593Smuzhiyun #define netdev_vdbg netdev_dbg
5191*4882a593Smuzhiyun #else
5192*4882a593Smuzhiyun
5193*4882a593Smuzhiyun #define netdev_vdbg(dev, format, args...) \
5194*4882a593Smuzhiyun ({ \
5195*4882a593Smuzhiyun if (0) \
5196*4882a593Smuzhiyun netdev_printk(KERN_DEBUG, dev, format, ##args); \
5197*4882a593Smuzhiyun 0; \
5198*4882a593Smuzhiyun })
5199*4882a593Smuzhiyun #endif

/*
 * netdev_WARN() acts like netdev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s%s: " format, netdev_name(dev),	\
	     netdev_reg_state(dev), ##args)

#define netdev_WARN_ONCE(dev, format, args...)				\
	WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev),	\
		  netdev_reg_state(dev), ##args)
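
/*
 * Illustrative sketch (the ring index check is hypothetical): unlike
 * netdev_err(), this also emits a WARN with file/line information and a
 * backtrace, prefixed with the device name and registration state:
 *
 *	netdev_WARN(dev, "TX ring %d in unexpected state\n", i);
 *
 * Reserve it for "should never happen" conditions; use netdev_err() and
 * friends for ordinary runtime failures.
 */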

/*
 * netif printk helpers: like netdev_printk(), but additionally gated on
 * the driver's netif_msg_##type message-class flags.
 */

#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

#define netif_level(level, priv, type, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_##level(dev, fmt, ##args);		\
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_level(info, priv, type, dev, fmt, ##args)
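
/*
 * Minimal usage sketch, assuming the conventional driver-private
 * "msg_enable" bitmap (the structure, the "debug" module parameter and
 * the field names here are hypothetical):
 *
 *	struct my_priv {
 *		struct net_device *dev;
 *		u32 msg_enable;
 *	};
 *
 *	priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV | NETIF_MSG_LINK);
 *	...
 *	netif_err(priv, tx_err, priv->dev, "TX DMA mapping failed\n");
 *
 * The last line prints only when NETIF_MSG_TX_ERR is set in
 * priv->msg_enable.
 */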

#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_netdev_dbg(netdev, format, ##args);	\
} while (0)
#elif defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#else
#define netif_dbg(priv, type, dev, format, args...)			\
({									\
	if (0)								\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;								\
})
#endif

/* if @cond then downgrade to debug, else print at @level */
#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...)	\
	do {								\
		if (cond)						\
			netif_dbg(priv, type, netdev, fmt, ##args);	\
		else							\
			netif_ ## level(priv, type, netdev, fmt, ##args); \
	} while (0)
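
/*
 * Usage sketch for an error that is expected in some situations and
 * should not alarm users (the read_sensor() helper and the -ENODATA case
 * are hypothetical):
 *
 *	ret = read_sensor(priv);
 *	netif_cond_dbg(priv, hw, priv->dev, ret == -ENODATA, err,
 *		       "sensor read failed: %d\n", ret);
 *
 * -ENODATA is logged only at debug level, while any other failure is
 * reported through netif_err().
 */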

#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netif_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif

/*
 * The list of packet types we will receive (as opposed to discard)
 * and the routines to invoke.
 *
 * Why 16? Because with 16 the only overlap we get on a hash of the
 * low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	0800	IP
 *	0001	802.3
 *	0002	AX.25
 *	0004	802.2
 *	8035	RARP
 *	0005	SNAP
 *	0805	X.25
 *	0806	ARP
 *	8137	IPX
 *	0009	Localtalk
 *	86DD	IPv6
 */
#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
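
/*
 * Sketch of how these constants are typically used to pick a bucket for a
 * struct packet_type (simplified, not a verbatim copy of the core code):
 *
 *	struct list_head ptype_base[PTYPE_HASH_SIZE];
 *
 *	static struct list_head *ptype_bucket(__be16 type)
 *	{
 *		return &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
 *	}
 *
 * ETH_P_IP (0x0800) hashes to bucket 0, while ETH_P_RARP (0x8035),
 * ETH_P_SNAP (0x0005) and ETH_P_X25 (0x0805) all land in bucket 5, which
 * is the overlap the comment above refers to.
 */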

extern struct net_device *blackhole_netdev;

#endif /* _LINUX_NETDEVICE_H */