/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *	Pauline Middelink	:	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *		Steve Whitehouse:	Default routines for sock_ops
 *		Arnaldo C. Melo	:	removed net_pinfo, tp_pinfo and made
 *					protinfo be just a void pointer, as the
 *					protocol specific parts were moved to
 *					respective headers and ipv4/v6, etc now
 *					use private slabcaches for its socks
 *		Pedro Hortas	:	New flags field for socket options
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/mm.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/static_key.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cgroup-defs.h>
#include <linux/rbtree.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>
#include <linux/sockptr.h>

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <net/dst.h>
#include <net/checksum.h>
#include <net/tcp_states.h>
#include <linux/net_tstamp.h>
#include <net/l3mdev.h>
#include <linux/android_kabi.h>
#include <linux/android_vendor.h>

/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
/* Validate arguments and do nothing */
static inline __printf(2, 3)
void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
{
}
#endif
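
/*
 * Illustrative usage sketch (not part of the API surface): SOCK_DEBUG()
 * only emits a message for sockets that have %SO_DEBUG set, e.g.
 *
 *	SOCK_DEBUG(sk, "state moved to %d\n", sk->sk_state);
 *
 * With SOCK_DEBUGGING undefined, the empty inline above still lets the
 * compiler type-check the format string via __printf(2, 3).
 */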

/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
	spinlock_t		slock;
	int			owned;
	wait_queue_head_t	wq;
	/*
	 * We express the mutex-alike socket_lock semantics
	 * to the lock validator by explicitly managing
	 * the slock as a lock variant (in addition to
	 * the slock itself):
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} socket_lock_t;
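
/*
 * Usage sketch (illustrative): process context takes ownership with
 * lock_sock()/release_sock(), which may sleep and set/clear 'owned'
 * under 'slock'; softirq context only spins on 'slock' via
 * bh_lock_sock()/bh_unlock_sock() and, if the socket is owned, defers
 * work to the backlog instead of touching socket state directly:
 *
 *	lock_sock(sk);
 *	... update socket state from process context ...
 *	release_sock(sk);
 */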

struct sock;
struct proto;
struct net;

typedef __u32 __bitwise __portpair;
typedef __u64 __bitwise __addrpair;

/**
 * struct sock_common - minimal network layer representation of sockets
 * @skc_daddr: Foreign IPv4 addr
 * @skc_rcv_saddr: Bound local IPv4 addr
 * @skc_addrpair: 8-byte-aligned __u64 union of @skc_daddr & @skc_rcv_saddr
 * @skc_hash: hash value used with various protocol lookup tables
 * @skc_u16hashes: two u16 hash values used by UDP lookup tables
 * @skc_dport: placeholder for inet_dport/tw_dport
 * @skc_num: placeholder for inet_num/tw_num
 * @skc_portpair: __u32 union of @skc_dport & @skc_num
 * @skc_family: network address family
 * @skc_state: Connection state
 * @skc_reuse: %SO_REUSEADDR setting
 * @skc_reuseport: %SO_REUSEPORT setting
 * @skc_ipv6only: socket is IPV6 only
 * @skc_net_refcnt: socket is using net ref counting
 * @skc_bound_dev_if: bound device index if != 0
 * @skc_bind_node: bind hash linkage for various protocol lookup tables
 * @skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
 * @skc_prot: protocol handlers inside a network family
 * @skc_net: reference to the network namespace of this socket
 * @skc_v6_daddr: IPV6 destination address
 * @skc_v6_rcv_saddr: IPV6 source address
 * @skc_cookie: socket's cookie value
 * @skc_node: main hash linkage for various protocol lookup tables
 * @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
 * @skc_tx_queue_mapping: tx queue number for this connection
 * @skc_rx_queue_mapping: rx queue number for this connection
 * @skc_flags: place holder for sk_flags
 *	%SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
 *	%SO_OOBINLINE settings, %SO_TIMESTAMPING settings
 * @skc_listener: connection request listener socket (aka rsk_listener)
 *	[union with @skc_flags]
 * @skc_tw_dr: (aka tw_dr) ptr to &struct inet_timewait_death_row
 *	[union with @skc_flags]
 * @skc_incoming_cpu: record/match cpu processing incoming packets
 * @skc_rcv_wnd: (aka rsk_rcv_wnd) TCP receive window size (possibly scaled)
 *	[union with @skc_incoming_cpu]
 * @skc_tw_rcv_nxt: (aka tw_rcv_nxt) TCP window next expected seq number
 *	[union with @skc_incoming_cpu]
 * @skc_refcnt: reference count
 *
 * This is the minimal network layer representation of sockets, the header
 * for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
	union {
		__addrpair	skc_addrpair;
		struct {
			__be32	skc_daddr;
			__be32	skc_rcv_saddr;
		};
	};
	union {
		unsigned int	skc_hash;
		__u16		skc_u16hashes[2];
	};
	/* skc_dport && skc_num must be grouped as well */
	union {
		__portpair	skc_portpair;
		struct {
			__be16	skc_dport;
			__u16	skc_num;
		};
	};

	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse:4;
	unsigned char		skc_reuseport:1;
	unsigned char		skc_ipv6only:1;
	unsigned char		skc_net_refcnt:1;
	int			skc_bound_dev_if;
	union {
		struct hlist_node	skc_bind_node;
		struct hlist_node	skc_portaddr_node;
	};
	struct proto		*skc_prot;
	possible_net_t		skc_net;

#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr		skc_v6_daddr;
	struct in6_addr		skc_v6_rcv_saddr;
#endif

	atomic64_t		skc_cookie;

	/* following fields are padding to force
	 * offset(struct sock, sk_refcnt) == 128 on 64bit arches
	 * assuming IPV6 is enabled. We use this padding differently
	 * for different kind of 'sockets'
	 */
	union {
		unsigned long	skc_flags;
		struct sock	*skc_listener;	/* request_sock */
		struct inet_timewait_death_row *skc_tw_dr; /* inet_timewait_sock */
	};
	/*
	 * fields between dontcopy_begin/dontcopy_end
	 * are not copied in sock_copy()
	 */
	/* private: */
	int			skc_dontcopy_begin[0];
	/* public: */
	union {
		struct hlist_node	skc_node;
		struct hlist_nulls_node	skc_nulls_node;
	};
	unsigned short		skc_tx_queue_mapping;
#ifdef CONFIG_XPS
	unsigned short		skc_rx_queue_mapping;
#endif
	union {
		int		skc_incoming_cpu;
		u32		skc_rcv_wnd;
		u32		skc_tw_rcv_nxt; /* struct tcp_timewait_sock */
	};

	refcount_t		skc_refcnt;
	/* private: */
	int			skc_dontcopy_end[0];
	union {
		u32		skc_rxhash;
		u32		skc_window_clamp;
		u32		skc_tw_snd_nxt; /* struct tcp_timewait_sock */
	};
	/* public: */
};

struct bpf_local_storage;

/**
 * struct sock - network layer representation of sockets
 * @__sk_common: shared layout with inet_timewait_sock
 * @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
 * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
 * @sk_lock: synchronizer
 * @sk_kern_sock: True if sock is using kernel lock classes
 * @sk_rcvbuf: size of receive buffer in bytes
 * @sk_wq: sock wait queue and async head
 * @sk_rx_dst: receive input route used by early demux
 * @sk_dst_cache: destination cache
 * @sk_dst_pending_confirm: need to confirm neighbour
 * @sk_policy: flow policy
 * @sk_rx_skb_cache: cache copy of recently accessed RX skb
 * @sk_receive_queue: incoming packets
 * @sk_wmem_alloc: transmit queue bytes committed
 * @sk_tsq_flags: TCP Small Queues flags
 * @sk_write_queue: Packet sending queue
 * @sk_omem_alloc: "o" is "option" or "other"
 * @sk_wmem_queued: persistent queue size
 * @sk_forward_alloc: space allocated forward
 * @sk_napi_id: id of the last napi context to receive data for sk
 * @sk_ll_usec: usecs to busypoll when there is no data
 * @sk_allocation: allocation mode
 * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
 * @sk_pacing_status: Pacing status (requested, handled by sch_fq)
 * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
 * @sk_sndbuf: size of send buffer in bytes
 * @__sk_flags_offset: empty field used to determine location of bitfield
 * @sk_padding: unused element for alignment
 * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
 * @sk_no_check_rx: allow zero checksum in RX packets
 * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
 * @sk_route_nocaps: forbidden route capabilities (e.g. %NETIF_F_GSO_MASK)
 * @sk_route_forced_caps: static, forced route capabilities
 *	(set in tcp_init_sock())
 * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
 * @sk_gso_max_size: Maximum GSO segment size to build
 * @sk_gso_max_segs: Maximum number of GSO segments
 * @sk_pacing_shift: scaling factor for TCP Small Queues
 * @sk_lingertime: %SO_LINGER l_linger setting
 * @sk_backlog: always used with the per-socket spinlock held
 * @sk_callback_lock: used with the callbacks in the end of this struct
 * @sk_error_queue: rarely used
 * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
 *	IPV6_ADDRFORM for instance)
 * @sk_err: last error
 * @sk_err_soft: errors that don't cause failure but are the cause of a
 *	persistent failure not just 'timed out'
 * @sk_drops: raw/udp drops counter
 * @sk_ack_backlog: current listen backlog
 * @sk_max_ack_backlog: listen backlog set in listen()
 * @sk_uid: user id of owner
 * @sk_priority: %SO_PRIORITY setting
 * @sk_type: socket type (%SOCK_STREAM, etc)
 * @sk_protocol: which protocol this socket belongs to in this network family
 * @sk_peer_pid: &struct pid for this socket's peer
 * @sk_peer_cred: %SO_PEERCRED setting
 * @sk_rcvlowat: %SO_RCVLOWAT setting
 * @sk_rcvtimeo: %SO_RCVTIMEO setting
 * @sk_sndtimeo: %SO_SNDTIMEO setting
 * @sk_txhash: computed flow hash for use on transmit
 * @sk_filter: socket filtering instructions
 * @sk_timer: sock cleanup timer
 * @sk_stamp: time stamp of last packet received
 * @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
 * @sk_tsflags: SO_TIMESTAMPING socket options
 * @sk_tskey: counter to disambiguate concurrent tstamp requests
 * @sk_zckey: counter to order MSG_ZEROCOPY notifications
 * @sk_socket: Identd and reporting IO signals
 * @sk_user_data: RPC layer private data
 * @sk_frag: cached page frag
 * @sk_peek_off: current peek_offset value
 * @sk_send_head: front of stuff to transmit
 * @tcp_rtx_queue: TCP re-transmit queue [union with @sk_send_head]
 * @sk_tx_skb_cache: cache copy of recently accessed TX skb
 * @sk_security: used by security modules
 * @sk_mark: generic packet mark
 * @sk_cgrp_data: cgroup data for this cgroup
 * @sk_memcg: this socket's memory cgroup association
 * @sk_write_pending: a write to stream socket waits to start
 * @sk_state_change: callback to indicate change in the state of the sock
 * @sk_data_ready: callback to indicate there is data to be processed
 * @sk_write_space: callback to indicate there is buffer sending space available
 * @sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
 * @sk_backlog_rcv: callback to process the backlog
 * @sk_validate_xmit_skb: ptr to an optional validate function
 * @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
 * @sk_reuseport_cb: reuseport group container
 * @sk_bpf_storage: ptr to cache and control for bpf_sk_storage
 * @sk_rcu: used during RCU grace period
 * @sk_clockid: clockid used by time-based scheduling (SO_TXTIME)
 * @sk_txtime_deadline_mode: set deadline mode for SO_TXTIME
 * @sk_txtime_report_errors: set report errors mode for SO_TXTIME
 * @sk_txtime_unused: unused txtime flags
 */
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please
	 * don't add anything before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_node			__sk_common.skc_node
#define sk_nulls_node		__sk_common.skc_nulls_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping
#ifdef CONFIG_XPS
#define sk_rx_queue_mapping	__sk_common.skc_rx_queue_mapping
#endif

#define sk_dontcopy_begin	__sk_common.skc_dontcopy_begin
#define sk_dontcopy_end		__sk_common.skc_dontcopy_end
#define sk_hash			__sk_common.skc_hash
#define sk_portpair		__sk_common.skc_portpair
#define sk_num			__sk_common.skc_num
#define sk_dport		__sk_common.skc_dport
#define sk_addrpair		__sk_common.skc_addrpair
#define sk_daddr		__sk_common.skc_daddr
#define sk_rcv_saddr		__sk_common.skc_rcv_saddr
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_reuseport		__sk_common.skc_reuseport
#define sk_ipv6only		__sk_common.skc_ipv6only
#define sk_net_refcnt		__sk_common.skc_net_refcnt
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
#define sk_v6_daddr		__sk_common.skc_v6_daddr
#define sk_v6_rcv_saddr		__sk_common.skc_v6_rcv_saddr
#define sk_cookie		__sk_common.skc_cookie
#define sk_incoming_cpu		__sk_common.skc_incoming_cpu
#define sk_flags		__sk_common.skc_flags
#define sk_rxhash		__sk_common.skc_rxhash

	socket_lock_t		sk_lock;
	atomic_t		sk_drops;
	int			sk_rcvlowat;
	struct sk_buff_head	sk_error_queue;
	struct sk_buff		*sk_rx_skb_cache;
	struct sk_buff_head	sk_receive_queue;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 * Note : rmem_alloc is in this structure to fill a hole
	 * on 64bit arches, not because it's logically part of
	 * backlog.
	 */
	struct {
		atomic_t	rmem_alloc;
		int		len;
		struct sk_buff	*head;
		struct sk_buff	*tail;
	} sk_backlog;
#define sk_rmem_alloc sk_backlog.rmem_alloc

	int			sk_forward_alloc;
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int		sk_ll_usec;
	/* ===== mostly read cache line ===== */
	unsigned int		sk_napi_id;
#endif
	int			sk_rcvbuf;

	struct sk_filter __rcu	*sk_filter;
	union {
		struct socket_wq __rcu	*sk_wq;
		/* private: */
		struct socket_wq	*sk_wq_raw;
		/* public: */
	};
#ifdef CONFIG_XFRM
	struct xfrm_policy __rcu *sk_policy[2];
#endif
	struct dst_entry __rcu	*sk_rx_dst;
	struct dst_entry __rcu	*sk_dst_cache;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;

	/* ===== cache line for TX ===== */
	int			sk_wmem_queued;
	refcount_t		sk_wmem_alloc;
	unsigned long		sk_tsq_flags;
	union {
		struct sk_buff	*sk_send_head;
		struct rb_root	tcp_rtx_queue;
	};
	struct sk_buff		*sk_tx_skb_cache;
	struct sk_buff_head	sk_write_queue;
	__s32			sk_peek_off;
	int			sk_write_pending;
	__u32			sk_dst_pending_confirm;
	u32			sk_pacing_status; /* see enum sk_pacing */
	long			sk_sndtimeo;
	struct timer_list	sk_timer;
	__u32			sk_priority;
	__u32			sk_mark;
	unsigned long		sk_pacing_rate; /* bytes per second */
	unsigned long		sk_max_pacing_rate;
	struct page_frag	sk_frag;
	netdev_features_t	sk_route_caps;
	netdev_features_t	sk_route_nocaps;
	netdev_features_t	sk_route_forced_caps;
	int			sk_gso_type;
	unsigned int		sk_gso_max_size;
	gfp_t			sk_allocation;
	__u32			sk_txhash;

	/*
	 * Because of non atomicity rules, all
	 * changes are protected by socket lock.
	 */
	u8			sk_padding : 1,
				sk_kern_sock : 1,
				sk_no_check_tx : 1,
				sk_no_check_rx : 1,
				sk_userlocks : 4;
	u8			sk_pacing_shift;
	u16			sk_type;
	u16			sk_protocol;
	u16			sk_gso_max_segs;
	unsigned long		sk_lingertime;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	u32			sk_ack_backlog;
	u32			sk_max_ack_backlog;
	kuid_t			sk_uid;
#if IS_ENABLED(CONFIG_DEBUG_SPINLOCK) || IS_ENABLED(CONFIG_DEBUG_LOCK_ALLOC)
	spinlock_t		sk_peer_lock;
#else
	/* sk_peer_lock is in the ANDROID_KABI_RESERVE(1) field below */
#endif
	struct pid		*sk_peer_pid;
	const struct cred	*sk_peer_cred;

	long			sk_rcvtimeo;
	ktime_t			sk_stamp;
#if BITS_PER_LONG==32
	seqlock_t		sk_stamp_seq;
#endif
	u16			sk_tsflags;
	u8			sk_shutdown;
	u32			sk_tskey;
	atomic_t		sk_zckey;

	u8			sk_clockid;
	u8			sk_txtime_deadline_mode : 1,
				sk_txtime_report_errors : 1,
				sk_txtime_unused : 6;

	struct socket		*sk_socket;
	void			*sk_user_data;
#ifdef CONFIG_SECURITY
	void			*sk_security;
#endif
	struct sock_cgroup_data	sk_cgrp_data;
	struct mem_cgroup	*sk_memcg;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	struct sk_buff*		(*sk_validate_xmit_skb)(struct sock *sk,
							struct net_device *dev,
							struct sk_buff *skb);
#endif
	void			(*sk_destruct)(struct sock *sk);
	struct sock_reuseport __rcu	*sk_reuseport_cb;
#ifdef CONFIG_BPF_SYSCALL
	struct bpf_local_storage __rcu	*sk_bpf_storage;
#endif
	struct rcu_head		sk_rcu;

#if IS_ENABLED(CONFIG_DEBUG_SPINLOCK) || IS_ENABLED(CONFIG_DEBUG_LOCK_ALLOC)
	ANDROID_KABI_RESERVE(1);
#else
	ANDROID_KABI_USE(1, spinlock_t sk_peer_lock);
#endif
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
	ANDROID_KABI_RESERVE(5);
	ANDROID_KABI_RESERVE(6);
	ANDROID_KABI_RESERVE(7);
	ANDROID_KABI_RESERVE(8);

	ANDROID_OEM_DATA(1);
};

enum sk_pacing {
	SK_PACING_NONE		= 0,
	SK_PACING_NEEDED	= 1,
	SK_PACING_FQ		= 2,
};

/* flag bits in sk_user_data
 *
 * - SK_USER_DATA_NOCOPY: Pointer stored in sk_user_data might
 *   not be suitable for copying when cloning the socket. For instance,
 *   it can point to a reference counted object. sk_user_data bottom
 *   bit is set if pointer must not be copied.
 *
 * - SK_USER_DATA_BPF: Mark whether sk_user_data field is
 *   managed/owned by a BPF reuseport array. This bit should be set
 *   when sk_user_data's sk is added to the bpf's reuseport_array.
 *
 * - SK_USER_DATA_PSOCK: Mark whether pointer stored in
 *   sk_user_data points to psock type. This bit should be set
 *   when sk_user_data is assigned to a psock object.
 */
#define SK_USER_DATA_NOCOPY	1UL
#define SK_USER_DATA_BPF	2UL
#define SK_USER_DATA_PSOCK	4UL
#define SK_USER_DATA_PTRMASK	~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF |\
				  SK_USER_DATA_PSOCK)

/**
 * sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied
 * @sk: socket
 */
static inline bool sk_user_data_is_nocopy(const struct sock *sk)
{
	return ((uintptr_t)sk->sk_user_data & SK_USER_DATA_NOCOPY);
}

#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))

/**
 * __rcu_dereference_sk_user_data_with_flags - return the pointer only if
 * all of the requested @flags bits are set in sk_user_data, otherwise
 * return NULL
 *
 * @sk: socket
 * @flags: flag bits
 */
static inline void *
__rcu_dereference_sk_user_data_with_flags(const struct sock *sk,
					  uintptr_t flags)
{
	uintptr_t sk_user_data = (uintptr_t)rcu_dereference(__sk_user_data(sk));

	WARN_ON_ONCE(flags & SK_USER_DATA_PTRMASK);

	if ((sk_user_data & flags) == flags)
		return (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
	return NULL;
}

#define rcu_dereference_sk_user_data(sk)				\
	__rcu_dereference_sk_user_data_with_flags(sk, 0)
#define __rcu_assign_sk_user_data_with_flags(sk, ptr, flags)		\
({									\
	uintptr_t __tmp1 = (uintptr_t)(ptr),				\
		  __tmp2 = (uintptr_t)(flags);				\
	WARN_ON_ONCE(__tmp1 & ~SK_USER_DATA_PTRMASK);			\
	WARN_ON_ONCE(__tmp2 & SK_USER_DATA_PTRMASK);			\
	rcu_assign_pointer(__sk_user_data((sk)),			\
			   __tmp1 | __tmp2);				\
})
#define rcu_assign_sk_user_data(sk, ptr)				\
	__rcu_assign_sk_user_data_with_flags(sk, ptr, 0)
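
/*
 * Illustrative sketch: the low bits of sk_user_data carry SK_USER_DATA_*
 * flags, so the pointer must go through the helpers above rather than
 * being dereferenced directly, e.g. (with 'psock' a hypothetical local):
 *
 *	rcu_read_lock();
 *	psock = __rcu_dereference_sk_user_data_with_flags(sk,
 *							   SK_USER_DATA_PSOCK);
 *	...
 *	rcu_read_unlock();
 *
 * NULL is returned unless every requested flag bit is set.
 */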

/*
 * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK,
 * or not, respectively, with its port being reused by someone else.
 * SK_FORCE_REUSE on a socket means that the socket will reuse everybody
 * else's port without looking at the other's sk_reuse value.
 */

#define SK_NO_REUSE	0
#define SK_CAN_REUSE	1
#define SK_FORCE_REUSE	2

int sk_set_peek_off(struct sock *sk, int val);

static inline int sk_peek_offset(struct sock *sk, int flags)
{
	if (unlikely(flags & MSG_PEEK)) {
		return READ_ONCE(sk->sk_peek_off);
	}

	return 0;
}

static inline void sk_peek_offset_bwd(struct sock *sk, int val)
{
	s32 off = READ_ONCE(sk->sk_peek_off);

	if (unlikely(off >= 0)) {
		off = max_t(s32, off - val, 0);
		WRITE_ONCE(sk->sk_peek_off, off);
	}
}

static inline void sk_peek_offset_fwd(struct sock *sk, int val)
{
	sk_peek_offset_bwd(sk, -val);
}
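
/*
 * Illustrative note: when %SO_PEEK_OFF is enabled, sk_peek_off holds the
 * current peek offset (a negative value means the feature is off). A
 * MSG_PEEK receive starts at sk_peek_offset() and then advances it with
 * sk_peek_offset_fwd(); a receive that actually consumes data rewinds it
 * with sk_peek_offset_bwd().
 */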

/*
 * Hashed lists helper routines
 */
static inline struct sock *sk_entry(const struct hlist_node *node)
{
	return hlist_entry(node, struct sock, sk_node);
}

static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
}

static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return hlist_entry_safe(sk->sk_node.next, struct sock, sk_node);
}

static inline struct sock *sk_nulls_next(const struct sock *sk)
{
	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
		hlist_nulls_entry(sk->sk_nulls_node.next,
				  struct sock, sk_nulls_node) :
		NULL;
}

static inline bool sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline bool sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

static inline void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static inline void sk_nulls_node_init(struct hlist_nulls_node *node)
{
	node->pprev = NULL;
}

static inline void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

/* NB: equivalent to hlist_del_init_rcu */
static inline bool __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return true;
	}
	return false;
}

/* Grab socket reference count. This operation is valid only
 * when sk is ALREADY grabbed f.e. it is found in hash table
 * or a list and the lookup is made under lock preventing hash table
 * modifications.
 */

static __always_inline void sock_hold(struct sock *sk)
{
	refcount_inc(&sk->sk_refcnt);
}

/* Ungrab socket in the context, which assumes that socket refcnt
 * cannot hit zero, f.e. it is true in context of any socketcall.
 */
static __always_inline void __sock_put(struct sock *sk)
{
	refcount_dec(&sk->sk_refcnt);
}
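
/*
 * Usage sketch (illustrative): a lookup performed under the hash-table
 * lock or RCU may take its own reference with sock_hold() and must drop
 * it with sock_put() when done; __sock_put() is only for paths that know
 * the refcount cannot reach zero here:
 *
 *	sock_hold(sk);
 *	... use sk after leaving the lookup lock ...
 *	sock_put(sk);
 */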

static inline bool sk_del_node_init(struct sock *sk)
{
	bool rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}
#define sk_del_node_init_rcu(sk)	sk_del_node_init(sk)

static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk)
{
	if (sk_hashed(sk)) {
		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
		return true;
	}
	return false;
}

static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
{
	bool rc = __sk_nulls_del_node_init_rcu(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
	    sk->sk_family == AF_INET6)
		hlist_add_tail_rcu(&sk->sk_node, list);
	else
		hlist_add_head_rcu(&sk->sk_node, list);
}

static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	hlist_add_tail_rcu(&sk->sk_node, list);
}

static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}

static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
}

static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	sock_hold(sk);
	__sk_nulls_add_node_rcu(sk, list);
}

static inline void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static inline void sk_add_bind_node(struct sock *sk,
				    struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, list) \
	hlist_for_each_entry(__sk, list, sk_node)
#define sk_for_each_rcu(__sk, list) \
	hlist_for_each_entry_rcu(__sk, list, sk_node)
#define sk_nulls_for_each(__sk, node, list) \
	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
#define sk_nulls_for_each_rcu(__sk, node, list) \
	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
#define sk_for_each_from(__sk) \
	hlist_for_each_entry_from(__sk, sk_node)
#define sk_nulls_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
#define sk_for_each_safe(__sk, tmp, list) \
	hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
#define sk_for_each_bound(__sk, list) \
	hlist_for_each_entry(__sk, list, sk_bind_node)
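
/*
 * Illustrative sketch: walking one protocol hash chain with the helpers
 * above, where 'head' is a hypothetical &struct hlist_head bucket:
 *
 *	struct sock *sk;
 *
 *	sk_for_each(sk, head) {
 *		if (sk->sk_family == AF_INET)
 *			...;
 *	}
 */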

/**
 * sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @head: the head for your list.
 * @offset: offset of hlist_node within the struct.
 *
 */
#define sk_for_each_entry_offset_rcu(tpos, pos, head, offset)		\
	for (pos = rcu_dereference(hlist_first_rcu(head));		\
	     pos != NULL &&						\
		({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;});\
	     pos = rcu_dereference(hlist_next_rcu(pos)))

static inline struct user_namespace *sk_user_ns(struct sock *sk)
{
	/* Careful only use this in a context where these parameters
	 * can not change and must all be valid, such as recvmsg from
	 * userspace.
	 */
	return sk->sk_socket->file->f_cred->user_ns;
}

/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_MEMALLOC, /* VM depends on this socket for swapping */
	SOCK_TIMESTAMPING_RX_SOFTWARE, /* %SOF_TIMESTAMPING_RX_SOFTWARE */
	SOCK_FASYNC, /* fasync() active */
	SOCK_RXQ_OVFL,
	SOCK_ZEROCOPY, /* buffers from userspace */
	SOCK_WIFI_STATUS, /* push wifi status to userspace */
	SOCK_NOFCS, /* Tell NIC not to do the Ethernet FCS.
		     * Will use last 4 bytes of packet sent from
		     * user-space instead.
		     */
	SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
	SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
	SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */
	SOCK_TXTIME,
	SOCK_XDP, /* XDP is attached */
	SOCK_TSTAMP_NEW, /* Indicates 64 bit timestamps always */
};

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline void sock_valbool_flag(struct sock *sk, enum sock_flags bit,
				     int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}
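
/*
 * Illustrative sketch: the flag setters above are simple bit operations
 * on sk->sk_flags using the non-atomic __set_bit()/__clear_bit(), so
 * callers are expected to hold the socket lock (or otherwise own the
 * socket), e.g.:
 *
 *	sock_set_flag(sk, SOCK_LINGER);
 *	if (sock_flag(sk, SOCK_DEAD))
 *		return;
 */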

#ifdef CONFIG_NET
DECLARE_STATIC_KEY_FALSE(memalloc_socks_key);
static inline int sk_memalloc_socks(void)
{
	return static_branch_unlikely(&memalloc_socks_key);
}

void __receive_sock(struct file *file);
#else

static inline int sk_memalloc_socks(void)
{
	return 0;
}

static inline void __receive_sock(struct file *file)
{ }
#endif

static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
{
	return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC);
}

static inline void sk_acceptq_removed(struct sock *sk)
{
	WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog - 1);
}

static inline void sk_acceptq_added(struct sock *sk)
{
	WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
}

static inline bool sk_acceptq_is_full(const struct sock *sk)
{
	return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog);
}

/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(const struct sock *sk)
{
	return READ_ONCE(sk->sk_wmem_queued) >> 1;
}

static inline int sk_stream_wspace(const struct sock *sk)
{
	return READ_ONCE(sk->sk_sndbuf) - READ_ONCE(sk->sk_wmem_queued);
}

static inline void sk_wmem_queued_add(struct sock *sk, int val)
{
	WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
}

void sk_stream_write_space(struct sock *sk);

/* OOB backlog add */
static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	/* don't let the skb dst go un-refcounted, we are going to leave the rcu lock */
	skb_dst_force(skb);

	if (!sk->sk_backlog.tail)
		WRITE_ONCE(sk->sk_backlog.head, skb);
	else
		sk->sk_backlog.tail->next = skb;

	WRITE_ONCE(sk->sk_backlog.tail, skb);
	skb->next = NULL;
}

/*
 * Take into account size of receive queue and backlog queue
 * Do not take into account this skb truesize,
 * to allow even a single big packet to come.
 */
static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
{
	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);

	return qsize > limit;
}

/* The per-socket spinlock must be held here. */
static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
					      unsigned int limit)
{
	if (sk_rcvqueues_full(sk, limit))
		return -ENOBUFS;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
		return -ENOMEM;

	__sk_add_backlog(sk, skb);
	sk->sk_backlog.len += skb->truesize;
	return 0;
}
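
/*
 * Usage sketch (illustrative): a softirq receive path that finds the
 * socket owned by a process defers the skb to the backlog; the owner
 * later replays it from release_sock() through sk_backlog_rcv().
 * 'limit' below is a hypothetical receive-buffer based bound chosen by
 * the protocol:
 *
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk))
 *		ret = sk->sk_backlog_rcv(sk, skb);
 *	else if (sk_add_backlog(sk, skb, limit))
 *		kfree_skb(skb);
 *	bh_unlock_sock(sk);
 */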
1050*4882a593Smuzhiyun
1051*4882a593Smuzhiyun int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
1052*4882a593Smuzhiyun
sk_backlog_rcv(struct sock * sk,struct sk_buff * skb)1053*4882a593Smuzhiyun static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
1054*4882a593Smuzhiyun {
1055*4882a593Smuzhiyun if (sk_memalloc_socks() && skb_pfmemalloc(skb))
1056*4882a593Smuzhiyun return __sk_backlog_rcv(sk, skb);
1057*4882a593Smuzhiyun
1058*4882a593Smuzhiyun return sk->sk_backlog_rcv(sk, skb);
1059*4882a593Smuzhiyun }
1060*4882a593Smuzhiyun
sk_incoming_cpu_update(struct sock * sk)1061*4882a593Smuzhiyun static inline void sk_incoming_cpu_update(struct sock *sk)
1062*4882a593Smuzhiyun {
1063*4882a593Smuzhiyun int cpu = raw_smp_processor_id();
1064*4882a593Smuzhiyun
1065*4882a593Smuzhiyun if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
1066*4882a593Smuzhiyun WRITE_ONCE(sk->sk_incoming_cpu, cpu);
1067*4882a593Smuzhiyun }
1068*4882a593Smuzhiyun
sock_rps_record_flow_hash(__u32 hash)1069*4882a593Smuzhiyun static inline void sock_rps_record_flow_hash(__u32 hash)
1070*4882a593Smuzhiyun {
1071*4882a593Smuzhiyun #ifdef CONFIG_RPS
1072*4882a593Smuzhiyun struct rps_sock_flow_table *sock_flow_table;
1073*4882a593Smuzhiyun
1074*4882a593Smuzhiyun rcu_read_lock();
1075*4882a593Smuzhiyun sock_flow_table = rcu_dereference(rps_sock_flow_table);
1076*4882a593Smuzhiyun rps_record_sock_flow(sock_flow_table, hash);
1077*4882a593Smuzhiyun rcu_read_unlock();
1078*4882a593Smuzhiyun #endif
1079*4882a593Smuzhiyun }
1080*4882a593Smuzhiyun
1081*4882a593Smuzhiyun static inline void sock_rps_record_flow(const struct sock *sk)
1082*4882a593Smuzhiyun {
1083*4882a593Smuzhiyun #ifdef CONFIG_RPS
1084*4882a593Smuzhiyun if (static_branch_unlikely(&rfs_needed)) {
1085*4882a593Smuzhiyun /* Reading sk->sk_rxhash might incur an expensive cache line
1086*4882a593Smuzhiyun * miss.
1087*4882a593Smuzhiyun *
1088*4882a593Smuzhiyun * TCP_ESTABLISHED does cover almost all states where RFS
1089*4882a593Smuzhiyun * might be useful, and is cheaper [1] than testing :
1090*4882a593Smuzhiyun * IPv4: inet_sk(sk)->inet_daddr
1091*4882a593Smuzhiyun * IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
1092*4882a593Smuzhiyun * OR an additional socket flag
1093*4882a593Smuzhiyun * [1] : sk_state and sk_prot are in the same cache line.
1094*4882a593Smuzhiyun */
1095*4882a593Smuzhiyun if (sk->sk_state == TCP_ESTABLISHED)
1096*4882a593Smuzhiyun sock_rps_record_flow_hash(sk->sk_rxhash);
1097*4882a593Smuzhiyun }
1098*4882a593Smuzhiyun #endif
1099*4882a593Smuzhiyun }
1100*4882a593Smuzhiyun
1101*4882a593Smuzhiyun static inline void sock_rps_save_rxhash(struct sock *sk,
1102*4882a593Smuzhiyun const struct sk_buff *skb)
1103*4882a593Smuzhiyun {
1104*4882a593Smuzhiyun #ifdef CONFIG_RPS
1105*4882a593Smuzhiyun if (unlikely(sk->sk_rxhash != skb->hash))
1106*4882a593Smuzhiyun sk->sk_rxhash = skb->hash;
1107*4882a593Smuzhiyun #endif
1108*4882a593Smuzhiyun }
1109*4882a593Smuzhiyun
1110*4882a593Smuzhiyun static inline void sock_rps_reset_rxhash(struct sock *sk)
1111*4882a593Smuzhiyun {
1112*4882a593Smuzhiyun #ifdef CONFIG_RPS
1113*4882a593Smuzhiyun sk->sk_rxhash = 0;
1114*4882a593Smuzhiyun #endif
1115*4882a593Smuzhiyun }
1116*4882a593Smuzhiyun
1117*4882a593Smuzhiyun #define sk_wait_event(__sk, __timeo, __condition, __wait) \
1118*4882a593Smuzhiyun ({ int __rc; \
1119*4882a593Smuzhiyun release_sock(__sk); \
1120*4882a593Smuzhiyun __rc = __condition; \
1121*4882a593Smuzhiyun if (!__rc) { \
1122*4882a593Smuzhiyun *(__timeo) = wait_woken(__wait, \
1123*4882a593Smuzhiyun TASK_INTERRUPTIBLE, \
1124*4882a593Smuzhiyun *(__timeo)); \
1125*4882a593Smuzhiyun } \
1126*4882a593Smuzhiyun sched_annotate_sleep(); \
1127*4882a593Smuzhiyun lock_sock(__sk); \
1128*4882a593Smuzhiyun __rc = __condition; \
1129*4882a593Smuzhiyun __rc; \
1130*4882a593Smuzhiyun })
1131*4882a593Smuzhiyun
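/*
 * A hedged usage sketch for sk_wait_event(), modelled on the sk_stream_wait_*
 * helpers declared below (real callers differ in detail). The socket lock
 * must be held on entry, since the macro drops and re-takes it;
 * "some_condition" is a placeholder predicate:
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *	add_wait_queue(sk_sleep(sk), &wait);
 *	while (!some_condition(sk)) {
 *		if (!timeo || signal_pending(current))
 *			break;
 *		sk_wait_event(sk, &timeo, some_condition(sk), &wait);
 *	}
 *	remove_wait_queue(sk_sleep(sk), &wait);
 */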
1132*4882a593Smuzhiyun int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
1133*4882a593Smuzhiyun int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
1134*4882a593Smuzhiyun void sk_stream_wait_close(struct sock *sk, long timeo_p);
1135*4882a593Smuzhiyun int sk_stream_error(struct sock *sk, int flags, int err);
1136*4882a593Smuzhiyun void sk_stream_kill_queues(struct sock *sk);
1137*4882a593Smuzhiyun void sk_set_memalloc(struct sock *sk);
1138*4882a593Smuzhiyun void sk_clear_memalloc(struct sock *sk);
1139*4882a593Smuzhiyun
1140*4882a593Smuzhiyun void __sk_flush_backlog(struct sock *sk);
1141*4882a593Smuzhiyun
1142*4882a593Smuzhiyun static inline bool sk_flush_backlog(struct sock *sk)
1143*4882a593Smuzhiyun {
1144*4882a593Smuzhiyun if (unlikely(READ_ONCE(sk->sk_backlog.tail))) {
1145*4882a593Smuzhiyun __sk_flush_backlog(sk);
1146*4882a593Smuzhiyun return true;
1147*4882a593Smuzhiyun }
1148*4882a593Smuzhiyun return false;
1149*4882a593Smuzhiyun }
1150*4882a593Smuzhiyun
1151*4882a593Smuzhiyun int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
1152*4882a593Smuzhiyun
1153*4882a593Smuzhiyun struct request_sock_ops;
1154*4882a593Smuzhiyun struct timewait_sock_ops;
1155*4882a593Smuzhiyun struct inet_hashinfo;
1156*4882a593Smuzhiyun struct raw_hashinfo;
1157*4882a593Smuzhiyun struct smc_hashinfo;
1158*4882a593Smuzhiyun struct module;
1159*4882a593Smuzhiyun
1160*4882a593Smuzhiyun /*
1161*4882a593Smuzhiyun * caches using SLAB_TYPESAFE_BY_RCU should leave the .next pointer of nulls nodes
1162*4882a593Smuzhiyun * unmodified. Special care is taken when initializing the object to zero.
1163*4882a593Smuzhiyun */
1164*4882a593Smuzhiyun static inline void sk_prot_clear_nulls(struct sock *sk, int size)
1165*4882a593Smuzhiyun {
1166*4882a593Smuzhiyun if (offsetof(struct sock, sk_node.next) != 0)
1167*4882a593Smuzhiyun memset(sk, 0, offsetof(struct sock, sk_node.next));
1168*4882a593Smuzhiyun memset(&sk->sk_node.pprev, 0,
1169*4882a593Smuzhiyun size - offsetof(struct sock, sk_node.pprev));
1170*4882a593Smuzhiyun }
1171*4882a593Smuzhiyun
1172*4882a593Smuzhiyun /* Networking protocol blocks we attach to sockets.
1173*4882a593Smuzhiyun * socket layer -> transport layer interface
1174*4882a593Smuzhiyun */
1175*4882a593Smuzhiyun struct proto {
1176*4882a593Smuzhiyun void (*close)(struct sock *sk,
1177*4882a593Smuzhiyun long timeout);
1178*4882a593Smuzhiyun int (*pre_connect)(struct sock *sk,
1179*4882a593Smuzhiyun struct sockaddr *uaddr,
1180*4882a593Smuzhiyun int addr_len);
1181*4882a593Smuzhiyun int (*connect)(struct sock *sk,
1182*4882a593Smuzhiyun struct sockaddr *uaddr,
1183*4882a593Smuzhiyun int addr_len);
1184*4882a593Smuzhiyun int (*disconnect)(struct sock *sk, int flags);
1185*4882a593Smuzhiyun
1186*4882a593Smuzhiyun struct sock * (*accept)(struct sock *sk, int flags, int *err,
1187*4882a593Smuzhiyun bool kern);
1188*4882a593Smuzhiyun
1189*4882a593Smuzhiyun int (*ioctl)(struct sock *sk, int cmd,
1190*4882a593Smuzhiyun unsigned long arg);
1191*4882a593Smuzhiyun int (*init)(struct sock *sk);
1192*4882a593Smuzhiyun void (*destroy)(struct sock *sk);
1193*4882a593Smuzhiyun void (*shutdown)(struct sock *sk, int how);
1194*4882a593Smuzhiyun int (*setsockopt)(struct sock *sk, int level,
1195*4882a593Smuzhiyun int optname, sockptr_t optval,
1196*4882a593Smuzhiyun unsigned int optlen);
1197*4882a593Smuzhiyun int (*getsockopt)(struct sock *sk, int level,
1198*4882a593Smuzhiyun int optname, char __user *optval,
1199*4882a593Smuzhiyun int __user *option);
1200*4882a593Smuzhiyun void (*keepalive)(struct sock *sk, int valbool);
1201*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
1202*4882a593Smuzhiyun int (*compat_ioctl)(struct sock *sk,
1203*4882a593Smuzhiyun unsigned int cmd, unsigned long arg);
1204*4882a593Smuzhiyun #endif
1205*4882a593Smuzhiyun int (*sendmsg)(struct sock *sk, struct msghdr *msg,
1206*4882a593Smuzhiyun size_t len);
1207*4882a593Smuzhiyun int (*recvmsg)(struct sock *sk, struct msghdr *msg,
1208*4882a593Smuzhiyun size_t len, int noblock, int flags,
1209*4882a593Smuzhiyun int *addr_len);
1210*4882a593Smuzhiyun int (*sendpage)(struct sock *sk, struct page *page,
1211*4882a593Smuzhiyun int offset, size_t size, int flags);
1212*4882a593Smuzhiyun int (*bind)(struct sock *sk,
1213*4882a593Smuzhiyun struct sockaddr *addr, int addr_len);
1214*4882a593Smuzhiyun int (*bind_add)(struct sock *sk,
1215*4882a593Smuzhiyun struct sockaddr *addr, int addr_len);
1216*4882a593Smuzhiyun
1217*4882a593Smuzhiyun int (*backlog_rcv) (struct sock *sk,
1218*4882a593Smuzhiyun struct sk_buff *skb);
1219*4882a593Smuzhiyun
1220*4882a593Smuzhiyun void (*release_cb)(struct sock *sk);
1221*4882a593Smuzhiyun
1222*4882a593Smuzhiyun /* Keeping track of sk's, looking them up, and port selection methods. */
1223*4882a593Smuzhiyun int (*hash)(struct sock *sk);
1224*4882a593Smuzhiyun void (*unhash)(struct sock *sk);
1225*4882a593Smuzhiyun void (*rehash)(struct sock *sk);
1226*4882a593Smuzhiyun int (*get_port)(struct sock *sk, unsigned short snum);
1227*4882a593Smuzhiyun
1228*4882a593Smuzhiyun /* Keeping track of sockets in use */
1229*4882a593Smuzhiyun #ifdef CONFIG_PROC_FS
1230*4882a593Smuzhiyun unsigned int inuse_idx;
1231*4882a593Smuzhiyun #endif
1232*4882a593Smuzhiyun
1233*4882a593Smuzhiyun bool (*stream_memory_free)(const struct sock *sk, int wake);
1234*4882a593Smuzhiyun bool (*stream_memory_read)(const struct sock *sk);
1235*4882a593Smuzhiyun /* Memory pressure */
1236*4882a593Smuzhiyun void (*enter_memory_pressure)(struct sock *sk);
1237*4882a593Smuzhiyun void (*leave_memory_pressure)(struct sock *sk);
1238*4882a593Smuzhiyun atomic_long_t *memory_allocated; /* Current allocated memory. */
1239*4882a593Smuzhiyun struct percpu_counter *sockets_allocated; /* Current number of sockets. */
1240*4882a593Smuzhiyun /*
1241*4882a593Smuzhiyun * Pressure flag: try to collapse.
1242*4882a593Smuzhiyun 	 * Technical note: it is used by multiple contexts non-atomically.
1243*4882a593Smuzhiyun 	 * All of __sk_mem_schedule() is of this nature: accounting
1244*4882a593Smuzhiyun * is strict, actions are advisory and have some latency.
1245*4882a593Smuzhiyun */
1246*4882a593Smuzhiyun unsigned long *memory_pressure;
1247*4882a593Smuzhiyun long *sysctl_mem;
1248*4882a593Smuzhiyun
1249*4882a593Smuzhiyun int *sysctl_wmem;
1250*4882a593Smuzhiyun int *sysctl_rmem;
1251*4882a593Smuzhiyun u32 sysctl_wmem_offset;
1252*4882a593Smuzhiyun u32 sysctl_rmem_offset;
1253*4882a593Smuzhiyun
1254*4882a593Smuzhiyun int max_header;
1255*4882a593Smuzhiyun bool no_autobind;
1256*4882a593Smuzhiyun
1257*4882a593Smuzhiyun struct kmem_cache *slab;
1258*4882a593Smuzhiyun unsigned int obj_size;
1259*4882a593Smuzhiyun slab_flags_t slab_flags;
1260*4882a593Smuzhiyun unsigned int useroffset; /* Usercopy region offset */
1261*4882a593Smuzhiyun unsigned int usersize; /* Usercopy region size */
1262*4882a593Smuzhiyun
1263*4882a593Smuzhiyun struct percpu_counter *orphan_count;
1264*4882a593Smuzhiyun
1265*4882a593Smuzhiyun struct request_sock_ops *rsk_prot;
1266*4882a593Smuzhiyun struct timewait_sock_ops *twsk_prot;
1267*4882a593Smuzhiyun
1268*4882a593Smuzhiyun union {
1269*4882a593Smuzhiyun struct inet_hashinfo *hashinfo;
1270*4882a593Smuzhiyun struct udp_table *udp_table;
1271*4882a593Smuzhiyun struct raw_hashinfo *raw_hash;
1272*4882a593Smuzhiyun struct smc_hashinfo *smc_hash;
1273*4882a593Smuzhiyun } h;
1274*4882a593Smuzhiyun
1275*4882a593Smuzhiyun struct module *owner;
1276*4882a593Smuzhiyun
1277*4882a593Smuzhiyun char name[32];
1278*4882a593Smuzhiyun
1279*4882a593Smuzhiyun struct list_head node;
1280*4882a593Smuzhiyun #ifdef SOCK_REFCNT_DEBUG
1281*4882a593Smuzhiyun atomic_t socks;
1282*4882a593Smuzhiyun #endif
1283*4882a593Smuzhiyun int (*diag_destroy)(struct sock *sk, int err);
1284*4882a593Smuzhiyun } __randomize_layout;
1285*4882a593Smuzhiyun
1286*4882a593Smuzhiyun int proto_register(struct proto *prot, int alloc_slab);
1287*4882a593Smuzhiyun void proto_unregister(struct proto *prot);
1288*4882a593Smuzhiyun int sock_load_diag_module(int family, int protocol);
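/*
 * Minimal registration sketch for a hypothetical protocol; "my_proto" and
 * "struct my_sock" are placeholders, not part of this header. The second
 * argument of proto_register() selects slab-cache allocation:
 *
 *	static struct proto my_proto = {
 *		.name		= "MYPROTO",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct my_sock),
 *	};
 *
 *	err = proto_register(&my_proto, 1);
 *	...
 *	proto_unregister(&my_proto);
 */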
1289*4882a593Smuzhiyun
1290*4882a593Smuzhiyun #ifdef SOCK_REFCNT_DEBUG
1291*4882a593Smuzhiyun static inline void sk_refcnt_debug_inc(struct sock *sk)
1292*4882a593Smuzhiyun {
1293*4882a593Smuzhiyun atomic_inc(&sk->sk_prot->socks);
1294*4882a593Smuzhiyun }
1295*4882a593Smuzhiyun
1296*4882a593Smuzhiyun static inline void sk_refcnt_debug_dec(struct sock *sk)
1297*4882a593Smuzhiyun {
1298*4882a593Smuzhiyun atomic_dec(&sk->sk_prot->socks);
1299*4882a593Smuzhiyun printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
1300*4882a593Smuzhiyun sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
1301*4882a593Smuzhiyun }
1302*4882a593Smuzhiyun
1303*4882a593Smuzhiyun static inline void sk_refcnt_debug_release(const struct sock *sk)
1304*4882a593Smuzhiyun {
1305*4882a593Smuzhiyun if (refcount_read(&sk->sk_refcnt) != 1)
1306*4882a593Smuzhiyun printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
1307*4882a593Smuzhiyun sk->sk_prot->name, sk, refcount_read(&sk->sk_refcnt));
1308*4882a593Smuzhiyun }
1309*4882a593Smuzhiyun #else /* SOCK_REFCNT_DEBUG */
1310*4882a593Smuzhiyun #define sk_refcnt_debug_inc(sk) do { } while (0)
1311*4882a593Smuzhiyun #define sk_refcnt_debug_dec(sk) do { } while (0)
1312*4882a593Smuzhiyun #define sk_refcnt_debug_release(sk) do { } while (0)
1313*4882a593Smuzhiyun #endif /* SOCK_REFCNT_DEBUG */
1314*4882a593Smuzhiyun
1315*4882a593Smuzhiyun static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
1316*4882a593Smuzhiyun {
1317*4882a593Smuzhiyun if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
1318*4882a593Smuzhiyun return false;
1319*4882a593Smuzhiyun
1320*4882a593Smuzhiyun return sk->sk_prot->stream_memory_free ?
1321*4882a593Smuzhiyun sk->sk_prot->stream_memory_free(sk, wake) : true;
1322*4882a593Smuzhiyun }
1323*4882a593Smuzhiyun
1324*4882a593Smuzhiyun static inline bool sk_stream_memory_free(const struct sock *sk)
1325*4882a593Smuzhiyun {
1326*4882a593Smuzhiyun return __sk_stream_memory_free(sk, 0);
1327*4882a593Smuzhiyun }
1328*4882a593Smuzhiyun
1329*4882a593Smuzhiyun static inline bool __sk_stream_is_writeable(const struct sock *sk, int wake)
1330*4882a593Smuzhiyun {
1331*4882a593Smuzhiyun return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
1332*4882a593Smuzhiyun __sk_stream_memory_free(sk, wake);
1333*4882a593Smuzhiyun }
1334*4882a593Smuzhiyun
1335*4882a593Smuzhiyun static inline bool sk_stream_is_writeable(const struct sock *sk)
1336*4882a593Smuzhiyun {
1337*4882a593Smuzhiyun return __sk_stream_is_writeable(sk, 0);
1338*4882a593Smuzhiyun }
1339*4882a593Smuzhiyun
1340*4882a593Smuzhiyun static inline int sk_under_cgroup_hierarchy(struct sock *sk,
1341*4882a593Smuzhiyun struct cgroup *ancestor)
1342*4882a593Smuzhiyun {
1343*4882a593Smuzhiyun #ifdef CONFIG_SOCK_CGROUP_DATA
1344*4882a593Smuzhiyun return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data),
1345*4882a593Smuzhiyun ancestor);
1346*4882a593Smuzhiyun #else
1347*4882a593Smuzhiyun return -ENOTSUPP;
1348*4882a593Smuzhiyun #endif
1349*4882a593Smuzhiyun }
1350*4882a593Smuzhiyun
1351*4882a593Smuzhiyun static inline bool sk_has_memory_pressure(const struct sock *sk)
1352*4882a593Smuzhiyun {
1353*4882a593Smuzhiyun return sk->sk_prot->memory_pressure != NULL;
1354*4882a593Smuzhiyun }
1355*4882a593Smuzhiyun
1356*4882a593Smuzhiyun static inline bool sk_under_memory_pressure(const struct sock *sk)
1357*4882a593Smuzhiyun {
1358*4882a593Smuzhiyun if (!sk->sk_prot->memory_pressure)
1359*4882a593Smuzhiyun return false;
1360*4882a593Smuzhiyun
1361*4882a593Smuzhiyun if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
1362*4882a593Smuzhiyun mem_cgroup_under_socket_pressure(sk->sk_memcg))
1363*4882a593Smuzhiyun return true;
1364*4882a593Smuzhiyun
1365*4882a593Smuzhiyun return !!*sk->sk_prot->memory_pressure;
1366*4882a593Smuzhiyun }
1367*4882a593Smuzhiyun
1368*4882a593Smuzhiyun static inline long
1369*4882a593Smuzhiyun sk_memory_allocated(const struct sock *sk)
1370*4882a593Smuzhiyun {
1371*4882a593Smuzhiyun return atomic_long_read(sk->sk_prot->memory_allocated);
1372*4882a593Smuzhiyun }
1373*4882a593Smuzhiyun
1374*4882a593Smuzhiyun static inline long
1375*4882a593Smuzhiyun sk_memory_allocated_add(struct sock *sk, int amt)
1376*4882a593Smuzhiyun {
1377*4882a593Smuzhiyun return atomic_long_add_return(amt, sk->sk_prot->memory_allocated);
1378*4882a593Smuzhiyun }
1379*4882a593Smuzhiyun
1380*4882a593Smuzhiyun static inline void
1381*4882a593Smuzhiyun sk_memory_allocated_sub(struct sock *sk, int amt)
1382*4882a593Smuzhiyun {
1383*4882a593Smuzhiyun atomic_long_sub(amt, sk->sk_prot->memory_allocated);
1384*4882a593Smuzhiyun }
1385*4882a593Smuzhiyun
1386*4882a593Smuzhiyun static inline void sk_sockets_allocated_dec(struct sock *sk)
1387*4882a593Smuzhiyun {
1388*4882a593Smuzhiyun percpu_counter_dec(sk->sk_prot->sockets_allocated);
1389*4882a593Smuzhiyun }
1390*4882a593Smuzhiyun
1391*4882a593Smuzhiyun static inline void sk_sockets_allocated_inc(struct sock *sk)
1392*4882a593Smuzhiyun {
1393*4882a593Smuzhiyun percpu_counter_inc(sk->sk_prot->sockets_allocated);
1394*4882a593Smuzhiyun }
1395*4882a593Smuzhiyun
1396*4882a593Smuzhiyun static inline u64
1397*4882a593Smuzhiyun sk_sockets_allocated_read_positive(struct sock *sk)
1398*4882a593Smuzhiyun {
1399*4882a593Smuzhiyun return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
1400*4882a593Smuzhiyun }
1401*4882a593Smuzhiyun
1402*4882a593Smuzhiyun static inline int
1403*4882a593Smuzhiyun proto_sockets_allocated_sum_positive(struct proto *prot)
1404*4882a593Smuzhiyun {
1405*4882a593Smuzhiyun return percpu_counter_sum_positive(prot->sockets_allocated);
1406*4882a593Smuzhiyun }
1407*4882a593Smuzhiyun
1408*4882a593Smuzhiyun static inline long
1409*4882a593Smuzhiyun proto_memory_allocated(struct proto *prot)
1410*4882a593Smuzhiyun {
1411*4882a593Smuzhiyun return atomic_long_read(prot->memory_allocated);
1412*4882a593Smuzhiyun }
1413*4882a593Smuzhiyun
1414*4882a593Smuzhiyun static inline bool
1415*4882a593Smuzhiyun proto_memory_pressure(struct proto *prot)
1416*4882a593Smuzhiyun {
1417*4882a593Smuzhiyun if (!prot->memory_pressure)
1418*4882a593Smuzhiyun return false;
1419*4882a593Smuzhiyun return !!*prot->memory_pressure;
1420*4882a593Smuzhiyun }
1421*4882a593Smuzhiyun
1422*4882a593Smuzhiyun
1423*4882a593Smuzhiyun #ifdef CONFIG_PROC_FS
1424*4882a593Smuzhiyun /* Called with local bh disabled */
1425*4882a593Smuzhiyun void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
1426*4882a593Smuzhiyun int sock_prot_inuse_get(struct net *net, struct proto *proto);
1427*4882a593Smuzhiyun int sock_inuse_get(struct net *net);
1428*4882a593Smuzhiyun #else
1429*4882a593Smuzhiyun static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
1430*4882a593Smuzhiyun int inc)
1431*4882a593Smuzhiyun {
1432*4882a593Smuzhiyun }
1433*4882a593Smuzhiyun #endif
1434*4882a593Smuzhiyun
1435*4882a593Smuzhiyun
1436*4882a593Smuzhiyun /* With per-bucket locks this operation is not atomic, so this
1437*4882a593Smuzhiyun * version is no worse.
1438*4882a593Smuzhiyun */
1439*4882a593Smuzhiyun static inline int __sk_prot_rehash(struct sock *sk)
1440*4882a593Smuzhiyun {
1441*4882a593Smuzhiyun sk->sk_prot->unhash(sk);
1442*4882a593Smuzhiyun return sk->sk_prot->hash(sk);
1443*4882a593Smuzhiyun }
1444*4882a593Smuzhiyun
1445*4882a593Smuzhiyun /* About 10 seconds */
1446*4882a593Smuzhiyun #define SOCK_DESTROY_TIME (10*HZ)
1447*4882a593Smuzhiyun
1448*4882a593Smuzhiyun /* Ports 0-1023 can't be bound to unless you are superuser */
1449*4882a593Smuzhiyun #define PROT_SOCK 1024
1450*4882a593Smuzhiyun
1451*4882a593Smuzhiyun #define SHUTDOWN_MASK 3
1452*4882a593Smuzhiyun #define RCV_SHUTDOWN 1
1453*4882a593Smuzhiyun #define SEND_SHUTDOWN 2
1454*4882a593Smuzhiyun
1455*4882a593Smuzhiyun #define SOCK_SNDBUF_LOCK 1
1456*4882a593Smuzhiyun #define SOCK_RCVBUF_LOCK 2
1457*4882a593Smuzhiyun #define SOCK_BINDADDR_LOCK 4
1458*4882a593Smuzhiyun #define SOCK_BINDPORT_LOCK 8
1459*4882a593Smuzhiyun
1460*4882a593Smuzhiyun struct socket_alloc {
1461*4882a593Smuzhiyun struct socket socket;
1462*4882a593Smuzhiyun struct inode vfs_inode;
1463*4882a593Smuzhiyun };
1464*4882a593Smuzhiyun
1465*4882a593Smuzhiyun static inline struct socket *SOCKET_I(struct inode *inode)
1466*4882a593Smuzhiyun {
1467*4882a593Smuzhiyun return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
1468*4882a593Smuzhiyun }
1469*4882a593Smuzhiyun
1470*4882a593Smuzhiyun static inline struct inode *SOCK_INODE(struct socket *socket)
1471*4882a593Smuzhiyun {
1472*4882a593Smuzhiyun return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
1473*4882a593Smuzhiyun }
1474*4882a593Smuzhiyun
1475*4882a593Smuzhiyun /*
1476*4882a593Smuzhiyun * Functions for memory accounting
1477*4882a593Smuzhiyun */
1478*4882a593Smuzhiyun int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind);
1479*4882a593Smuzhiyun int __sk_mem_schedule(struct sock *sk, int size, int kind);
1480*4882a593Smuzhiyun void __sk_mem_reduce_allocated(struct sock *sk, int amount);
1481*4882a593Smuzhiyun void __sk_mem_reclaim(struct sock *sk, int amount);
1482*4882a593Smuzhiyun
1483*4882a593Smuzhiyun /* We used to have PAGE_SIZE here, but systems with 64KB pages
1484*4882a593Smuzhiyun * do not necessarily have 16 times more memory than 4KB-page ones.
1485*4882a593Smuzhiyun */
1486*4882a593Smuzhiyun #define SK_MEM_QUANTUM 4096
1487*4882a593Smuzhiyun #define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
1488*4882a593Smuzhiyun #define SK_MEM_SEND 0
1489*4882a593Smuzhiyun #define SK_MEM_RECV 1
1490*4882a593Smuzhiyun
1491*4882a593Smuzhiyun /* sysctl_mem values are in pages; we convert them to SK_MEM_QUANTUM units */
1492*4882a593Smuzhiyun static inline long sk_prot_mem_limits(const struct sock *sk, int index)
1493*4882a593Smuzhiyun {
1494*4882a593Smuzhiyun long val = READ_ONCE(sk->sk_prot->sysctl_mem[index]);
1495*4882a593Smuzhiyun
1496*4882a593Smuzhiyun #if PAGE_SIZE > SK_MEM_QUANTUM
1497*4882a593Smuzhiyun val <<= PAGE_SHIFT - SK_MEM_QUANTUM_SHIFT;
1498*4882a593Smuzhiyun #elif PAGE_SIZE < SK_MEM_QUANTUM
1499*4882a593Smuzhiyun val >>= SK_MEM_QUANTUM_SHIFT - PAGE_SHIFT;
1500*4882a593Smuzhiyun #endif
1501*4882a593Smuzhiyun return val;
1502*4882a593Smuzhiyun }
1503*4882a593Smuzhiyun
1504*4882a593Smuzhiyun static inline int sk_mem_pages(int amt)
1505*4882a593Smuzhiyun {
1506*4882a593Smuzhiyun return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
1507*4882a593Smuzhiyun }
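/*
 * Worked example (assuming SK_MEM_QUANTUM == 4096): sk_mem_pages(1) and
 * sk_mem_pages(4096) both return 1, while sk_mem_pages(4097) rounds up
 * to 2, so a partially used quantum is always charged in full.
 */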
1508*4882a593Smuzhiyun
1509*4882a593Smuzhiyun static inline bool sk_has_account(struct sock *sk)
1510*4882a593Smuzhiyun {
1511*4882a593Smuzhiyun /* return true if protocol supports memory accounting */
1512*4882a593Smuzhiyun return !!sk->sk_prot->memory_allocated;
1513*4882a593Smuzhiyun }
1514*4882a593Smuzhiyun
1515*4882a593Smuzhiyun static inline bool sk_wmem_schedule(struct sock *sk, int size)
1516*4882a593Smuzhiyun {
1517*4882a593Smuzhiyun int delta;
1518*4882a593Smuzhiyun
1519*4882a593Smuzhiyun if (!sk_has_account(sk))
1520*4882a593Smuzhiyun return true;
1521*4882a593Smuzhiyun delta = size - sk->sk_forward_alloc;
1522*4882a593Smuzhiyun return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_SEND);
1523*4882a593Smuzhiyun }
1524*4882a593Smuzhiyun
1525*4882a593Smuzhiyun static inline bool
1526*4882a593Smuzhiyun sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
1527*4882a593Smuzhiyun {
1528*4882a593Smuzhiyun int delta;
1529*4882a593Smuzhiyun
1530*4882a593Smuzhiyun if (!sk_has_account(sk))
1531*4882a593Smuzhiyun return true;
1532*4882a593Smuzhiyun delta = size - sk->sk_forward_alloc;
1533*4882a593Smuzhiyun return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_RECV) ||
1534*4882a593Smuzhiyun skb_pfmemalloc(skb);
1535*4882a593Smuzhiyun }
1536*4882a593Smuzhiyun
1537*4882a593Smuzhiyun static inline void sk_mem_reclaim(struct sock *sk)
1538*4882a593Smuzhiyun {
1539*4882a593Smuzhiyun if (!sk_has_account(sk))
1540*4882a593Smuzhiyun return;
1541*4882a593Smuzhiyun if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
1542*4882a593Smuzhiyun __sk_mem_reclaim(sk, sk->sk_forward_alloc);
1543*4882a593Smuzhiyun }
1544*4882a593Smuzhiyun
1545*4882a593Smuzhiyun static inline void sk_mem_reclaim_partial(struct sock *sk)
1546*4882a593Smuzhiyun {
1547*4882a593Smuzhiyun if (!sk_has_account(sk))
1548*4882a593Smuzhiyun return;
1549*4882a593Smuzhiyun if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
1550*4882a593Smuzhiyun __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
1551*4882a593Smuzhiyun }
1552*4882a593Smuzhiyun
1553*4882a593Smuzhiyun static inline void sk_mem_charge(struct sock *sk, int size)
1554*4882a593Smuzhiyun {
1555*4882a593Smuzhiyun if (!sk_has_account(sk))
1556*4882a593Smuzhiyun return;
1557*4882a593Smuzhiyun sk->sk_forward_alloc -= size;
1558*4882a593Smuzhiyun }
1559*4882a593Smuzhiyun
1560*4882a593Smuzhiyun static inline void sk_mem_uncharge(struct sock *sk, int size)
1561*4882a593Smuzhiyun {
1562*4882a593Smuzhiyun if (!sk_has_account(sk))
1563*4882a593Smuzhiyun return;
1564*4882a593Smuzhiyun sk->sk_forward_alloc += size;
1565*4882a593Smuzhiyun
1566*4882a593Smuzhiyun /* Avoid a possible overflow.
1567*4882a593Smuzhiyun * TCP send queues can make this happen, if sk_mem_reclaim()
1568*4882a593Smuzhiyun * is not called and more than 2 GBytes are released at once.
1569*4882a593Smuzhiyun *
1570*4882a593Smuzhiyun * If we reach 2 MBytes, reclaim 1 MBytes right now, there is
1571*4882a593Smuzhiyun * no need to hold that much forward allocation anyway.
1572*4882a593Smuzhiyun */
1573*4882a593Smuzhiyun if (unlikely(sk->sk_forward_alloc >= 1 << 21))
1574*4882a593Smuzhiyun __sk_mem_reclaim(sk, 1 << 20);
1575*4882a593Smuzhiyun }
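/*
 * Illustrative send-side accounting pattern (a sketch, not copied from any
 * specific caller): schedule forward allocation before consuming it, then
 * charge the skb's truesize once the skb is queued on the socket.
 *
 *	if (!sk_wmem_schedule(sk, skb->truesize))
 *		return -ENOMEM;
 *	skb_queue_tail(&sk->sk_write_queue, skb);
 *	sk_wmem_queued_add(sk, skb->truesize);
 *	sk_mem_charge(sk, skb->truesize);
 */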
1576*4882a593Smuzhiyun
1577*4882a593Smuzhiyun DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
1578*4882a593Smuzhiyun static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
1579*4882a593Smuzhiyun {
1580*4882a593Smuzhiyun sk_wmem_queued_add(sk, -skb->truesize);
1581*4882a593Smuzhiyun sk_mem_uncharge(sk, skb->truesize);
1582*4882a593Smuzhiyun if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
1583*4882a593Smuzhiyun !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
1584*4882a593Smuzhiyun skb_ext_reset(skb);
1585*4882a593Smuzhiyun skb_zcopy_clear(skb, true);
1586*4882a593Smuzhiyun sk->sk_tx_skb_cache = skb;
1587*4882a593Smuzhiyun return;
1588*4882a593Smuzhiyun }
1589*4882a593Smuzhiyun __kfree_skb(skb);
1590*4882a593Smuzhiyun }
1591*4882a593Smuzhiyun
1592*4882a593Smuzhiyun static inline void sock_release_ownership(struct sock *sk)
1593*4882a593Smuzhiyun {
1594*4882a593Smuzhiyun if (sk->sk_lock.owned) {
1595*4882a593Smuzhiyun sk->sk_lock.owned = 0;
1596*4882a593Smuzhiyun
1597*4882a593Smuzhiyun /* The sk_lock has mutex_unlock() semantics: */
1598*4882a593Smuzhiyun mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
1599*4882a593Smuzhiyun }
1600*4882a593Smuzhiyun }
1601*4882a593Smuzhiyun
1602*4882a593Smuzhiyun /*
1603*4882a593Smuzhiyun * Macro so as to not evaluate some arguments when
1604*4882a593Smuzhiyun * lockdep is not enabled.
1605*4882a593Smuzhiyun *
1606*4882a593Smuzhiyun * Mark both the sk_lock and the sk_lock.slock as a
1607*4882a593Smuzhiyun * per-address-family lock class.
1608*4882a593Smuzhiyun */
1609*4882a593Smuzhiyun #define sock_lock_init_class_and_name(sk, sname, skey, name, key) \
1610*4882a593Smuzhiyun do { \
1611*4882a593Smuzhiyun sk->sk_lock.owned = 0; \
1612*4882a593Smuzhiyun init_waitqueue_head(&sk->sk_lock.wq); \
1613*4882a593Smuzhiyun spin_lock_init(&(sk)->sk_lock.slock); \
1614*4882a593Smuzhiyun debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
1615*4882a593Smuzhiyun sizeof((sk)->sk_lock)); \
1616*4882a593Smuzhiyun lockdep_set_class_and_name(&(sk)->sk_lock.slock, \
1617*4882a593Smuzhiyun (skey), (sname)); \
1618*4882a593Smuzhiyun lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
1619*4882a593Smuzhiyun } while (0)
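/*
 * Hedged example of how a protocol might give its sockets their own lockdep
 * class; the "AF_FOO" strings and key variables are placeholders:
 *
 *	static struct lock_class_key af_foo_slock_key;
 *	static struct lock_class_key af_foo_sk_lock_key;
 *
 *	sock_lock_init_class_and_name(sk,
 *				      "slock-AF_FOO", &af_foo_slock_key,
 *				      "sk_lock-AF_FOO", &af_foo_sk_lock_key);
 */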
1620*4882a593Smuzhiyun
1621*4882a593Smuzhiyun #ifdef CONFIG_LOCKDEP
1622*4882a593Smuzhiyun static inline bool lockdep_sock_is_held(const struct sock *sk)
1623*4882a593Smuzhiyun {
1624*4882a593Smuzhiyun return lockdep_is_held(&sk->sk_lock) ||
1625*4882a593Smuzhiyun lockdep_is_held(&sk->sk_lock.slock);
1626*4882a593Smuzhiyun }
1627*4882a593Smuzhiyun #endif
1628*4882a593Smuzhiyun
1629*4882a593Smuzhiyun void lock_sock_nested(struct sock *sk, int subclass);
1630*4882a593Smuzhiyun
1631*4882a593Smuzhiyun static inline void lock_sock(struct sock *sk)
1632*4882a593Smuzhiyun {
1633*4882a593Smuzhiyun lock_sock_nested(sk, 0);
1634*4882a593Smuzhiyun }
1635*4882a593Smuzhiyun
1636*4882a593Smuzhiyun void __release_sock(struct sock *sk);
1637*4882a593Smuzhiyun void release_sock(struct sock *sk);
1638*4882a593Smuzhiyun
1639*4882a593Smuzhiyun /* BH context may only use the following locking interface. */
1640*4882a593Smuzhiyun #define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock))
1641*4882a593Smuzhiyun #define bh_lock_sock_nested(__sk) \
1642*4882a593Smuzhiyun spin_lock_nested(&((__sk)->sk_lock.slock), \
1643*4882a593Smuzhiyun SINGLE_DEPTH_NESTING)
1644*4882a593Smuzhiyun #define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
1645*4882a593Smuzhiyun
1646*4882a593Smuzhiyun bool lock_sock_fast(struct sock *sk);
1647*4882a593Smuzhiyun /**
1648*4882a593Smuzhiyun * unlock_sock_fast - complement of lock_sock_fast
1649*4882a593Smuzhiyun * @sk: socket
1650*4882a593Smuzhiyun * @slow: slow mode
1651*4882a593Smuzhiyun *
1652*4882a593Smuzhiyun * Fast unlock of the socket, for user context.
1653*4882a593Smuzhiyun * If slow mode is on, we call the regular release_sock().
1654*4882a593Smuzhiyun */
1655*4882a593Smuzhiyun static inline void unlock_sock_fast(struct sock *sk, bool slow)
1656*4882a593Smuzhiyun {
1657*4882a593Smuzhiyun if (slow)
1658*4882a593Smuzhiyun release_sock(sk);
1659*4882a593Smuzhiyun else
1660*4882a593Smuzhiyun spin_unlock_bh(&sk->sk_lock.slock);
1661*4882a593Smuzhiyun }
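/*
 * The two fast-path helpers are meant to be paired; a minimal sketch
 * (the work in the middle must be short and must not sleep):
 *
 *	bool slow = lock_sock_fast(sk);
 *
 *	... short, non-sleeping work on the socket ...
 *	unlock_sock_fast(sk, slow);
 */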
1662*4882a593Smuzhiyun
1663*4882a593Smuzhiyun /* Used by processes to "lock" a socket state, so that
1664*4882a593Smuzhiyun * interrupts and bottom half handlers won't change it
1665*4882a593Smuzhiyun * from under us. It essentially blocks any incoming
1666*4882a593Smuzhiyun * packets, so that we won't get any new data or any
1667*4882a593Smuzhiyun * packets that change the state of the socket.
1668*4882a593Smuzhiyun *
1669*4882a593Smuzhiyun * While locked, BH processing will add new packets to
1670*4882a593Smuzhiyun * the backlog queue. This queue is processed by the
1671*4882a593Smuzhiyun * owner of the socket lock right before it is released.
1672*4882a593Smuzhiyun *
1673*4882a593Smuzhiyun * Since ~2.3.5 it has also been an exclusive sleep lock serializing
1674*4882a593Smuzhiyun * accesses from user process context.
1675*4882a593Smuzhiyun */
1676*4882a593Smuzhiyun
1677*4882a593Smuzhiyun static inline void sock_owned_by_me(const struct sock *sk)
1678*4882a593Smuzhiyun {
1679*4882a593Smuzhiyun #ifdef CONFIG_LOCKDEP
1680*4882a593Smuzhiyun WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks);
1681*4882a593Smuzhiyun #endif
1682*4882a593Smuzhiyun }
1683*4882a593Smuzhiyun
1684*4882a593Smuzhiyun static inline bool sock_owned_by_user(const struct sock *sk)
1685*4882a593Smuzhiyun {
1686*4882a593Smuzhiyun sock_owned_by_me(sk);
1687*4882a593Smuzhiyun return sk->sk_lock.owned;
1688*4882a593Smuzhiyun }
1689*4882a593Smuzhiyun
1690*4882a593Smuzhiyun static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
1691*4882a593Smuzhiyun {
1692*4882a593Smuzhiyun return sk->sk_lock.owned;
1693*4882a593Smuzhiyun }
1694*4882a593Smuzhiyun
1695*4882a593Smuzhiyun /* no reclassification while locks are held */
1696*4882a593Smuzhiyun static inline bool sock_allow_reclassification(const struct sock *csk)
1697*4882a593Smuzhiyun {
1698*4882a593Smuzhiyun struct sock *sk = (struct sock *)csk;
1699*4882a593Smuzhiyun
1700*4882a593Smuzhiyun return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock);
1701*4882a593Smuzhiyun }
1702*4882a593Smuzhiyun
1703*4882a593Smuzhiyun struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1704*4882a593Smuzhiyun struct proto *prot, int kern);
1705*4882a593Smuzhiyun void sk_free(struct sock *sk);
1706*4882a593Smuzhiyun void sk_destruct(struct sock *sk);
1707*4882a593Smuzhiyun struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
1708*4882a593Smuzhiyun void sk_free_unlock_clone(struct sock *sk);
1709*4882a593Smuzhiyun
1710*4882a593Smuzhiyun struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
1711*4882a593Smuzhiyun gfp_t priority);
1712*4882a593Smuzhiyun void __sock_wfree(struct sk_buff *skb);
1713*4882a593Smuzhiyun void sock_wfree(struct sk_buff *skb);
1714*4882a593Smuzhiyun struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
1715*4882a593Smuzhiyun gfp_t priority);
1716*4882a593Smuzhiyun void skb_orphan_partial(struct sk_buff *skb);
1717*4882a593Smuzhiyun void sock_rfree(struct sk_buff *skb);
1718*4882a593Smuzhiyun void sock_efree(struct sk_buff *skb);
1719*4882a593Smuzhiyun #ifdef CONFIG_INET
1720*4882a593Smuzhiyun void sock_edemux(struct sk_buff *skb);
1721*4882a593Smuzhiyun void sock_pfree(struct sk_buff *skb);
1722*4882a593Smuzhiyun #else
1723*4882a593Smuzhiyun #define sock_edemux sock_efree
1724*4882a593Smuzhiyun #endif
1725*4882a593Smuzhiyun
1726*4882a593Smuzhiyun int sock_setsockopt(struct socket *sock, int level, int op,
1727*4882a593Smuzhiyun sockptr_t optval, unsigned int optlen);
1728*4882a593Smuzhiyun
1729*4882a593Smuzhiyun int sock_getsockopt(struct socket *sock, int level, int op,
1730*4882a593Smuzhiyun char __user *optval, int __user *optlen);
1731*4882a593Smuzhiyun int sock_gettstamp(struct socket *sock, void __user *userstamp,
1732*4882a593Smuzhiyun bool timeval, bool time32);
1733*4882a593Smuzhiyun struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
1734*4882a593Smuzhiyun int noblock, int *errcode);
1735*4882a593Smuzhiyun struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1736*4882a593Smuzhiyun unsigned long data_len, int noblock,
1737*4882a593Smuzhiyun int *errcode, int max_page_order);
1738*4882a593Smuzhiyun void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
1739*4882a593Smuzhiyun void sock_kfree_s(struct sock *sk, void *mem, int size);
1740*4882a593Smuzhiyun void sock_kzfree_s(struct sock *sk, void *mem, int size);
1741*4882a593Smuzhiyun void sk_send_sigurg(struct sock *sk);
1742*4882a593Smuzhiyun
1743*4882a593Smuzhiyun struct sockcm_cookie {
1744*4882a593Smuzhiyun u64 transmit_time;
1745*4882a593Smuzhiyun u32 mark;
1746*4882a593Smuzhiyun u16 tsflags;
1747*4882a593Smuzhiyun };
1748*4882a593Smuzhiyun
1749*4882a593Smuzhiyun static inline void sockcm_init(struct sockcm_cookie *sockc,
1750*4882a593Smuzhiyun const struct sock *sk)
1751*4882a593Smuzhiyun {
1752*4882a593Smuzhiyun *sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
1753*4882a593Smuzhiyun }
1754*4882a593Smuzhiyun
1755*4882a593Smuzhiyun int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
1756*4882a593Smuzhiyun struct sockcm_cookie *sockc);
1757*4882a593Smuzhiyun int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
1758*4882a593Smuzhiyun struct sockcm_cookie *sockc);
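/*
 * Typical (illustrative) sendmsg-side use of the cookie helpers; error
 * handling is abbreviated and the surrounding function is assumed to own msg:
 *
 *	struct sockcm_cookie sockc;
 *
 *	sockcm_init(&sockc, sk);
 *	if (msg->msg_controllen) {
 *		err = sock_cmsg_send(sk, msg, &sockc);
 *		if (unlikely(err))
 *			return err;
 *	}
 */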
1759*4882a593Smuzhiyun
1760*4882a593Smuzhiyun /*
1761*4882a593Smuzhiyun * Functions to fill in entries in struct proto_ops when a protocol
1762*4882a593Smuzhiyun * does not implement a particular function.
1763*4882a593Smuzhiyun */
1764*4882a593Smuzhiyun int sock_no_bind(struct socket *, struct sockaddr *, int);
1765*4882a593Smuzhiyun int sock_no_connect(struct socket *, struct sockaddr *, int, int);
1766*4882a593Smuzhiyun int sock_no_socketpair(struct socket *, struct socket *);
1767*4882a593Smuzhiyun int sock_no_accept(struct socket *, struct socket *, int, bool);
1768*4882a593Smuzhiyun int sock_no_getname(struct socket *, struct sockaddr *, int);
1769*4882a593Smuzhiyun int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
1770*4882a593Smuzhiyun int sock_no_listen(struct socket *, int);
1771*4882a593Smuzhiyun int sock_no_shutdown(struct socket *, int);
1772*4882a593Smuzhiyun int sock_no_sendmsg(struct socket *, struct msghdr *, size_t);
1773*4882a593Smuzhiyun int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
1774*4882a593Smuzhiyun int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
1775*4882a593Smuzhiyun int sock_no_mmap(struct file *file, struct socket *sock,
1776*4882a593Smuzhiyun struct vm_area_struct *vma);
1777*4882a593Smuzhiyun ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
1778*4882a593Smuzhiyun size_t size, int flags);
1779*4882a593Smuzhiyun ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
1780*4882a593Smuzhiyun int offset, size_t size, int flags);
1781*4882a593Smuzhiyun
1782*4882a593Smuzhiyun /*
1783*4882a593Smuzhiyun * Functions to fill in entries in struct proto_ops when a protocol
1784*4882a593Smuzhiyun * uses the inet style.
1785*4882a593Smuzhiyun */
1786*4882a593Smuzhiyun int sock_common_getsockopt(struct socket *sock, int level, int optname,
1787*4882a593Smuzhiyun char __user *optval, int __user *optlen);
1788*4882a593Smuzhiyun int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
1789*4882a593Smuzhiyun int flags);
1790*4882a593Smuzhiyun int sock_common_setsockopt(struct socket *sock, int level, int optname,
1791*4882a593Smuzhiyun sockptr_t optval, unsigned int optlen);
1792*4882a593Smuzhiyun
1793*4882a593Smuzhiyun void sk_common_release(struct sock *sk);
1794*4882a593Smuzhiyun
1795*4882a593Smuzhiyun /*
1796*4882a593Smuzhiyun * Default socket callbacks and setup code
1797*4882a593Smuzhiyun */
1798*4882a593Smuzhiyun
1799*4882a593Smuzhiyun /* Initialise core socket variables */
1800*4882a593Smuzhiyun void sock_init_data(struct socket *sock, struct sock *sk);
1801*4882a593Smuzhiyun
1802*4882a593Smuzhiyun /*
1803*4882a593Smuzhiyun * Socket reference counting postulates.
1804*4882a593Smuzhiyun *
1805*4882a593Smuzhiyun * * Each user of a socket SHOULD hold a reference count.
1806*4882a593Smuzhiyun * * Each access point to a socket (a hash table bucket, a reference from a list,
1807*4882a593Smuzhiyun * a running timer, an skb in flight) MUST hold a reference count.
1808*4882a593Smuzhiyun * * When the reference count hits 0, it means it will never increase back.
1809*4882a593Smuzhiyun * * When the reference count hits 0, it means that no references from
1810*4882a593Smuzhiyun * outside exist to this socket and the current process on the current CPU
1811*4882a593Smuzhiyun * is the last user and may/should destroy this socket.
1812*4882a593Smuzhiyun * * sk_free is called from any context: process, BH, IRQ. When
1813*4882a593Smuzhiyun * it is called, the socket has no references from outside -> sk_free
1814*4882a593Smuzhiyun * may release descendant resources allocated by the socket, but
1815*4882a593Smuzhiyun * by the time it is called, the socket is NOT referenced by any
1816*4882a593Smuzhiyun * hash tables, lists etc.
1817*4882a593Smuzhiyun * * Packets, delivered from outside (from network or from another process)
1818*4882a593Smuzhiyun * and enqueued on receive/error queues SHOULD NOT grab a reference count
1819*4882a593Smuzhiyun * while they sit in a queue. Otherwise, packets can be leaked when a
1820*4882a593Smuzhiyun * socket is looked up by one CPU and unhashing is done by another CPU.
1821*4882a593Smuzhiyun * This is true for udp/raw, netlink (leaks to receive and error queues), tcp
1822*4882a593Smuzhiyun * (leaks to backlog). The packet socket does all the processing inside
1823*4882a593Smuzhiyun * BR_NETPROTO_LOCK, so it does not have this race condition. UNIX sockets
1824*4882a593Smuzhiyun * use a separate SMP lock, so they are prone to it too.
1825*4882a593Smuzhiyun */
1826*4882a593Smuzhiyun
1827*4882a593Smuzhiyun /* Ungrab socket and destroy it, if it was the last reference. */
1828*4882a593Smuzhiyun static inline void sock_put(struct sock *sk)
1829*4882a593Smuzhiyun {
1830*4882a593Smuzhiyun if (refcount_dec_and_test(&sk->sk_refcnt))
1831*4882a593Smuzhiyun sk_free(sk);
1832*4882a593Smuzhiyun }
1833*4882a593Smuzhiyun /* Generic version of sock_put(), dealing with all sockets
1834*4882a593Smuzhiyun * (TCP_TIMEWAIT, TCP_NEW_SYN_RECV, ESTABLISHED...)
1835*4882a593Smuzhiyun */
1836*4882a593Smuzhiyun void sock_gen_put(struct sock *sk);
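/*
 * Sketch of the lookup-side convention described above, assuming the lookup
 * helper (a placeholder name here) returned the socket with its reference
 * count already taken:
 *
 *	sk = some_lookup_function(...);
 *	if (sk) {
 *		... use the socket ...
 *		sock_put(sk);
 *	}
 */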
1837*4882a593Smuzhiyun
1838*4882a593Smuzhiyun int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
1839*4882a593Smuzhiyun unsigned int trim_cap, bool refcounted);
1840*4882a593Smuzhiyun static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
1841*4882a593Smuzhiyun const int nested)
1842*4882a593Smuzhiyun {
1843*4882a593Smuzhiyun return __sk_receive_skb(sk, skb, nested, 1, true);
1844*4882a593Smuzhiyun }
1845*4882a593Smuzhiyun
1846*4882a593Smuzhiyun static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
1847*4882a593Smuzhiyun {
1848*4882a593Smuzhiyun /* sk_tx_queue_mapping accepts only up to a 16-bit value */
1849*4882a593Smuzhiyun if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
1850*4882a593Smuzhiyun return;
1851*4882a593Smuzhiyun sk->sk_tx_queue_mapping = tx_queue;
1852*4882a593Smuzhiyun }
1853*4882a593Smuzhiyun
1854*4882a593Smuzhiyun #define NO_QUEUE_MAPPING USHRT_MAX
1855*4882a593Smuzhiyun
1856*4882a593Smuzhiyun static inline void sk_tx_queue_clear(struct sock *sk)
1857*4882a593Smuzhiyun {
1858*4882a593Smuzhiyun sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
1859*4882a593Smuzhiyun }
1860*4882a593Smuzhiyun
1861*4882a593Smuzhiyun static inline int sk_tx_queue_get(const struct sock *sk)
1862*4882a593Smuzhiyun {
1863*4882a593Smuzhiyun if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
1864*4882a593Smuzhiyun return sk->sk_tx_queue_mapping;
1865*4882a593Smuzhiyun
1866*4882a593Smuzhiyun return -1;
1867*4882a593Smuzhiyun }
1868*4882a593Smuzhiyun
1869*4882a593Smuzhiyun static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
1870*4882a593Smuzhiyun {
1871*4882a593Smuzhiyun #ifdef CONFIG_XPS
1872*4882a593Smuzhiyun if (skb_rx_queue_recorded(skb)) {
1873*4882a593Smuzhiyun u16 rx_queue = skb_get_rx_queue(skb);
1874*4882a593Smuzhiyun
1875*4882a593Smuzhiyun if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING))
1876*4882a593Smuzhiyun return;
1877*4882a593Smuzhiyun
1878*4882a593Smuzhiyun sk->sk_rx_queue_mapping = rx_queue;
1879*4882a593Smuzhiyun }
1880*4882a593Smuzhiyun #endif
1881*4882a593Smuzhiyun }
1882*4882a593Smuzhiyun
1883*4882a593Smuzhiyun static inline void sk_rx_queue_clear(struct sock *sk)
1884*4882a593Smuzhiyun {
1885*4882a593Smuzhiyun #ifdef CONFIG_XPS
1886*4882a593Smuzhiyun sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
1887*4882a593Smuzhiyun #endif
1888*4882a593Smuzhiyun }
1889*4882a593Smuzhiyun
1890*4882a593Smuzhiyun #ifdef CONFIG_XPS
1891*4882a593Smuzhiyun static inline int sk_rx_queue_get(const struct sock *sk)
1892*4882a593Smuzhiyun {
1893*4882a593Smuzhiyun if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
1894*4882a593Smuzhiyun return sk->sk_rx_queue_mapping;
1895*4882a593Smuzhiyun
1896*4882a593Smuzhiyun return -1;
1897*4882a593Smuzhiyun }
1898*4882a593Smuzhiyun #endif
1899*4882a593Smuzhiyun
1900*4882a593Smuzhiyun static inline void sk_set_socket(struct sock *sk, struct socket *sock)
1901*4882a593Smuzhiyun {
1902*4882a593Smuzhiyun sk->sk_socket = sock;
1903*4882a593Smuzhiyun }
1904*4882a593Smuzhiyun
1905*4882a593Smuzhiyun static inline wait_queue_head_t *sk_sleep(struct sock *sk)
1906*4882a593Smuzhiyun {
1907*4882a593Smuzhiyun BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
1908*4882a593Smuzhiyun return &rcu_dereference_raw(sk->sk_wq)->wait;
1909*4882a593Smuzhiyun }
1910*4882a593Smuzhiyun /* Detach socket from process context.
1911*4882a593Smuzhiyun * Announce socket dead, detach it from wait queue and inode.
1912*4882a593Smuzhiyun * Note that the parent inode holds a reference count on this struct sock;
1913*4882a593Smuzhiyun * we do not release it in this function, because the protocol
1914*4882a593Smuzhiyun * probably wants some additional cleanup or even to continue
1915*4882a593Smuzhiyun * working with this socket (TCP).
1916*4882a593Smuzhiyun */
1917*4882a593Smuzhiyun static inline void sock_orphan(struct sock *sk)
1918*4882a593Smuzhiyun {
1919*4882a593Smuzhiyun write_lock_bh(&sk->sk_callback_lock);
1920*4882a593Smuzhiyun sock_set_flag(sk, SOCK_DEAD);
1921*4882a593Smuzhiyun sk_set_socket(sk, NULL);
1922*4882a593Smuzhiyun sk->sk_wq = NULL;
1923*4882a593Smuzhiyun write_unlock_bh(&sk->sk_callback_lock);
1924*4882a593Smuzhiyun }
1925*4882a593Smuzhiyun
1926*4882a593Smuzhiyun static inline void sock_graft(struct sock *sk, struct socket *parent)
1927*4882a593Smuzhiyun {
1928*4882a593Smuzhiyun WARN_ON(parent->sk);
1929*4882a593Smuzhiyun write_lock_bh(&sk->sk_callback_lock);
1930*4882a593Smuzhiyun rcu_assign_pointer(sk->sk_wq, &parent->wq);
1931*4882a593Smuzhiyun parent->sk = sk;
1932*4882a593Smuzhiyun sk_set_socket(sk, parent);
1933*4882a593Smuzhiyun sk->sk_uid = SOCK_INODE(parent)->i_uid;
1934*4882a593Smuzhiyun security_sock_graft(sk, parent);
1935*4882a593Smuzhiyun write_unlock_bh(&sk->sk_callback_lock);
1936*4882a593Smuzhiyun }
1937*4882a593Smuzhiyun
1938*4882a593Smuzhiyun kuid_t sock_i_uid(struct sock *sk);
1939*4882a593Smuzhiyun unsigned long sock_i_ino(struct sock *sk);
1940*4882a593Smuzhiyun
1941*4882a593Smuzhiyun static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
1942*4882a593Smuzhiyun {
1943*4882a593Smuzhiyun return sk ? sk->sk_uid : make_kuid(net->user_ns, 0);
1944*4882a593Smuzhiyun }
1945*4882a593Smuzhiyun
1946*4882a593Smuzhiyun static inline u32 net_tx_rndhash(void)
1947*4882a593Smuzhiyun {
1948*4882a593Smuzhiyun u32 v = prandom_u32();
1949*4882a593Smuzhiyun
1950*4882a593Smuzhiyun return v ?: 1;
1951*4882a593Smuzhiyun }
1952*4882a593Smuzhiyun
1953*4882a593Smuzhiyun static inline void sk_set_txhash(struct sock *sk)
1954*4882a593Smuzhiyun {
1955*4882a593Smuzhiyun /* This pairs with READ_ONCE() in skb_set_hash_from_sk() */
1956*4882a593Smuzhiyun WRITE_ONCE(sk->sk_txhash, net_tx_rndhash());
1957*4882a593Smuzhiyun }
1958*4882a593Smuzhiyun
1959*4882a593Smuzhiyun static inline bool sk_rethink_txhash(struct sock *sk)
1960*4882a593Smuzhiyun {
1961*4882a593Smuzhiyun if (sk->sk_txhash) {
1962*4882a593Smuzhiyun sk_set_txhash(sk);
1963*4882a593Smuzhiyun return true;
1964*4882a593Smuzhiyun }
1965*4882a593Smuzhiyun return false;
1966*4882a593Smuzhiyun }
1967*4882a593Smuzhiyun
1968*4882a593Smuzhiyun static inline struct dst_entry *
1969*4882a593Smuzhiyun __sk_dst_get(struct sock *sk)
1970*4882a593Smuzhiyun {
1971*4882a593Smuzhiyun return rcu_dereference_check(sk->sk_dst_cache,
1972*4882a593Smuzhiyun lockdep_sock_is_held(sk));
1973*4882a593Smuzhiyun }
1974*4882a593Smuzhiyun
1975*4882a593Smuzhiyun static inline struct dst_entry *
1976*4882a593Smuzhiyun sk_dst_get(struct sock *sk)
1977*4882a593Smuzhiyun {
1978*4882a593Smuzhiyun struct dst_entry *dst;
1979*4882a593Smuzhiyun
1980*4882a593Smuzhiyun rcu_read_lock();
1981*4882a593Smuzhiyun dst = rcu_dereference(sk->sk_dst_cache);
1982*4882a593Smuzhiyun if (dst && !atomic_inc_not_zero(&dst->__refcnt))
1983*4882a593Smuzhiyun dst = NULL;
1984*4882a593Smuzhiyun rcu_read_unlock();
1985*4882a593Smuzhiyun return dst;
1986*4882a593Smuzhiyun }
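/*
 * sk_dst_get() returns a referenced dst (or NULL); a minimal usage sketch:
 *
 *	struct dst_entry *dst = sk_dst_get(sk);
 *
 *	if (dst) {
 *		... use dst outside the RCU section ...
 *		dst_release(dst);
 *	}
 *
 * __sk_dst_get() by contrast returns an unreferenced pointer and is only
 * valid while the socket lock is held.
 */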
1987*4882a593Smuzhiyun
1988*4882a593Smuzhiyun static inline void __dst_negative_advice(struct sock *sk)
1989*4882a593Smuzhiyun {
1990*4882a593Smuzhiyun struct dst_entry *ndst, *dst = __sk_dst_get(sk);
1991*4882a593Smuzhiyun
1992*4882a593Smuzhiyun if (dst && dst->ops->negative_advice) {
1993*4882a593Smuzhiyun ndst = dst->ops->negative_advice(dst);
1994*4882a593Smuzhiyun
1995*4882a593Smuzhiyun if (ndst != dst) {
1996*4882a593Smuzhiyun rcu_assign_pointer(sk->sk_dst_cache, ndst);
1997*4882a593Smuzhiyun sk_tx_queue_clear(sk);
1998*4882a593Smuzhiyun sk->sk_dst_pending_confirm = 0;
1999*4882a593Smuzhiyun }
2000*4882a593Smuzhiyun }
2001*4882a593Smuzhiyun }
2002*4882a593Smuzhiyun
2003*4882a593Smuzhiyun static inline void dst_negative_advice(struct sock *sk)
2004*4882a593Smuzhiyun {
2005*4882a593Smuzhiyun sk_rethink_txhash(sk);
2006*4882a593Smuzhiyun __dst_negative_advice(sk);
2007*4882a593Smuzhiyun }
2008*4882a593Smuzhiyun
2009*4882a593Smuzhiyun static inline void
2010*4882a593Smuzhiyun __sk_dst_set(struct sock *sk, struct dst_entry *dst)
2011*4882a593Smuzhiyun {
2012*4882a593Smuzhiyun struct dst_entry *old_dst;
2013*4882a593Smuzhiyun
2014*4882a593Smuzhiyun sk_tx_queue_clear(sk);
2015*4882a593Smuzhiyun sk->sk_dst_pending_confirm = 0;
2016*4882a593Smuzhiyun old_dst = rcu_dereference_protected(sk->sk_dst_cache,
2017*4882a593Smuzhiyun lockdep_sock_is_held(sk));
2018*4882a593Smuzhiyun rcu_assign_pointer(sk->sk_dst_cache, dst);
2019*4882a593Smuzhiyun dst_release(old_dst);
2020*4882a593Smuzhiyun }
2021*4882a593Smuzhiyun
2022*4882a593Smuzhiyun static inline void
2023*4882a593Smuzhiyun sk_dst_set(struct sock *sk, struct dst_entry *dst)
2024*4882a593Smuzhiyun {
2025*4882a593Smuzhiyun struct dst_entry *old_dst;
2026*4882a593Smuzhiyun
2027*4882a593Smuzhiyun sk_tx_queue_clear(sk);
2028*4882a593Smuzhiyun sk->sk_dst_pending_confirm = 0;
2029*4882a593Smuzhiyun old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
2030*4882a593Smuzhiyun dst_release(old_dst);
2031*4882a593Smuzhiyun }
2032*4882a593Smuzhiyun
2033*4882a593Smuzhiyun static inline void
2034*4882a593Smuzhiyun __sk_dst_reset(struct sock *sk)
2035*4882a593Smuzhiyun {
2036*4882a593Smuzhiyun __sk_dst_set(sk, NULL);
2037*4882a593Smuzhiyun }
2038*4882a593Smuzhiyun
2039*4882a593Smuzhiyun static inline void
2040*4882a593Smuzhiyun sk_dst_reset(struct sock *sk)
2041*4882a593Smuzhiyun {
2042*4882a593Smuzhiyun sk_dst_set(sk, NULL);
2043*4882a593Smuzhiyun }
2044*4882a593Smuzhiyun
2045*4882a593Smuzhiyun struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
2046*4882a593Smuzhiyun
2047*4882a593Smuzhiyun struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
2048*4882a593Smuzhiyun
2049*4882a593Smuzhiyun static inline void sk_dst_confirm(struct sock *sk)
2050*4882a593Smuzhiyun {
2051*4882a593Smuzhiyun if (!READ_ONCE(sk->sk_dst_pending_confirm))
2052*4882a593Smuzhiyun WRITE_ONCE(sk->sk_dst_pending_confirm, 1);
2053*4882a593Smuzhiyun }
2054*4882a593Smuzhiyun
2055*4882a593Smuzhiyun static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
2056*4882a593Smuzhiyun {
2057*4882a593Smuzhiyun if (skb_get_dst_pending_confirm(skb)) {
2058*4882a593Smuzhiyun struct sock *sk = skb->sk;
2059*4882a593Smuzhiyun unsigned long now = jiffies;
2060*4882a593Smuzhiyun
2061*4882a593Smuzhiyun /* avoid dirtying neighbour */
2062*4882a593Smuzhiyun if (READ_ONCE(n->confirmed) != now)
2063*4882a593Smuzhiyun WRITE_ONCE(n->confirmed, now);
2064*4882a593Smuzhiyun if (sk && READ_ONCE(sk->sk_dst_pending_confirm))
2065*4882a593Smuzhiyun WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
2066*4882a593Smuzhiyun }
2067*4882a593Smuzhiyun }
2068*4882a593Smuzhiyun
2069*4882a593Smuzhiyun bool sk_mc_loop(struct sock *sk);
2070*4882a593Smuzhiyun
2071*4882a593Smuzhiyun static inline bool sk_can_gso(const struct sock *sk)
2072*4882a593Smuzhiyun {
2073*4882a593Smuzhiyun return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
2074*4882a593Smuzhiyun }
2075*4882a593Smuzhiyun
2076*4882a593Smuzhiyun void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
2077*4882a593Smuzhiyun
2078*4882a593Smuzhiyun static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
2079*4882a593Smuzhiyun {
2080*4882a593Smuzhiyun sk->sk_route_nocaps |= flags;
2081*4882a593Smuzhiyun sk->sk_route_caps &= ~flags;
2082*4882a593Smuzhiyun }
2083*4882a593Smuzhiyun
2084*4882a593Smuzhiyun static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
2085*4882a593Smuzhiyun struct iov_iter *from, char *to,
2086*4882a593Smuzhiyun int copy, int offset)
2087*4882a593Smuzhiyun {
2088*4882a593Smuzhiyun if (skb->ip_summed == CHECKSUM_NONE) {
2089*4882a593Smuzhiyun __wsum csum = 0;
2090*4882a593Smuzhiyun if (!csum_and_copy_from_iter_full(to, copy, &csum, from))
2091*4882a593Smuzhiyun return -EFAULT;
2092*4882a593Smuzhiyun skb->csum = csum_block_add(skb->csum, csum, offset);
2093*4882a593Smuzhiyun } else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
2094*4882a593Smuzhiyun if (!copy_from_iter_full_nocache(to, copy, from))
2095*4882a593Smuzhiyun return -EFAULT;
2096*4882a593Smuzhiyun } else if (!copy_from_iter_full(to, copy, from))
2097*4882a593Smuzhiyun return -EFAULT;
2098*4882a593Smuzhiyun
2099*4882a593Smuzhiyun return 0;
2100*4882a593Smuzhiyun }
2101*4882a593Smuzhiyun
2102*4882a593Smuzhiyun static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
2103*4882a593Smuzhiyun struct iov_iter *from, int copy)
2104*4882a593Smuzhiyun {
2105*4882a593Smuzhiyun int err, offset = skb->len;
2106*4882a593Smuzhiyun
2107*4882a593Smuzhiyun err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy),
2108*4882a593Smuzhiyun copy, offset);
2109*4882a593Smuzhiyun if (err)
2110*4882a593Smuzhiyun __skb_trim(skb, offset);
2111*4882a593Smuzhiyun
2112*4882a593Smuzhiyun return err;
2113*4882a593Smuzhiyun }
2114*4882a593Smuzhiyun
2115*4882a593Smuzhiyun static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
2116*4882a593Smuzhiyun struct sk_buff *skb,
2117*4882a593Smuzhiyun struct page *page,
2118*4882a593Smuzhiyun int off, int copy)
2119*4882a593Smuzhiyun {
2120*4882a593Smuzhiyun int err;
2121*4882a593Smuzhiyun
2122*4882a593Smuzhiyun err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off,
2123*4882a593Smuzhiyun copy, skb->len);
2124*4882a593Smuzhiyun if (err)
2125*4882a593Smuzhiyun return err;
2126*4882a593Smuzhiyun
2127*4882a593Smuzhiyun skb->len += copy;
2128*4882a593Smuzhiyun skb->data_len += copy;
2129*4882a593Smuzhiyun skb->truesize += copy;
2130*4882a593Smuzhiyun sk_wmem_queued_add(sk, copy);
2131*4882a593Smuzhiyun sk_mem_charge(sk, copy);
2132*4882a593Smuzhiyun return 0;
2133*4882a593Smuzhiyun }
2134*4882a593Smuzhiyun
2135*4882a593Smuzhiyun /**
2136*4882a593Smuzhiyun * sk_wmem_alloc_get - returns write allocations
2137*4882a593Smuzhiyun * @sk: socket
2138*4882a593Smuzhiyun *
2139*4882a593Smuzhiyun * Return: sk_wmem_alloc minus initial offset of one
2140*4882a593Smuzhiyun */
2141*4882a593Smuzhiyun static inline int sk_wmem_alloc_get(const struct sock *sk)
2142*4882a593Smuzhiyun {
2143*4882a593Smuzhiyun return refcount_read(&sk->sk_wmem_alloc) - 1;
2144*4882a593Smuzhiyun }
2145*4882a593Smuzhiyun
2146*4882a593Smuzhiyun /**
2147*4882a593Smuzhiyun * sk_rmem_alloc_get - returns read allocations
2148*4882a593Smuzhiyun * @sk: socket
2149*4882a593Smuzhiyun *
2150*4882a593Smuzhiyun * Return: sk_rmem_alloc
2151*4882a593Smuzhiyun */
2152*4882a593Smuzhiyun static inline int sk_rmem_alloc_get(const struct sock *sk)
2153*4882a593Smuzhiyun {
2154*4882a593Smuzhiyun return atomic_read(&sk->sk_rmem_alloc);
2155*4882a593Smuzhiyun }
2156*4882a593Smuzhiyun
2157*4882a593Smuzhiyun /**
2158*4882a593Smuzhiyun * sk_has_allocations - check if allocations are outstanding
2159*4882a593Smuzhiyun * @sk: socket
2160*4882a593Smuzhiyun *
2161*4882a593Smuzhiyun * Return: true if socket has write or read allocations
2162*4882a593Smuzhiyun */
2163*4882a593Smuzhiyun static inline bool sk_has_allocations(const struct sock *sk)
2164*4882a593Smuzhiyun {
2165*4882a593Smuzhiyun return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
2166*4882a593Smuzhiyun }
2167*4882a593Smuzhiyun
2168*4882a593Smuzhiyun /**
2169*4882a593Smuzhiyun * skwq_has_sleeper - check if there are any waiting processes
2170*4882a593Smuzhiyun * @wq: struct socket_wq
2171*4882a593Smuzhiyun *
2172*4882a593Smuzhiyun * Return: true if socket_wq has waiting processes
2173*4882a593Smuzhiyun *
2174*4882a593Smuzhiyun * The purpose of the skwq_has_sleeper and sock_poll_wait is to wrap the memory
2175*4882a593Smuzhiyun * barrier call. They were added due to the race found within the tcp code.
2176*4882a593Smuzhiyun *
2177*4882a593Smuzhiyun * Consider following tcp code paths::
2178*4882a593Smuzhiyun *
2179*4882a593Smuzhiyun * CPU1 CPU2
2180*4882a593Smuzhiyun * sys_select receive packet
2181*4882a593Smuzhiyun * ... ...
2182*4882a593Smuzhiyun * __add_wait_queue update tp->rcv_nxt
2183*4882a593Smuzhiyun * ... ...
2184*4882a593Smuzhiyun * tp->rcv_nxt check sock_def_readable
2185*4882a593Smuzhiyun * ... {
2186*4882a593Smuzhiyun * schedule rcu_read_lock();
2187*4882a593Smuzhiyun * wq = rcu_dereference(sk->sk_wq);
2188*4882a593Smuzhiyun * if (wq && waitqueue_active(&wq->wait))
2189*4882a593Smuzhiyun * wake_up_interruptible(&wq->wait)
2190*4882a593Smuzhiyun * ...
2191*4882a593Smuzhiyun * }
2192*4882a593Smuzhiyun *
2193*4882a593Smuzhiyun * The race for tcp fires when the __add_wait_queue changes done by CPU1 stay
2194*4882a593Smuzhiyun * in its cache, and so does the tp->rcv_nxt update on CPU2 side. The CPU1
2195*4882a593Smuzhiyun * could then end up calling schedule and sleep forever if there is no more
2196*4882a593Smuzhiyun * data on the socket.
2197*4882a593Smuzhiyun *
2198*4882a593Smuzhiyun */
2199*4882a593Smuzhiyun static inline bool skwq_has_sleeper(struct socket_wq *wq)
2200*4882a593Smuzhiyun {
2201*4882a593Smuzhiyun return wq && wq_has_sleeper(&wq->wait);
2202*4882a593Smuzhiyun }
2203*4882a593Smuzhiyun
2204*4882a593Smuzhiyun /**
2205*4882a593Smuzhiyun * sock_poll_wait - place memory barrier behind the poll_wait call.
2206*4882a593Smuzhiyun * @filp: file
2207*4882a593Smuzhiyun * @sock: socket to wait on
2208*4882a593Smuzhiyun * @p: poll_table
2209*4882a593Smuzhiyun *
2210*4882a593Smuzhiyun * See the comments in the wq_has_sleeper function.
2211*4882a593Smuzhiyun */
2212*4882a593Smuzhiyun static inline void sock_poll_wait(struct file *filp, struct socket *sock,
2213*4882a593Smuzhiyun poll_table *p)
2214*4882a593Smuzhiyun {
2215*4882a593Smuzhiyun if (!poll_does_not_wait(p)) {
2216*4882a593Smuzhiyun poll_wait(filp, &sock->wq.wait, p);
2217*4882a593Smuzhiyun /* We need to be sure we are in sync with the
2218*4882a593Smuzhiyun * socket flags modification.
2219*4882a593Smuzhiyun *
2220*4882a593Smuzhiyun * This memory barrier is paired in the wq_has_sleeper.
2221*4882a593Smuzhiyun */
2222*4882a593Smuzhiyun smp_mb();
2223*4882a593Smuzhiyun }
2224*4882a593Smuzhiyun }
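/*
 * A minimal, illustrative sketch of the pairing described above (not taken
 * from any in-tree protocol; my_proto_poll() and my_proto_data_ready() are
 * made-up names). The ->poll() side registers on the wait queue via
 * sock_poll_wait() and only then re-checks socket state; the wakeup side
 * updates state first and then tests skwq_has_sleeper(), whose barrier
 * pairs with the smp_mb() in sock_poll_wait().
 */
static __poll_t my_proto_poll(struct file *file, struct socket *sock,
			      poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask = 0;

	sock_poll_wait(file, sock, wait);	/* add to wq + smp_mb() */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;
	return mask;
}

static void my_proto_data_ready(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))		/* paired barrier lives here */
		wake_up_interruptible_poll(&wq->wait, EPOLLIN | EPOLLRDNORM);
	rcu_read_unlock();
}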
2225*4882a593Smuzhiyun
2226*4882a593Smuzhiyun static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
2227*4882a593Smuzhiyun {
2228*4882a593Smuzhiyun /* This pairs with WRITE_ONCE() in sk_set_txhash() */
2229*4882a593Smuzhiyun u32 txhash = READ_ONCE(sk->sk_txhash);
2230*4882a593Smuzhiyun
2231*4882a593Smuzhiyun if (txhash) {
2232*4882a593Smuzhiyun skb->l4_hash = 1;
2233*4882a593Smuzhiyun skb->hash = txhash;
2234*4882a593Smuzhiyun }
2235*4882a593Smuzhiyun }
2236*4882a593Smuzhiyun
2237*4882a593Smuzhiyun void skb_set_owner_w(struct sk_buff *skb, struct sock *sk);
2238*4882a593Smuzhiyun
2239*4882a593Smuzhiyun /*
2240*4882a593Smuzhiyun * Queue a received datagram if it will fit. Stream and sequenced
2241*4882a593Smuzhiyun * protocols can't normally use this as they need to fit buffers in
2242*4882a593Smuzhiyun * and play with them.
2243*4882a593Smuzhiyun *
2244*4882a593Smuzhiyun * Inlined as it's very short and called for pretty much every
2245*4882a593Smuzhiyun * packet ever received.
2246*4882a593Smuzhiyun */
2247*4882a593Smuzhiyun static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
2248*4882a593Smuzhiyun {
2249*4882a593Smuzhiyun skb_orphan(skb);
2250*4882a593Smuzhiyun skb->sk = sk;
2251*4882a593Smuzhiyun skb->destructor = sock_rfree;
2252*4882a593Smuzhiyun atomic_add(skb->truesize, &sk->sk_rmem_alloc);
2253*4882a593Smuzhiyun sk_mem_charge(sk, skb->truesize);
2254*4882a593Smuzhiyun }
2255*4882a593Smuzhiyun
2256*4882a593Smuzhiyun static inline __must_check bool skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
2257*4882a593Smuzhiyun {
2258*4882a593Smuzhiyun if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
2259*4882a593Smuzhiyun skb_orphan(skb);
2260*4882a593Smuzhiyun skb->destructor = sock_efree;
2261*4882a593Smuzhiyun skb->sk = sk;
2262*4882a593Smuzhiyun return true;
2263*4882a593Smuzhiyun }
2264*4882a593Smuzhiyun return false;
2265*4882a593Smuzhiyun }
2266*4882a593Smuzhiyun
2267*4882a593Smuzhiyun void sk_reset_timer(struct sock *sk, struct timer_list *timer,
2268*4882a593Smuzhiyun unsigned long expires);
2269*4882a593Smuzhiyun
2270*4882a593Smuzhiyun void sk_stop_timer(struct sock *sk, struct timer_list *timer);
2271*4882a593Smuzhiyun
2272*4882a593Smuzhiyun void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer);
2273*4882a593Smuzhiyun
2274*4882a593Smuzhiyun int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
2275*4882a593Smuzhiyun struct sk_buff *skb, unsigned int flags,
2276*4882a593Smuzhiyun void (*destructor)(struct sock *sk,
2277*4882a593Smuzhiyun struct sk_buff *skb));
2278*4882a593Smuzhiyun int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
2279*4882a593Smuzhiyun int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
2280*4882a593Smuzhiyun
2281*4882a593Smuzhiyun int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
2282*4882a593Smuzhiyun struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
2283*4882a593Smuzhiyun
2284*4882a593Smuzhiyun /*
2285*4882a593Smuzhiyun * Recover an error report and clear atomically
2286*4882a593Smuzhiyun */
2287*4882a593Smuzhiyun
2288*4882a593Smuzhiyun static inline int sock_error(struct sock *sk)
2289*4882a593Smuzhiyun {
2290*4882a593Smuzhiyun int err;
2291*4882a593Smuzhiyun
2292*4882a593Smuzhiyun /* Avoid an atomic operation for the common case.
2293*4882a593Smuzhiyun * This is racy since another cpu/thread can change sk_err under us.
2294*4882a593Smuzhiyun */
2295*4882a593Smuzhiyun if (likely(data_race(!sk->sk_err)))
2296*4882a593Smuzhiyun return 0;
2297*4882a593Smuzhiyun
2298*4882a593Smuzhiyun err = xchg(&sk->sk_err, 0);
2299*4882a593Smuzhiyun return -err;
2300*4882a593Smuzhiyun }
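/*
 * Illustrative only: a typical consumer reports the pending error once and
 * carries on; sock_error() already returns 0 or a negative errno and clears
 * sk_err atomically. my_proto_recvmsg_begin() is a made-up name.
 */
static int my_proto_recvmsg_begin(struct sock *sk)
{
	int err = sock_error(sk);	/* e.g. -ECONNRESET, cleared on read */

	if (err)
		return err;
	/* ... proceed with the normal receive path ... */
	return 0;
}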
2301*4882a593Smuzhiyun
2302*4882a593Smuzhiyun static inline unsigned long sock_wspace(struct sock *sk)
2303*4882a593Smuzhiyun {
2304*4882a593Smuzhiyun int amt = 0;
2305*4882a593Smuzhiyun
2306*4882a593Smuzhiyun if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
2307*4882a593Smuzhiyun amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc);
2308*4882a593Smuzhiyun if (amt < 0)
2309*4882a593Smuzhiyun amt = 0;
2310*4882a593Smuzhiyun }
2311*4882a593Smuzhiyun return amt;
2312*4882a593Smuzhiyun }
2313*4882a593Smuzhiyun
2314*4882a593Smuzhiyun /* Note:
2315*4882a593Smuzhiyun * We use sk->sk_wq_raw, from contexts knowing this
2316*4882a593Smuzhiyun * pointer is not NULL and cannot disappear/change.
2317*4882a593Smuzhiyun */
2318*4882a593Smuzhiyun static inline void sk_set_bit(int nr, struct sock *sk)
2319*4882a593Smuzhiyun {
2320*4882a593Smuzhiyun if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
2321*4882a593Smuzhiyun !sock_flag(sk, SOCK_FASYNC))
2322*4882a593Smuzhiyun return;
2323*4882a593Smuzhiyun
2324*4882a593Smuzhiyun set_bit(nr, &sk->sk_wq_raw->flags);
2325*4882a593Smuzhiyun }
2326*4882a593Smuzhiyun
2327*4882a593Smuzhiyun static inline void sk_clear_bit(int nr, struct sock *sk)
2328*4882a593Smuzhiyun {
2329*4882a593Smuzhiyun if ((nr == SOCKWQ_ASYNC_NOSPACE || nr == SOCKWQ_ASYNC_WAITDATA) &&
2330*4882a593Smuzhiyun !sock_flag(sk, SOCK_FASYNC))
2331*4882a593Smuzhiyun return;
2332*4882a593Smuzhiyun
2333*4882a593Smuzhiyun clear_bit(nr, &sk->sk_wq_raw->flags);
2334*4882a593Smuzhiyun }
2335*4882a593Smuzhiyun
2336*4882a593Smuzhiyun static inline void sk_wake_async(const struct sock *sk, int how, int band)
2337*4882a593Smuzhiyun {
2338*4882a593Smuzhiyun if (sock_flag(sk, SOCK_FASYNC)) {
2339*4882a593Smuzhiyun rcu_read_lock();
2340*4882a593Smuzhiyun sock_wake_async(rcu_dereference(sk->sk_wq), how, band);
2341*4882a593Smuzhiyun rcu_read_unlock();
2342*4882a593Smuzhiyun }
2343*4882a593Smuzhiyun }
2344*4882a593Smuzhiyun
2345*4882a593Smuzhiyun /* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might
2346*4882a593Smuzhiyun * need sizeof(sk_buff) + MTU + padding, unless the net driver performs copybreak.
2347*4882a593Smuzhiyun * Note: for send buffers, TCP works better if we can build two skbs at
2348*4882a593Smuzhiyun * minimum.
2349*4882a593Smuzhiyun */
2350*4882a593Smuzhiyun #define TCP_SKB_MIN_TRUESIZE (2048 + SKB_DATA_ALIGN(sizeof(struct sk_buff)))
2351*4882a593Smuzhiyun
2352*4882a593Smuzhiyun #define SOCK_MIN_SNDBUF (TCP_SKB_MIN_TRUESIZE * 2)
2353*4882a593Smuzhiyun #define SOCK_MIN_RCVBUF TCP_SKB_MIN_TRUESIZE
2354*4882a593Smuzhiyun
2355*4882a593Smuzhiyun static inline void sk_stream_moderate_sndbuf(struct sock *sk)
2356*4882a593Smuzhiyun {
2357*4882a593Smuzhiyun u32 val;
2358*4882a593Smuzhiyun
2359*4882a593Smuzhiyun if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
2360*4882a593Smuzhiyun return;
2361*4882a593Smuzhiyun
2362*4882a593Smuzhiyun val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
2363*4882a593Smuzhiyun
2364*4882a593Smuzhiyun WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
2365*4882a593Smuzhiyun }
2366*4882a593Smuzhiyun
2367*4882a593Smuzhiyun struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
2368*4882a593Smuzhiyun bool force_schedule);
2369*4882a593Smuzhiyun
2370*4882a593Smuzhiyun /**
2371*4882a593Smuzhiyun * sk_page_frag - return an appropriate page_frag
2372*4882a593Smuzhiyun * @sk: socket
2373*4882a593Smuzhiyun *
2374*4882a593Smuzhiyun * Use the per task page_frag instead of the per socket one for
2375*4882a593Smuzhiyun * optimization when we know that we're in process context and own
2376*4882a593Smuzhiyun * everything that's associated with %current.
2377*4882a593Smuzhiyun *
2378*4882a593Smuzhiyun * Both direct reclaim and page faults can nest inside other
2379*4882a593Smuzhiyun * socket operations and end up recursing into sk_page_frag()
2380*4882a593Smuzhiyun * while it's already in use: explicitly avoid task page_frag
2381*4882a593Smuzhiyun * usage if the caller is potentially doing any of them.
2382*4882a593Smuzhiyun * This assumes that page fault handlers use the GFP_NOFS flags.
2383*4882a593Smuzhiyun *
2384*4882a593Smuzhiyun * Return: a per task page_frag if context allows that,
2385*4882a593Smuzhiyun * otherwise a per socket one.
2386*4882a593Smuzhiyun */
2387*4882a593Smuzhiyun static inline struct page_frag *sk_page_frag(struct sock *sk)
2388*4882a593Smuzhiyun {
2389*4882a593Smuzhiyun if ((sk->sk_allocation & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC | __GFP_FS)) ==
2390*4882a593Smuzhiyun (__GFP_DIRECT_RECLAIM | __GFP_FS))
2391*4882a593Smuzhiyun return &current->task_frag;
2392*4882a593Smuzhiyun
2393*4882a593Smuzhiyun return &sk->sk_frag;
2394*4882a593Smuzhiyun }
2395*4882a593Smuzhiyun
2396*4882a593Smuzhiyun bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
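/*
 * Illustrative sketch tying sk_page_frag() and sk_page_frag_refill()
 * together with skb_copy_to_page_nocache() above, roughly the way a stream
 * sendmsg path fills page fragments. Error handling is reduced to the
 * minimum and my_proto_append_data() is a made-up name, not an in-tree API.
 */
static int my_proto_append_data(struct sock *sk, struct sk_buff *skb,
				struct iov_iter *from, int copy)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	int err;

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOBUFS;

	copy = min_t(int, copy, pfrag->size - pfrag->offset);
	err = skb_copy_to_page_nocache(sk, from, skb, pfrag->page,
				       pfrag->offset, copy);
	if (err)
		return err;

	/* The fragment must also be attached to the skb (e.g. via
	 * skb_fill_page_desc()/get_page()) before advancing the offset.
	 */
	pfrag->offset += copy;
	return copy;
}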
2397*4882a593Smuzhiyun
2398*4882a593Smuzhiyun /*
2399*4882a593Smuzhiyun * Default write policy as shown to user space via poll/select/SIGIO
2400*4882a593Smuzhiyun */
2401*4882a593Smuzhiyun static inline bool sock_writeable(const struct sock *sk)
2402*4882a593Smuzhiyun {
2403*4882a593Smuzhiyun return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1);
2404*4882a593Smuzhiyun }
2405*4882a593Smuzhiyun
2406*4882a593Smuzhiyun static inline gfp_t gfp_any(void)
2407*4882a593Smuzhiyun {
2408*4882a593Smuzhiyun return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
2409*4882a593Smuzhiyun }
2410*4882a593Smuzhiyun
2411*4882a593Smuzhiyun static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
2412*4882a593Smuzhiyun {
2413*4882a593Smuzhiyun return noblock ? 0 : sk->sk_rcvtimeo;
2414*4882a593Smuzhiyun }
2415*4882a593Smuzhiyun
2416*4882a593Smuzhiyun static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
2417*4882a593Smuzhiyun {
2418*4882a593Smuzhiyun return noblock ? 0 : sk->sk_sndtimeo;
2419*4882a593Smuzhiyun }
2420*4882a593Smuzhiyun
2421*4882a593Smuzhiyun static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
2422*4882a593Smuzhiyun {
2423*4882a593Smuzhiyun int v = waitall ? len : min_t(int, READ_ONCE(sk->sk_rcvlowat), len);
2424*4882a593Smuzhiyun
2425*4882a593Smuzhiyun return v ?: 1;
2426*4882a593Smuzhiyun }
2427*4882a593Smuzhiyun
2428*4882a593Smuzhiyun /* Alas, with timeout socket operations are not restartable.
2429*4882a593Smuzhiyun * Compare this to poll().
2430*4882a593Smuzhiyun */
2431*4882a593Smuzhiyun static inline int sock_intr_errno(long timeo)
2432*4882a593Smuzhiyun {
2433*4882a593Smuzhiyun return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
2434*4882a593Smuzhiyun }
2435*4882a593Smuzhiyun
2436*4882a593Smuzhiyun struct sock_skb_cb {
2437*4882a593Smuzhiyun u32 dropcount;
2438*4882a593Smuzhiyun };
2439*4882a593Smuzhiyun
2440*4882a593Smuzhiyun /* Store sock_skb_cb at the end of skb->cb[] so protocol families
2441*4882a593Smuzhiyun * using skb->cb[] would keep using it directly and utilize its
2442*4882a593Smuzhiyun * alignment guarantee.
2443*4882a593Smuzhiyun */
2444*4882a593Smuzhiyun #define SOCK_SKB_CB_OFFSET ((sizeof_field(struct sk_buff, cb) - \
2445*4882a593Smuzhiyun sizeof(struct sock_skb_cb)))
2446*4882a593Smuzhiyun
2447*4882a593Smuzhiyun #define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \
2448*4882a593Smuzhiyun SOCK_SKB_CB_OFFSET))
2449*4882a593Smuzhiyun
2450*4882a593Smuzhiyun #define sock_skb_cb_check_size(size) \
2451*4882a593Smuzhiyun BUILD_BUG_ON((size) > SOCK_SKB_CB_OFFSET)
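/*
 * Illustrative sketch: a protocol keeps its private control block at the
 * front of skb->cb[] and proves at build time that it cannot overlap the
 * generic sock_skb_cb stored at the end. struct my_proto_skb_cb and the
 * helpers below are made-up names.
 */
struct my_proto_skb_cb {
	u32 seq;
	u16 flags;
};
#define MY_PROTO_SKB_CB(skb) ((struct my_proto_skb_cb *)((skb)->cb))

static inline void my_proto_init_cb(struct sk_buff *skb)
{
	sock_skb_cb_check_size(sizeof(struct my_proto_skb_cb));
	MY_PROTO_SKB_CB(skb)->seq = 0;
	/* SOCK_SKB_CB(skb)->dropcount stays usable alongside this */
}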
2452*4882a593Smuzhiyun
2453*4882a593Smuzhiyun static inline void
2454*4882a593Smuzhiyun sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
2455*4882a593Smuzhiyun {
2456*4882a593Smuzhiyun SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ?
2457*4882a593Smuzhiyun atomic_read(&sk->sk_drops) : 0;
2458*4882a593Smuzhiyun }
2459*4882a593Smuzhiyun
2460*4882a593Smuzhiyun static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
2461*4882a593Smuzhiyun {
2462*4882a593Smuzhiyun int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
2463*4882a593Smuzhiyun
2464*4882a593Smuzhiyun atomic_add(segs, &sk->sk_drops);
2465*4882a593Smuzhiyun }
2466*4882a593Smuzhiyun
2467*4882a593Smuzhiyun static inline ktime_t sock_read_timestamp(struct sock *sk)
2468*4882a593Smuzhiyun {
2469*4882a593Smuzhiyun #if BITS_PER_LONG==32
2470*4882a593Smuzhiyun unsigned int seq;
2471*4882a593Smuzhiyun ktime_t kt;
2472*4882a593Smuzhiyun
2473*4882a593Smuzhiyun do {
2474*4882a593Smuzhiyun seq = read_seqbegin(&sk->sk_stamp_seq);
2475*4882a593Smuzhiyun kt = sk->sk_stamp;
2476*4882a593Smuzhiyun } while (read_seqretry(&sk->sk_stamp_seq, seq));
2477*4882a593Smuzhiyun
2478*4882a593Smuzhiyun return kt;
2479*4882a593Smuzhiyun #else
2480*4882a593Smuzhiyun return READ_ONCE(sk->sk_stamp);
2481*4882a593Smuzhiyun #endif
2482*4882a593Smuzhiyun }
2483*4882a593Smuzhiyun
2484*4882a593Smuzhiyun static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
2485*4882a593Smuzhiyun {
2486*4882a593Smuzhiyun #if BITS_PER_LONG==32
2487*4882a593Smuzhiyun write_seqlock(&sk->sk_stamp_seq);
2488*4882a593Smuzhiyun sk->sk_stamp = kt;
2489*4882a593Smuzhiyun write_sequnlock(&sk->sk_stamp_seq);
2490*4882a593Smuzhiyun #else
2491*4882a593Smuzhiyun WRITE_ONCE(sk->sk_stamp, kt);
2492*4882a593Smuzhiyun #endif
2493*4882a593Smuzhiyun }
2494*4882a593Smuzhiyun
2495*4882a593Smuzhiyun void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
2496*4882a593Smuzhiyun struct sk_buff *skb);
2497*4882a593Smuzhiyun void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
2498*4882a593Smuzhiyun struct sk_buff *skb);
2499*4882a593Smuzhiyun
2500*4882a593Smuzhiyun static inline void
2501*4882a593Smuzhiyun sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
2502*4882a593Smuzhiyun {
2503*4882a593Smuzhiyun ktime_t kt = skb->tstamp;
2504*4882a593Smuzhiyun struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
2505*4882a593Smuzhiyun
2506*4882a593Smuzhiyun /*
2507*4882a593Smuzhiyun * generate control messages if
2508*4882a593Smuzhiyun * - receive time stamping in software requested
2509*4882a593Smuzhiyun * - software time stamp available and wanted
2510*4882a593Smuzhiyun * - hardware time stamps available and wanted
2511*4882a593Smuzhiyun */
2512*4882a593Smuzhiyun if (sock_flag(sk, SOCK_RCVTSTAMP) ||
2513*4882a593Smuzhiyun (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
2514*4882a593Smuzhiyun (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
2515*4882a593Smuzhiyun (hwtstamps->hwtstamp &&
2516*4882a593Smuzhiyun (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
2517*4882a593Smuzhiyun __sock_recv_timestamp(msg, sk, skb);
2518*4882a593Smuzhiyun else
2519*4882a593Smuzhiyun sock_write_timestamp(sk, kt);
2520*4882a593Smuzhiyun
2521*4882a593Smuzhiyun if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
2522*4882a593Smuzhiyun __sock_recv_wifi_status(msg, sk, skb);
2523*4882a593Smuzhiyun }
2524*4882a593Smuzhiyun
2525*4882a593Smuzhiyun void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
2526*4882a593Smuzhiyun struct sk_buff *skb);
2527*4882a593Smuzhiyun
2528*4882a593Smuzhiyun #define SK_DEFAULT_STAMP (-1L * NSEC_PER_SEC)
2529*4882a593Smuzhiyun static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
2530*4882a593Smuzhiyun struct sk_buff *skb)
2531*4882a593Smuzhiyun {
2532*4882a593Smuzhiyun #define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL) | \
2533*4882a593Smuzhiyun (1UL << SOCK_RCVTSTAMP))
2534*4882a593Smuzhiyun #define TSFLAGS_ANY (SOF_TIMESTAMPING_SOFTWARE | \
2535*4882a593Smuzhiyun SOF_TIMESTAMPING_RAW_HARDWARE)
2536*4882a593Smuzhiyun
2537*4882a593Smuzhiyun if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
2538*4882a593Smuzhiyun __sock_recv_ts_and_drops(msg, sk, skb);
2539*4882a593Smuzhiyun else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
2540*4882a593Smuzhiyun sock_write_timestamp(sk, skb->tstamp);
2541*4882a593Smuzhiyun else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
2542*4882a593Smuzhiyun sock_write_timestamp(sk, 0);
2543*4882a593Smuzhiyun }
2544*4882a593Smuzhiyun
2545*4882a593Smuzhiyun void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
2546*4882a593Smuzhiyun
2547*4882a593Smuzhiyun /**
2548*4882a593Smuzhiyun * _sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
2549*4882a593Smuzhiyun * @sk: socket sending this packet
2550*4882a593Smuzhiyun * @tsflags: timestamping flags to use
2551*4882a593Smuzhiyun * @tx_flags: completed with instructions for time stamping
2552*4882a593Smuzhiyun * @tskey: filled in with next sk_tskey (not for TCP, which uses seqno)
2553*4882a593Smuzhiyun *
2554*4882a593Smuzhiyun * Note: callers should take care of initial ``*tx_flags`` value (usually 0)
2555*4882a593Smuzhiyun */
2556*4882a593Smuzhiyun static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
2557*4882a593Smuzhiyun __u8 *tx_flags, __u32 *tskey)
2558*4882a593Smuzhiyun {
2559*4882a593Smuzhiyun if (unlikely(tsflags)) {
2560*4882a593Smuzhiyun __sock_tx_timestamp(tsflags, tx_flags);
2561*4882a593Smuzhiyun if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
2562*4882a593Smuzhiyun tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
2563*4882a593Smuzhiyun *tskey = sk->sk_tskey++;
2564*4882a593Smuzhiyun }
2565*4882a593Smuzhiyun if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
2566*4882a593Smuzhiyun *tx_flags |= SKBTX_WIFI_STATUS;
2567*4882a593Smuzhiyun }
2568*4882a593Smuzhiyun
2569*4882a593Smuzhiyun static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags,
2570*4882a593Smuzhiyun __u8 *tx_flags)
2571*4882a593Smuzhiyun {
2572*4882a593Smuzhiyun _sock_tx_timestamp(sk, tsflags, tx_flags, NULL);
2573*4882a593Smuzhiyun }
2574*4882a593Smuzhiyun
2575*4882a593Smuzhiyun static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
2576*4882a593Smuzhiyun {
2577*4882a593Smuzhiyun _sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
2578*4882a593Smuzhiyun &skb_shinfo(skb)->tskey);
2579*4882a593Smuzhiyun }
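/*
 * Illustrative sketch of the note in _sock_tx_timestamp() above: the caller
 * owns the initial tx_flags value (usually 0); the helper only ORs bits into
 * it. my_proto_xmit_one() is a made-up name.
 */
static void my_proto_xmit_one(struct sock *sk, struct sk_buff *skb,
			      __u16 tsflags)
{
	__u8 tx_flags = 0;		/* caller-provided initial value */

	sock_tx_timestamp(sk, tsflags, &tx_flags);
	skb_shinfo(skb)->tx_flags |= tx_flags;
}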
2580*4882a593Smuzhiyun
2581*4882a593Smuzhiyun DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
2582*4882a593Smuzhiyun /**
2583*4882a593Smuzhiyun * sk_eat_skb - Release a skb if it is no longer needed
2584*4882a593Smuzhiyun * @sk: socket to eat this skb from
2585*4882a593Smuzhiyun * @skb: socket buffer to eat
2586*4882a593Smuzhiyun *
2587*4882a593Smuzhiyun * This routine must be called with interrupts disabled or with the socket
2588*4882a593Smuzhiyun * locked so that the sk_buff queue operation is ok.
2589*4882a593Smuzhiyun */
2590*4882a593Smuzhiyun static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
2591*4882a593Smuzhiyun {
2592*4882a593Smuzhiyun __skb_unlink(skb, &sk->sk_receive_queue);
2593*4882a593Smuzhiyun if (static_branch_unlikely(&tcp_rx_skb_cache_key) &&
2594*4882a593Smuzhiyun !sk->sk_rx_skb_cache) {
2595*4882a593Smuzhiyun sk->sk_rx_skb_cache = skb;
2596*4882a593Smuzhiyun skb_orphan(skb);
2597*4882a593Smuzhiyun return;
2598*4882a593Smuzhiyun }
2599*4882a593Smuzhiyun __kfree_skb(skb);
2600*4882a593Smuzhiyun }
2601*4882a593Smuzhiyun
2602*4882a593Smuzhiyun static inline
2603*4882a593Smuzhiyun struct net *sock_net(const struct sock *sk)
2604*4882a593Smuzhiyun {
2605*4882a593Smuzhiyun return read_pnet(&sk->sk_net);
2606*4882a593Smuzhiyun }
2607*4882a593Smuzhiyun
2608*4882a593Smuzhiyun static inline
2609*4882a593Smuzhiyun void sock_net_set(struct sock *sk, struct net *net)
2610*4882a593Smuzhiyun {
2611*4882a593Smuzhiyun write_pnet(&sk->sk_net, net);
2612*4882a593Smuzhiyun }
2613*4882a593Smuzhiyun
2614*4882a593Smuzhiyun static inline bool
2615*4882a593Smuzhiyun skb_sk_is_prefetched(struct sk_buff *skb)
2616*4882a593Smuzhiyun {
2617*4882a593Smuzhiyun #ifdef CONFIG_INET
2618*4882a593Smuzhiyun return skb->destructor == sock_pfree;
2619*4882a593Smuzhiyun #else
2620*4882a593Smuzhiyun return false;
2621*4882a593Smuzhiyun #endif /* CONFIG_INET */
2622*4882a593Smuzhiyun }
2623*4882a593Smuzhiyun
2624*4882a593Smuzhiyun /* This helper checks if a socket is a full socket,
2625*4882a593Smuzhiyun * i.e. _not_ a timewait or request socket.
2626*4882a593Smuzhiyun */
2627*4882a593Smuzhiyun static inline bool sk_fullsock(const struct sock *sk)
2628*4882a593Smuzhiyun {
2629*4882a593Smuzhiyun return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
2630*4882a593Smuzhiyun }
2631*4882a593Smuzhiyun
2632*4882a593Smuzhiyun static inline bool
2633*4882a593Smuzhiyun sk_is_refcounted(struct sock *sk)
2634*4882a593Smuzhiyun {
2635*4882a593Smuzhiyun /* Only full sockets have sk->sk_flags. */
2636*4882a593Smuzhiyun return !sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE);
2637*4882a593Smuzhiyun }
2638*4882a593Smuzhiyun
2639*4882a593Smuzhiyun /**
2640*4882a593Smuzhiyun * skb_steal_sock - steal a socket from an sk_buff
2641*4882a593Smuzhiyun * @skb: sk_buff to steal the socket from
2642*4882a593Smuzhiyun * @refcounted: is set to true if the socket is reference-counted
2643*4882a593Smuzhiyun */
2644*4882a593Smuzhiyun static inline struct sock *
2645*4882a593Smuzhiyun skb_steal_sock(struct sk_buff *skb, bool *refcounted)
2646*4882a593Smuzhiyun {
2647*4882a593Smuzhiyun if (skb->sk) {
2648*4882a593Smuzhiyun struct sock *sk = skb->sk;
2649*4882a593Smuzhiyun
2650*4882a593Smuzhiyun *refcounted = true;
2651*4882a593Smuzhiyun if (skb_sk_is_prefetched(skb))
2652*4882a593Smuzhiyun *refcounted = sk_is_refcounted(sk);
2653*4882a593Smuzhiyun skb->destructor = NULL;
2654*4882a593Smuzhiyun skb->sk = NULL;
2655*4882a593Smuzhiyun return sk;
2656*4882a593Smuzhiyun }
2657*4882a593Smuzhiyun *refcounted = false;
2658*4882a593Smuzhiyun return NULL;
2659*4882a593Smuzhiyun }
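/*
 * Illustrative sketch of consuming skb_steal_sock(): when the stolen socket
 * is reference counted, the caller now owns that reference and must drop it
 * with sock_put() once done. my_demux() is a made-up name.
 */
static void my_demux(struct sk_buff *skb)
{
	bool refcounted;
	struct sock *sk = skb_steal_sock(skb, &refcounted);

	if (!sk)
		return;		/* fall back to a normal socket lookup */
	/* ... queue skb to sk ... */
	if (refcounted)
		sock_put(sk);
}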
2660*4882a593Smuzhiyun
2661*4882a593Smuzhiyun /* Checks if this SKB belongs to an HW offloaded socket
2662*4882a593Smuzhiyun * and whether any SW fallbacks are required based on dev.
2663*4882a593Smuzhiyun * Check decrypted mark in case skb_orphan() cleared socket.
2664*4882a593Smuzhiyun */
2665*4882a593Smuzhiyun static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
2666*4882a593Smuzhiyun struct net_device *dev)
2667*4882a593Smuzhiyun {
2668*4882a593Smuzhiyun #ifdef CONFIG_SOCK_VALIDATE_XMIT
2669*4882a593Smuzhiyun struct sock *sk = skb->sk;
2670*4882a593Smuzhiyun
2671*4882a593Smuzhiyun if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {
2672*4882a593Smuzhiyun skb = sk->sk_validate_xmit_skb(sk, dev, skb);
2673*4882a593Smuzhiyun #ifdef CONFIG_TLS_DEVICE
2674*4882a593Smuzhiyun } else if (unlikely(skb->decrypted)) {
2675*4882a593Smuzhiyun pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n");
2676*4882a593Smuzhiyun kfree_skb(skb);
2677*4882a593Smuzhiyun skb = NULL;
2678*4882a593Smuzhiyun #endif
2679*4882a593Smuzhiyun }
2680*4882a593Smuzhiyun #endif
2681*4882a593Smuzhiyun
2682*4882a593Smuzhiyun return skb;
2683*4882a593Smuzhiyun }
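/*
 * Illustrative sketch only (my_xmit() is made up): a transmit path lets the
 * (possibly HW-offloaded) socket validate the skb just before it reaches the
 * device and treats a NULL return as "already consumed, nothing to send".
 */
static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	skb = sk_validate_xmit_skb(skb, dev);
	if (unlikely(!skb))
		return NETDEV_TX_OK;	/* validator dropped or freed it */
	/* ... hand skb to the hardware queue ... */
	return NETDEV_TX_OK;
}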
2684*4882a593Smuzhiyun
2685*4882a593Smuzhiyun /* This helper checks if a socket is a LISTEN or NEW_SYN_RECV
2686*4882a593Smuzhiyun * SYNACK messages can be attached to either one (depending on SYNCOOKIE).
2687*4882a593Smuzhiyun */
2688*4882a593Smuzhiyun static inline bool sk_listener(const struct sock *sk)
2689*4882a593Smuzhiyun {
2690*4882a593Smuzhiyun return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
2691*4882a593Smuzhiyun }
2692*4882a593Smuzhiyun
2693*4882a593Smuzhiyun void sock_enable_timestamp(struct sock *sk, enum sock_flags flag);
2694*4882a593Smuzhiyun int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
2695*4882a593Smuzhiyun int type);
2696*4882a593Smuzhiyun
2697*4882a593Smuzhiyun bool sk_ns_capable(const struct sock *sk,
2698*4882a593Smuzhiyun struct user_namespace *user_ns, int cap);
2699*4882a593Smuzhiyun bool sk_capable(const struct sock *sk, int cap);
2700*4882a593Smuzhiyun bool sk_net_capable(const struct sock *sk, int cap);
2701*4882a593Smuzhiyun
2702*4882a593Smuzhiyun void sk_get_meminfo(const struct sock *sk, u32 *meminfo);
2703*4882a593Smuzhiyun
2704*4882a593Smuzhiyun /* Take into consideration the size of the struct sk_buff overhead in the
2705*4882a593Smuzhiyun * determination of these values, since that is non-constant across
2706*4882a593Smuzhiyun * platforms. This makes socket queueing behavior and performance
2707*4882a593Smuzhiyun * not depend upon such differences.
2708*4882a593Smuzhiyun */
2709*4882a593Smuzhiyun #define _SK_MEM_PACKETS 256
2710*4882a593Smuzhiyun #define _SK_MEM_OVERHEAD SKB_TRUESIZE(256)
2711*4882a593Smuzhiyun #define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
2712*4882a593Smuzhiyun #define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
2713*4882a593Smuzhiyun
2714*4882a593Smuzhiyun extern __u32 sysctl_wmem_max;
2715*4882a593Smuzhiyun extern __u32 sysctl_rmem_max;
2716*4882a593Smuzhiyun
2717*4882a593Smuzhiyun extern int sysctl_tstamp_allow_data;
2718*4882a593Smuzhiyun extern int sysctl_optmem_max;
2719*4882a593Smuzhiyun
2720*4882a593Smuzhiyun extern __u32 sysctl_wmem_default;
2721*4882a593Smuzhiyun extern __u32 sysctl_rmem_default;
2722*4882a593Smuzhiyun
2723*4882a593Smuzhiyun #define SKB_FRAG_PAGE_ORDER get_order(32768)
2724*4882a593Smuzhiyun DECLARE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);
2725*4882a593Smuzhiyun
2726*4882a593Smuzhiyun static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
2727*4882a593Smuzhiyun {
2728*4882a593Smuzhiyun /* Does this proto have a per-netns sysctl_wmem? */
2729*4882a593Smuzhiyun if (proto->sysctl_wmem_offset)
2730*4882a593Smuzhiyun return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset));
2731*4882a593Smuzhiyun
2732*4882a593Smuzhiyun return READ_ONCE(*proto->sysctl_wmem);
2733*4882a593Smuzhiyun }
2734*4882a593Smuzhiyun
2735*4882a593Smuzhiyun static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
2736*4882a593Smuzhiyun {
2737*4882a593Smuzhiyun /* Does this proto have a per-netns sysctl_rmem? */
2738*4882a593Smuzhiyun if (proto->sysctl_rmem_offset)
2739*4882a593Smuzhiyun return READ_ONCE(*(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset));
2740*4882a593Smuzhiyun
2741*4882a593Smuzhiyun return READ_ONCE(*proto->sysctl_rmem);
2742*4882a593Smuzhiyun }
2743*4882a593Smuzhiyun
2744*4882a593Smuzhiyun /* Default TCP Small queue budget is ~1 ms of data (1sec >> 10)
2745*4882a593Smuzhiyun * Some wifi drivers need to tweak it to get more chunks.
2746*4882a593Smuzhiyun * They can use this helper from their ndo_start_xmit()
2747*4882a593Smuzhiyun */
2748*4882a593Smuzhiyun static inline void sk_pacing_shift_update(struct sock *sk, int val)
2749*4882a593Smuzhiyun {
2750*4882a593Smuzhiyun if (!sk || !sk_fullsock(sk) || READ_ONCE(sk->sk_pacing_shift) == val)
2751*4882a593Smuzhiyun return;
2752*4882a593Smuzhiyun WRITE_ONCE(sk->sk_pacing_shift, val);
2753*4882a593Smuzhiyun }
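/*
 * Illustrative sketch of the note above: a wifi driver asking for roughly
 * 8 ms of queued data per socket instead of the ~1 ms default.
 * my_wifi_start_xmit() is a made-up name.
 */
static netdev_tx_t my_wifi_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	sk_pacing_shift_update(skb->sk, 7);	/* 1 sec >> 7 ~= 8 ms */
	/* ... hand the frame to the hardware ... */
	return NETDEV_TX_OK;
}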
2754*4882a593Smuzhiyun
2755*4882a593Smuzhiyun /* if a socket is bound to a device, check that the given device
2756*4882a593Smuzhiyun * index is either the same or that the socket is bound to an L3
2757*4882a593Smuzhiyun * master device and the given device index is also enslaved to
2758*4882a593Smuzhiyun * that L3 master
2759*4882a593Smuzhiyun */
2760*4882a593Smuzhiyun static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
2761*4882a593Smuzhiyun {
2762*4882a593Smuzhiyun int mdif;
2763*4882a593Smuzhiyun
2764*4882a593Smuzhiyun if (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif)
2765*4882a593Smuzhiyun return true;
2766*4882a593Smuzhiyun
2767*4882a593Smuzhiyun mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif);
2768*4882a593Smuzhiyun if (mdif && mdif == sk->sk_bound_dev_if)
2769*4882a593Smuzhiyun return true;
2770*4882a593Smuzhiyun
2771*4882a593Smuzhiyun return false;
2772*4882a593Smuzhiyun }
2773*4882a593Smuzhiyun
2774*4882a593Smuzhiyun void sock_def_readable(struct sock *sk);
2775*4882a593Smuzhiyun
2776*4882a593Smuzhiyun int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk);
2777*4882a593Smuzhiyun void sock_enable_timestamps(struct sock *sk);
2778*4882a593Smuzhiyun void sock_no_linger(struct sock *sk);
2779*4882a593Smuzhiyun void sock_set_keepalive(struct sock *sk);
2780*4882a593Smuzhiyun void sock_set_priority(struct sock *sk, u32 priority);
2781*4882a593Smuzhiyun void sock_set_rcvbuf(struct sock *sk, int val);
2782*4882a593Smuzhiyun void sock_set_mark(struct sock *sk, u32 val);
2783*4882a593Smuzhiyun void sock_set_reuseaddr(struct sock *sk);
2784*4882a593Smuzhiyun void sock_set_reuseport(struct sock *sk);
2785*4882a593Smuzhiyun void sock_set_sndtimeo(struct sock *sk, s64 secs);
2786*4882a593Smuzhiyun
2787*4882a593Smuzhiyun int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);
2788*4882a593Smuzhiyun
2789*4882a593Smuzhiyun #endif /* _SOCK_H */
2790