/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RDS_RDS_H
#define _RDS_RDS_H

#include <net/sock.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <rdma/rdma_cm.h>
#include <linux/mutex.h>
#include <linux/rds.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/in6.h>

#include "info.h"

/*
 * RDS Network protocol version
 */
#define RDS_PROTOCOL_3_0	0x0300
#define RDS_PROTOCOL_3_1	0x0301
#define RDS_PROTOCOL_4_0	0x0400
#define RDS_PROTOCOL_4_1	0x0401
#define RDS_PROTOCOL_VERSION	RDS_PROTOCOL_3_1
#define RDS_PROTOCOL_MAJOR(v)	((v) >> 8)
#define RDS_PROTOCOL_MINOR(v)	((v) & 255)
#define RDS_PROTOCOL(maj, min)	(((maj) << 8) | (min))
#define RDS_PROTOCOL_COMPAT_VERSION	RDS_PROTOCOL_3_1
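
/* Worked example (illustrative): RDS_PROTOCOL(4, 1) packs major 4 and
 * minor 1 into 0x0401, so RDS_PROTOCOL_MAJOR(0x0401) == 4 and
 * RDS_PROTOCOL_MINOR(0x0401) == 1, matching RDS_PROTOCOL_4_1 above.
 */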

/* The following ports, 16385, 18634, 18635, are registered with IANA as
 * the ports to be used for RDS over TCP and UDP. Currently, only RDS over
 * TCP and RDS over IB/RDMA are implemented. 18634 is the historical value
 * used for the RDMA_CM listener port. RDS/TCP uses port 16385. After
 * IPv6 work, RDMA_CM also uses 16385 as the listener port. 18634 is kept
 * to ensure compatibility with older RDS modules. Those ports are defined
 * in each transport's header file.
 */
#define RDS_PORT	18634

#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif
#ifdef RDS_DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
static inline __printf(1, 2)
void rdsdebug(char *fmt, ...)
{
}
#endif

#define RDS_FRAG_SHIFT	12
#define RDS_FRAG_SIZE	((unsigned int)(1 << RDS_FRAG_SHIFT))

/* Used to limit both RDMA and non-RDMA RDS messages to 1MB */
#define RDS_MAX_MSG_SIZE	((unsigned int)(1 << 20))

#define RDS_CONG_MAP_BYTES	(65536 / 8)
#define RDS_CONG_MAP_PAGES	(PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)
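
/* A worked sketch of the arithmetic above, assuming PAGE_SIZE == 4096:
 * RDS_FRAG_SIZE is 1 << 12 == 4096 bytes, and the congestion map carries
 * one bit per 16-bit port, so RDS_CONG_MAP_BYTES == 65536 / 8 == 8192
 * bytes, spanning RDS_CONG_MAP_PAGES == 2 pages of
 * RDS_CONG_MAP_PAGE_BITS == 32768 bits each. Other page sizes change the
 * page count but not the byte total.
 */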

struct rds_cong_map {
	struct rb_node		m_rb_node;
	struct in6_addr		m_addr;
	wait_queue_head_t	m_waitq;
	struct list_head	m_conn_list;
	unsigned long		m_page_addrs[RDS_CONG_MAP_PAGES];
};


/*
 * This is how we will track the connection state:
 * A connection is always in one of the following
 * states. Updates to the state are atomic and imply
 * a memory barrier.
 */
enum {
	RDS_CONN_DOWN = 0,
	RDS_CONN_CONNECTING,
	RDS_CONN_DISCONNECTING,
	RDS_CONN_UP,
	RDS_CONN_RESETTING,
	RDS_CONN_ERROR,
};
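
/* Illustrative walk through the states above (a sketch, not a required
 * sequence): a path normally moves DOWN -> CONNECTING -> UP, and comes
 * back down via DISCONNECTING on errors. Transitions go through the
 * atomic helper declared later in this file, e.g.:
 *
 *	if (rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING))
 *		queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
 */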

/* Bits for c_flags */
#define RDS_LL_SEND_FULL	0
#define RDS_RECONNECT_PENDING	1
#define RDS_IN_XMIT		2
#define RDS_RECV_REFILL		3
#define RDS_DESTROY_PENDING	4

/* Max number of multipaths per RDS connection. Must be a power of 2 */
#define RDS_MPATH_WORKERS	8
#define RDS_MPATH_HASH(rs, n) (jhash_1word((rs)->rs_bound_port, \
			       (rs)->rs_hash_initval) & ((n) - 1))

#define IS_CANONICAL(laddr, faddr) (htonl(laddr) < htonl(faddr))
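
/* Example (illustrative): a socket's bound port is hashed and masked to
 * pick one of the paths of a multipath-capable connection:
 *
 *	int idx = RDS_MPATH_HASH(rs, RDS_MPATH_WORKERS);
 *	struct rds_conn_path *cp = &conn->c_path[idx];
 */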

/* Per mpath connection state */
struct rds_conn_path {
	struct rds_connection	*cp_conn;
	struct rds_message	*cp_xmit_rm;
	unsigned long		cp_xmit_sg;
	unsigned int		cp_xmit_hdr_off;
	unsigned int		cp_xmit_data_off;
	unsigned int		cp_xmit_atomic_sent;
	unsigned int		cp_xmit_rdma_sent;
	unsigned int		cp_xmit_data_sent;

	spinlock_t		cp_lock;	/* protect msg queues */
	u64			cp_next_tx_seq;
	struct list_head	cp_send_queue;
	struct list_head	cp_retrans;

	u64			cp_next_rx_seq;

	void			*cp_transport_data;

	atomic_t		cp_state;
	unsigned long		cp_send_gen;
	unsigned long		cp_flags;
	unsigned long		cp_reconnect_jiffies;
	struct delayed_work	cp_send_w;
	struct delayed_work	cp_recv_w;
	struct delayed_work	cp_conn_w;
	struct work_struct	cp_down_w;
	struct mutex		cp_cm_lock;	/* protect cp_state & cm */
	wait_queue_head_t	cp_waitq;

	unsigned int		cp_unacked_packets;
	unsigned int		cp_unacked_bytes;
	unsigned int		cp_index;
};

/* One rds_connection per RDS address pair */
struct rds_connection {
	struct hlist_node	c_hash_node;
	struct in6_addr		c_laddr;
	struct in6_addr		c_faddr;
	int			c_dev_if;	/* ifindex used for this conn */
	int			c_bound_if;	/* ifindex of c_laddr */
	unsigned int		c_loopback:1,
				c_isv6:1,
				c_ping_triggered:1,
				c_pad_to_32:29;
	int			c_npaths;
	struct rds_connection	*c_passive;
	struct rds_transport	*c_trans;

	struct rds_cong_map	*c_lcong;
	struct rds_cong_map	*c_fcong;

	/* Protocol version */
	unsigned int		c_proposed_version;
	unsigned int		c_version;
	possible_net_t		c_net;

	/* TOS */
	u8			c_tos;

	struct list_head	c_map_item;
	unsigned long		c_map_queued;

	struct rds_conn_path	*c_path;
	wait_queue_head_t	c_hs_waitq;	/* handshake waitq */

	u32			c_my_gen_num;
	u32			c_peer_gen_num;
};

static inline
struct net *rds_conn_net(struct rds_connection *conn)
{
	return read_pnet(&conn->c_net);
}

static inline
void rds_conn_net_set(struct rds_connection *conn, struct net *net)
{
	write_pnet(&conn->c_net, net);
}

#define RDS_FLAG_CONG_BITMAP	0x01
#define RDS_FLAG_ACK_REQUIRED	0x02
#define RDS_FLAG_RETRANSMITTED	0x04
#define RDS_MAX_ADV_CREDIT	255

/* RDS_FLAG_PROBE_PORT is the reserved sport used for sending a ping
 * probe to exchange control information before establishing a connection.
 * Currently the control information that is exchanged is the number of
 * supported paths. If the peer is a legacy (older kernel revision) peer,
 * it will return a pong message without the additional control
 * information, which tells the sender that the peer is an older revision.
 */
#define RDS_FLAG_PROBE_PORT	1
#define RDS_HS_PROBE(sport, dport) \
		((sport == RDS_FLAG_PROBE_PORT && dport == 0) || \
		 (sport == 0 && dport == RDS_FLAG_PROBE_PORT))
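
/* Example (illustrative): the reserved port must appear on exactly one
 * side, so RDS_HS_PROBE(1, 0) and RDS_HS_PROBE(0, 1) are true, while
 * RDS_HS_PROBE(0, 0) and RDS_HS_PROBE(1, 1) are false.
 */
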
/*
 * Maximum space available for extension headers.
 */
#define RDS_HEADER_EXT_SPACE	16

struct rds_header {
	__be64	h_sequence;
	__be64	h_ack;
	__be32	h_len;
	__be16	h_sport;
	__be16	h_dport;
	u8	h_flags;
	u8	h_credit;
	u8	h_padding[4];
	__sum16	h_csum;

	u8	h_exthdr[RDS_HEADER_EXT_SPACE];
};

/*
 * Reserved - indicates end of extensions
 */
#define RDS_EXTHDR_NONE		0

/*
 * This extension header is included in the very
 * first message that is sent on a new connection,
 * and identifies the protocol level. This will help
 * rolling updates if a future change requires breaking
 * the protocol.
 * NB: This is no longer true for IB, where we do a version
 * negotiation during the connection setup phase (protocol
 * version information is included in the RDMA CM private data).
 */
#define RDS_EXTHDR_VERSION	1
struct rds_ext_header_version {
	__be32			h_version;
};

/*
 * This extension header is included in the RDS message
 * chasing an RDMA operation.
 */
#define RDS_EXTHDR_RDMA		2
struct rds_ext_header_rdma {
	__be32			h_rdma_rkey;
};

/*
 * This extension header tells the peer about the
 * destination <R_Key,offset> of the requested RDMA
 * operation.
 */
#define RDS_EXTHDR_RDMA_DEST	3
struct rds_ext_header_rdma_dest {
	__be32			h_rdma_rkey;
	__be32			h_rdma_offset;
};

/* Extension header announcing number of paths.
 * Implicit length = 2 bytes.
 */
#define RDS_EXTHDR_NPATHS	5
#define RDS_EXTHDR_GEN_NUM	6

#define __RDS_EXTHDR_MAX	16 /* for now */
#define RDS_RX_MAX_TRACES	(RDS_MSG_RX_DGRAM_TRACE_MAX + 1)
#define RDS_MSG_RX_HDR		0
#define RDS_MSG_RX_START	1
#define RDS_MSG_RX_END		2
#define RDS_MSG_RX_CMSG		3

/* The following values are whitelisted for usercopy */
struct rds_inc_usercopy {
	rds_rdma_cookie_t	rdma_cookie;
	ktime_t			rx_tstamp;
};

struct rds_incoming {
	refcount_t		i_refcount;
	struct list_head	i_item;
	struct rds_connection	*i_conn;
	struct rds_conn_path	*i_conn_path;
	struct rds_header	i_hdr;
	unsigned long		i_rx_jiffies;
	struct in6_addr		i_saddr;

	struct rds_inc_usercopy	i_usercopy;
	u64			i_rx_lat_trace[RDS_RX_MAX_TRACES];
};

struct rds_mr {
	struct rb_node		r_rb_node;
	struct kref		r_kref;
	u32			r_key;

	/* A copy of the creation flags */
	unsigned int		r_use_once:1;
	unsigned int		r_invalidate:1;
	unsigned int		r_write:1;

	struct rds_sock		*r_sock; /* back pointer to the socket that owns us */
	struct rds_transport	*r_trans;
	void			*r_trans_private;
};

static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
{
	return r_key | (((u64) offset) << 32);
}

static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
{
	return cookie;
}

static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
{
	return cookie >> 32;
}
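
/* Round-trip example (illustrative): the cookie packs the 32-bit r_key
 * into the low half and the byte offset into the high half, so
 *
 *	rds_rdma_cookie_t c = rds_rdma_make_cookie(0x1234, 64);
 *
 * gives rds_rdma_cookie_key(c) == 0x1234 and
 * rds_rdma_cookie_offset(c) == 64.
 */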

/* atomic operation types */
#define RDS_ATOMIC_TYPE_CSWP	0
#define RDS_ATOMIC_TYPE_FADD	1

/*
 * m_sock_item and m_conn_item are on lists that are serialized under
 * conn->c_lock. m_sock_item has additional meaning in that once it is empty
 * the message will not be put back on the retransmit list after being sent.
 * Messages that are canceled while being sent rely on this.
 *
 * m_inc is used by loopback so that it can pass an incoming message straight
 * back up into the rx path. It embeds a wire header which is also used by
 * the send path, which is kind of awkward.
 *
 * m_sock_item indicates the message's presence on a socket's send or receive
 * queue. m_rs will point to that socket.
 *
 * m_daddr is used by cancellation to prune messages to a given destination.
 *
 * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
 * nesting. As paths iterate over messages on a sock, or conn, they must
 * also lock the conn, or sock, to remove the message from those lists too.
 * Testing the flag to determine if the message is still on the lists lets
 * us avoid testing the list_head directly. That means each path can use
 * the message's list_head to keep it on a local list while juggling locks
 * without confusing the other path.
 *
 * m_ack_seq is an optional field set by transports that need a different
 * sequence number range to invalidate. They can use this in a callback
 * that they pass to rds_send_drop_acked() to see if each message has been
 * acked. The HAS_ACK_SEQ flag can be used to detect messages which haven't
 * had ack_seq set yet.
 */
#define RDS_MSG_ON_SOCK		1
#define RDS_MSG_ON_CONN		2
#define RDS_MSG_HAS_ACK_SEQ	3
#define RDS_MSG_ACK_REQUIRED	4
#define RDS_MSG_RETRANSMITTED	5
#define RDS_MSG_MAPPED		6
#define RDS_MSG_PAGEVEC		7
#define RDS_MSG_FLUSH		8
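
/* These are bit numbers within rm->m_flags and are manipulated with the
 * atomic bitops; a minimal sketch:
 *
 *	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
 *	if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
 *		list_del_init(&rm->m_sock_item);
 */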

struct rds_znotifier {
	struct mmpin		z_mmp;
	u32			z_cookie;
};

struct rds_msg_zcopy_info {
	struct list_head	rs_zcookie_next;
	union {
		struct rds_znotifier	znotif;
		struct rds_zcopy_cookies zcookies;
	};
};

struct rds_msg_zcopy_queue {
	struct list_head	zcookie_head;
	spinlock_t		lock; /* protects zcookie_head queue */
};

static inline void rds_message_zcopy_queue_init(struct rds_msg_zcopy_queue *q)
{
	spin_lock_init(&q->lock);
	INIT_LIST_HEAD(&q->zcookie_head);
}

struct rds_iov_vector {
	struct rds_iovec *iov;
	int len;
};

struct rds_iov_vector_arr {
	struct rds_iov_vector *vec;
	int len;
	int indx;
	int incr;
};

struct rds_message {
	refcount_t		m_refcount;
	struct list_head	m_sock_item;
	struct list_head	m_conn_item;
	struct rds_incoming	m_inc;
	u64			m_ack_seq;
	struct in6_addr		m_daddr;
	unsigned long		m_flags;

	/* Never access m_rs without holding m_rs_lock.
	 * Lock nesting is
	 *  rm->m_rs_lock
	 *   -> rs->rs_lock
	 */
	spinlock_t		m_rs_lock;
	wait_queue_head_t	m_flush_wait;

	struct rds_sock		*m_rs;

	/* cookie to send to remote, in rds header */
	rds_rdma_cookie_t	m_rdma_cookie;

	unsigned int		m_used_sgs;
	unsigned int		m_total_sgs;

	void			*m_final_op;

	struct {
		struct rm_atomic_op {
			int			op_type;
			union {
				struct {
					uint64_t	compare;
					uint64_t	swap;
					uint64_t	compare_mask;
					uint64_t	swap_mask;
				} op_m_cswp;
				struct {
					uint64_t	add;
					uint64_t	nocarry_mask;
				} op_m_fadd;
			};

			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;
		} atomic;
		struct rm_rdma_op {
			u32			op_rkey;
			u64			op_remote_addr;
			unsigned int		op_write:1;
			unsigned int		op_fence:1;
			unsigned int		op_notify:1;
			unsigned int		op_recverr:1;
			unsigned int		op_mapped:1;
			unsigned int		op_silent:1;
			unsigned int		op_active:1;
			unsigned int		op_bytes;
			unsigned int		op_nents;
			unsigned int		op_count;
			struct scatterlist	*op_sg;
			struct rds_notifier	*op_notifier;

			struct rds_mr		*op_rdma_mr;

			u64			op_odp_addr;
			struct rds_mr		*op_odp_mr;
		} rdma;
		struct rm_data_op {
			unsigned int		op_active:1;
			unsigned int		op_nents;
			unsigned int		op_count;
			unsigned int		op_dmasg;
			unsigned int		op_dmaoff;
			struct rds_znotifier	*op_mmp_znotifier;
			struct scatterlist	*op_sg;
		} data;
	};

	struct rds_conn_path *m_conn_path;
};

/*
 * The RDS notifier is used (optionally) to tell the application about
 * completed RDMA operations. Rather than keeping the whole rds message
 * around on the queue, we allocate a small notifier that is put on the
 * socket's notifier_list. Notifications are delivered to the application
 * through control messages.
 */
struct rds_notifier {
	struct list_head	n_list;
	uint64_t		n_user_token;
	int			n_status;
};

/* Available as part of RDS core, so doesn't need to participate
 * in get_preferred transport etc
 */
#define RDS_TRANS_LOOP	3

/**
 * struct rds_transport - transport specific behavioural hooks
 *
 * @xmit: .xmit is called by rds_send_xmit() to tell the transport to send
 *        part of a message. The caller serializes on the send_sem so this
 *        doesn't need to be reentrant for a given conn. The header must be
 *        sent before the data payload. .xmit must be prepared to send a
 *        message with no data payload. .xmit should return the number of
 *        bytes that were sent down the connection, including header bytes.
 *        Returning 0 tells the caller that it doesn't need to perform any
 *        additional work now. This is usually the case when the transport has
 *        filled the sending queue for its connection and will handle
 *        triggering the rds thread to continue the send when space becomes
 *        available. Returning -EAGAIN tells the caller to retry the send
 *        immediately. Returning -ENOMEM tells the caller to retry the send at
 *        some point in the future.
 *
 * @conn_shutdown: conn_shutdown stops traffic on the given connection. Once
 *                 it returns the connection can not call rds_recv_incoming().
 *                 This will only be called once after conn_connect returns
 *                 non-zero success. The caller serializes this with
 *                 the send and connecting paths (xmit_* and conn_*). The
 *                 transport is responsible for other serialization, including
 *                 rds_recv_incoming(). This is called in process context but
 *                 should try hard not to block.
 */

struct rds_transport {
	char			t_name[TRANSNAMSIZ];
	struct list_head	t_item;
	struct module		*t_owner;
	unsigned int		t_prefer_loopback:1,
				t_mp_capable:1;
	unsigned int		t_type;

	int (*laddr_check)(struct net *net, const struct in6_addr *addr,
			   __u32 scope_id);
	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
	void (*conn_free)(void *data);
	int (*conn_path_connect)(struct rds_conn_path *cp);
	void (*conn_path_shutdown)(struct rds_conn_path *conn);
	void (*xmit_path_prepare)(struct rds_conn_path *cp);
	void (*xmit_path_complete)(struct rds_conn_path *cp);
	int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
		    unsigned int hdr_off, unsigned int sg, unsigned int off);
	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
	int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
	int (*recv_path)(struct rds_conn_path *cp);
	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iov_iter *to);
	void (*inc_free)(struct rds_incoming *inc);

	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event, bool isv6);
	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id, bool isv6);
	void (*cm_connect_complete)(struct rds_connection *conn,
				    struct rdma_cm_event *event);

	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
					unsigned int avail);
	void (*exit)(void);
	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
			struct rds_sock *rs, u32 *key_ret,
			struct rds_connection *conn,
			u64 start, u64 length, int need_odp);
	void (*sync_mr)(void *trans_private, int direction);
	void (*free_mr)(void *trans_private, int invalidate);
	void (*flush_mrs)(void);
	bool (*t_unloading)(struct rds_connection *conn);
	u8 (*get_tos_map)(u8 tos);
};
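
/* Minimal registration sketch (illustrative; only the hook names above
 * are real, the "foo" transport and its callbacks are hypothetical):
 *
 *	static struct rds_transport rds_foo_transport = {
 *		.t_name		= "foo",
 *		.t_owner	= THIS_MODULE,
 *		.conn_alloc	= rds_foo_conn_alloc,
 *		.conn_free	= rds_foo_conn_free,
 *		.xmit		= rds_foo_xmit,
 *	};
 *
 *	rds_trans_register(&rds_foo_transport);
 */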

/* Bind hash table key length. It is the sum of the size of a struct
 * in6_addr, a scope_id and a port.
 */
#define RDS_BOUND_KEY_LEN \
	(sizeof(struct in6_addr) + sizeof(__u32) + sizeof(__be16))

struct rds_sock {
	struct sock		rs_sk;

	u64			rs_user_addr;
	u64			rs_user_bytes;

	/*
	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
	 * support.
	 */
	struct rhash_head	rs_bound_node;
	u8			rs_bound_key[RDS_BOUND_KEY_LEN];
	struct sockaddr_in6	rs_bound_sin6;
#define rs_bound_addr		rs_bound_sin6.sin6_addr
#define rs_bound_addr_v4	rs_bound_sin6.sin6_addr.s6_addr32[3]
#define rs_bound_port		rs_bound_sin6.sin6_port
#define rs_bound_scope_id	rs_bound_sin6.sin6_scope_id
	struct in6_addr		rs_conn_addr;
#define rs_conn_addr_v4		rs_conn_addr.s6_addr32[3]
	__be16			rs_conn_port;
	struct rds_transport	*rs_transport;

	/*
	 * rds_sendmsg caches the conn it used the last time around.
	 * This helps avoid costly lookups.
	 */
	struct rds_connection	*rs_conn;

	/* flag indicating we were congested or not */
	int			rs_congested;
	/* seen congestion (ENOBUFS) when sending? */
	int			rs_seen_congestion;

	/* rs_lock protects all these adjacent members before the newline */
	spinlock_t		rs_lock;
	struct list_head	rs_send_queue;
	u32			rs_snd_bytes;
	int			rs_rcv_bytes;
	struct list_head	rs_notify_queue; /* currently used for failed RDMAs */

	/* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
	 * to decide whether the application should be woken up.
	 * If not set, we use rs_cong_track to find out whether a cong map
	 * update arrived.
	 */
	uint64_t		rs_cong_mask;
	uint64_t		rs_cong_notify;
	struct list_head	rs_cong_list;
	unsigned long		rs_cong_track;

	/*
	 * rs_recv_lock protects the receive queue, and is
	 * used to serialize with rds_release.
	 */
	rwlock_t		rs_recv_lock;
	struct list_head	rs_recv_queue;

	/* just for stats reporting */
	struct list_head	rs_item;

	/* these have their own lock */
	spinlock_t		rs_rdma_lock;
	struct rb_root		rs_rdma_keys;

	/* Socket options - in case there will be more */
	unsigned char		rs_recverr,
				rs_cong_monitor;
	u32			rs_hash_initval;

	/* Socket receive path trace points */
	u8			rs_rx_traces;
	u8			rs_rx_trace[RDS_MSG_RX_DGRAM_TRACE_MAX];
	struct rds_msg_zcopy_queue rs_zcookie_queue;
	u8			rs_tos;
};

static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
{
	return container_of(sk, struct rds_sock, rs_sk);
}

static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
{
	return &rs->rs_sk;
}

/*
 * The stack assigns sk_sndbuf and sk_rcvbuf to twice the specified value
 * to account for overhead. We don't account for overhead, we just apply
 * the number of payload bytes to the specified value.
 */
static inline int rds_sk_sndbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_sndbuf / 2;
}

static inline int rds_sk_rcvbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
}
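
/* Example (illustrative): after setsockopt(SO_SNDBUF) with 128KB the
 * stack stores sk_sndbuf == 256KB, and rds_sk_sndbuf() reports the
 * original 128KB as pure payload capacity.
 */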

struct rds_statistics {
	uint64_t	s_conn_reset;
	uint64_t	s_recv_drop_bad_checksum;
	uint64_t	s_recv_drop_old_seq;
	uint64_t	s_recv_drop_no_sock;
	uint64_t	s_recv_drop_dead_sock;
	uint64_t	s_recv_deliver_raced;
	uint64_t	s_recv_delivered;
	uint64_t	s_recv_queued;
	uint64_t	s_recv_immediate_retry;
	uint64_t	s_recv_delayed_retry;
	uint64_t	s_recv_ack_required;
	uint64_t	s_recv_rdma_bytes;
	uint64_t	s_recv_ping;
	uint64_t	s_send_queue_empty;
	uint64_t	s_send_queue_full;
	uint64_t	s_send_lock_contention;
	uint64_t	s_send_lock_queue_raced;
	uint64_t	s_send_immediate_retry;
	uint64_t	s_send_delayed_retry;
	uint64_t	s_send_drop_acked;
	uint64_t	s_send_ack_required;
	uint64_t	s_send_queued;
	uint64_t	s_send_rdma;
	uint64_t	s_send_rdma_bytes;
	uint64_t	s_send_pong;
	uint64_t	s_page_remainder_hit;
	uint64_t	s_page_remainder_miss;
	uint64_t	s_copy_to_user;
	uint64_t	s_copy_from_user;
	uint64_t	s_cong_update_queued;
	uint64_t	s_cong_update_received;
	uint64_t	s_cong_send_error;
	uint64_t	s_cong_send_blocked;
	uint64_t	s_recv_bytes_added_to_socket;
	uint64_t	s_recv_bytes_removed_from_socket;
	uint64_t	s_send_stuck_rm;
};

/* af_rds.c */
void rds_sock_addref(struct rds_sock *rs);
void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
static inline void __rds_wake_sk_sleep(struct sock *sk)
{
	wait_queue_head_t *waitq = sk_sleep(sk);

	if (!sock_flag(sk, SOCK_DEAD) && waitq)
		wake_up(waitq);
}
extern wait_queue_head_t rds_poll_waitq;


/* bind.c */
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
void rds_remove_bound(struct rds_sock *rs);
struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port,
				__u32 scope_id);
int rds_bind_lock_init(void);
void rds_bind_lock_destroy(void);

/* cong.c */
int rds_cong_get_maps(struct rds_connection *conn);
void rds_cong_add_conn(struct rds_connection *conn);
void rds_cong_remove_conn(struct rds_connection *conn);
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, struct rds_sock *rs);
void rds_cong_queue_updates(struct rds_cong_map *map);
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
int rds_cong_updated_since(unsigned long *recent);
void rds_cong_add_socket(struct rds_sock *);
void rds_cong_remove_socket(struct rds_sock *);
void rds_cong_exit(void);
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);

/* connection.c */
extern u32 rds_gen_num;
int rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(struct net *net,
				       const struct in6_addr *laddr,
				       const struct in6_addr *faddr,
				       struct rds_transport *trans,
				       u8 tos, gfp_t gfp,
				       int dev_if);
struct rds_connection *rds_conn_create_outgoing(struct net *net,
						const struct in6_addr *laddr,
						const struct in6_addr *faddr,
						struct rds_transport *trans,
						u8 tos, gfp_t gfp, int dev_if);
void rds_conn_shutdown(struct rds_conn_path *cpath);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
void rds_conn_path_drop(struct rds_conn_path *cpath, bool destroy);
void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_conn_path_connect_if_down(struct rds_conn_path *cp);
void rds_check_all_paths(struct rds_connection *conn);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    u64 *buffer,
			    size_t item_len);

__printf(2, 3)
void __rds_conn_path_error(struct rds_conn_path *cp, const char *, ...);
#define rds_conn_path_error(cp, fmt...) \
	__rds_conn_path_error(cp, KERN_WARNING "RDS: " fmt)

static inline int
rds_conn_path_transition(struct rds_conn_path *cp, int old, int new)
{
	return atomic_cmpxchg(&cp->cp_state, old, new) == old;
}

static inline int
rds_conn_transition(struct rds_connection *conn, int old, int new)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_transition(&conn->c_path[0], old, new);
}

static inline int
rds_conn_path_state(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state);
}

static inline int
rds_conn_state(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_state(&conn->c_path[0]);
}

static inline int
rds_conn_path_up(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_UP;
}

static inline int
rds_conn_path_down(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_DOWN;
}

static inline int
rds_conn_up(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_up(&conn->c_path[0]);
}

static inline int
rds_conn_path_connecting(struct rds_conn_path *cp)
{
	return atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING;
}

static inline int
rds_conn_connecting(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	return rds_conn_path_connecting(&conn->c_path[0]);
}

/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
			       bool zcopy);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq);
int rds_message_add_extension(struct rds_header *hdr,
			      unsigned int type, const void *data, unsigned int len);
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf, unsigned int *buflen);
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
void rds_message_wait(struct rds_message *rm);
void rds_message_unmapped(struct rds_message *rm);
void rds_notify_msg_zcopy_purge(struct rds_msg_zcopy_queue *info);

static inline void rds_message_make_checksum(struct rds_header *hdr)
{
	hdr->h_csum = 0;
	hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
}

static inline int rds_message_verify_checksum(const struct rds_header *hdr)
{
	return !hdr->h_csum || ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
}
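
/* Typical use (a sketch): the sender fills the header and computes the
 * checksum with h_csum zeroed; the receiver re-sums and expects zero.
 * A zero h_csum on the wire means "no checksum" and always verifies.
 *
 *	rds_message_make_checksum(&rm->m_inc.i_hdr);
 *	...
 *	if (!rds_message_verify_checksum(&inc->i_hdr))
 *		rds_stats_inc(s_recv_drop_bad_checksum);
 */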


/* page.c */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp);
void rds_page_exit(void);

/* recv.c */
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  struct in6_addr *saddr);
void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *conn,
		       struct in6_addr *saddr);
void rds_inc_put(struct rds_incoming *inc);
void rds_recv_incoming(struct rds_connection *conn, struct in6_addr *saddr,
		       struct in6_addr *daddr,
		       struct rds_incoming *inc, gfp_t gfp);
int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		int msg_flags);
void rds_clear_recv_queue(struct rds_sock *rs);
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip);
void rds6_inc_info_copy(struct rds_incoming *inc,
			struct rds_info_iterator *iter,
			struct in6_addr *saddr, struct in6_addr *daddr,
			int flip);

/* send.c */
int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len);
void rds_send_path_reset(struct rds_conn_path *conn);
int rds_send_xmit(struct rds_conn_path *cp);
struct sockaddr_in;
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in6 *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked);
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked);
void rds_send_ping(struct rds_connection *conn, int cp_index);
int rds_send_pong(struct rds_conn_path *cp, __be16 dport);

/* rdma.c */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
int rds_get_mr(struct rds_sock *rs, sockptr_t optval, int optlen);
int rds_get_mr_for_dest(struct rds_sock *rs, sockptr_t optval, int optlen);
int rds_free_mr(struct rds_sock *rs, sockptr_t optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
int rds_rdma_extra_size(struct rds_rdma_args *args,
			struct rds_iov_vector *iov);
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg,
		       struct rds_iov_vector *vec);
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
		      struct cmsghdr *cmsg);
void rds_rdma_free_op(struct rm_rdma_op *ro);
void rds_atomic_free_op(struct rm_atomic_op *ao);
void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg);

void __rds_put_mr_final(struct kref *kref);

static inline bool rds_destroy_pending(struct rds_connection *conn)
{
	return !check_net(rds_conn_net(conn)) ||
	       (conn->c_trans->t_unloading && conn->c_trans->t_unloading(conn));
}

enum {
	ODP_NOT_NEEDED,
	ODP_ZEROBASED,
	ODP_VIRTUAL
};

/* stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
#define rds_stats_inc_which(which, member) do {		\
	per_cpu(which, get_cpu()).member++;		\
	put_cpu();					\
} while (0)
#define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
#define rds_stats_add_which(which, member, count) do {	\
	per_cpu(which, get_cpu()).member += count;	\
	put_cpu();					\
} while (0)
#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
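
/* Usage example (illustrative): bump a global counter on the local CPU
 * without any locking; "len" here is a hypothetical byte count:
 *
 *	rds_stats_inc(s_recv_delivered);
 *	rds_stats_add(s_recv_rdma_bytes, len);
 */
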
int rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
			 uint64_t *values, const char *const *names,
			 size_t nr);

/* sysctl.c */
int rds_sysctl_init(void);
void rds_sysctl_exit(void);
extern unsigned long rds_sysctl_sndbuf_min;
extern unsigned long rds_sysctl_sndbuf_default;
extern unsigned long rds_sysctl_sndbuf_max;
extern unsigned long rds_sysctl_reconnect_min_jiffies;
extern unsigned long rds_sysctl_reconnect_max_jiffies;
extern unsigned int rds_sysctl_max_unacked_packets;
extern unsigned int rds_sysctl_max_unacked_bytes;
extern unsigned int rds_sysctl_ping_enable;
extern unsigned long rds_sysctl_trace_flags;
extern unsigned int rds_sysctl_trace_level;

/* threads.c */
int rds_threads_init(void);
void rds_threads_exit(void);
extern struct workqueue_struct *rds_wq;
void rds_queue_reconnect(struct rds_conn_path *cp);
void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
void rds_connect_path_complete(struct rds_conn_path *conn, int curr);
void rds_connect_complete(struct rds_connection *conn);
int rds_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2);

/* transport.c */
void rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(struct net *net,
					      const struct in6_addr *addr,
					      __u32 scope_id);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
				       unsigned int avail);
struct rds_transport *rds_trans_get(int t_type);
int rds_trans_init(void);
void rds_trans_exit(void);

#endif