/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RDS_IB_H
#define _RDS_IB_H

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_IB_MAX_SGE			8
#define RDS_IB_RECV_SGE			2

#define RDS_IB_DEFAULT_RECV_WR		1024
#define RDS_IB_DEFAULT_SEND_WR		256
#define RDS_IB_DEFAULT_FR_WR		512

#define RDS_IB_DEFAULT_RETRY_COUNT	1

#define RDS_IB_SUPPORTED_PROTOCOLS	0x00000003	/* minor versions supported */

#define RDS_IB_RECYCLE_BATCH_COUNT	32

#define RDS_IB_WC_MAX			32

extern struct rw_semaphore rds_ib_devices_lock;
extern struct list_head rds_ib_devices;

/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try to minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
struct rds_page_frag {
	struct list_head	f_item;
	struct list_head	f_cache_entry;
	struct scatterlist	f_sg;
};

struct rds_ib_incoming {
	struct list_head	ii_frags;
	struct list_head	ii_cache_entry;
	struct rds_incoming	ii_inc;
};

struct rds_ib_cache_head {
	struct list_head *first;
	unsigned long count;
};

struct rds_ib_refill_cache {
	struct rds_ib_cache_head __percpu *percpu;
	struct list_head	 *xfer;
	struct list_head	 *ready;
};

/* This is the common structure for the IB private data exchange used in
 * setting up an RDS connection.  The exchange differs for IPv4 and IPv6
 * connections: the address size differs, and the addresses exchanged sit
 * at the beginning of the structure, so using the same structure for both
 * would break interoperability.
 */
struct rds_ib_conn_priv_cmn {
	u8			ricpc_protocol_major;
	u8			ricpc_protocol_minor;
	__be16			ricpc_protocol_minor_mask;	/* bitmask */
	u8			ricpc_dp_toss;
	u8			ripc_reserved1;
	__be16			ripc_reserved2;
	__be64			ricpc_ack_seq;
	__be32			ricpc_credit;	/* non-zero enables flow ctl */
};

struct rds_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be32				dp_saddr;
	__be32				dp_daddr;
	struct rds_ib_conn_priv_cmn	dp_cmn;
};

struct rds6_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	struct in6_addr			dp_saddr;
	struct in6_addr			dp_daddr;
	struct rds_ib_conn_priv_cmn	dp_cmn;
};

#define dp_protocol_major	dp_cmn.ricpc_protocol_major
#define dp_protocol_minor	dp_cmn.ricpc_protocol_minor
#define dp_protocol_minor_mask	dp_cmn.ricpc_protocol_minor_mask
#define dp_ack_seq		dp_cmn.ricpc_ack_seq
#define dp_credit		dp_cmn.ricpc_credit

union rds_ib_conn_priv {
	struct rds_ib_connect_private	ricp_v4;
	struct rds6_ib_connect_private	ricp_v6;
};

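/* A minimal sketch (the helper below is hypothetical, not part of this
 * header): because the IPv4 and IPv6 private data variants differ in size,
 * the length advertised to the connection manager must match the variant
 * actually sent.
 */
static inline void rds_ib_example_set_priv(struct rdma_conn_param *param,
					   union rds_ib_conn_priv *dp,
					   bool isv6)
{
	param->private_data = dp;
	param->private_data_len = isv6 ? sizeof(dp->ricp_v6)
				       : sizeof(dp->ricp_v4);
}
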
struct rds_ib_send_work {
	void			*s_op;
	union {
		struct ib_send_wr	s_wr;
		struct ib_rdma_wr	s_rdma_wr;
		struct ib_atomic_wr	s_atomic_wr;
	};
	struct ib_sge		s_sge[RDS_IB_MAX_SGE];
	unsigned long		s_queued;
};

struct rds_ib_recv_work {
	struct rds_ib_incoming	*r_ibinc;
	struct rds_page_frag	*r_frag;
	struct ib_recv_wr	r_wr;
	struct ib_sge		r_sge[2];
};

struct rds_ib_work_ring {
	u32		w_nr;
	u32		w_alloc_ptr;
	u32		w_alloc_ctr;
	u32		w_free_ptr;
	atomic_t	w_free_ctr;
};

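/* A rough sketch of the ring accounting (the real helpers live in
 * ib_ring.c; this illustrative function is not part of the header):
 * entries still in flight are allocations minus completions.
 */
static inline u32 rds_ib_example_ring_used(struct rds_ib_work_ring *ring)
{
	return ring->w_alloc_ctr - (u32) atomic_read(&ring->w_free_ctr);
}
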
/* Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket, so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8-byte fragment from an ACK frame.
 */
struct rds_ib_ack_state {
	u64		ack_next;
	u64		ack_recv;
	unsigned int	ack_required:1;
	unsigned int	ack_next_valid:1;
	unsigned int	ack_recv_valid:1;
};


struct rds_ib_device;

struct rds_ib_connection {

	struct list_head	ib_node;
	struct rds_ib_device	*rds_ibdev;
	struct rds_connection	*conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id	*i_cm_id;
	struct ib_pd		*i_pd;
	struct ib_cq		*i_send_cq;
	struct ib_cq		*i_recv_cq;
	struct ib_wc		i_send_wc[RDS_IB_WC_MAX];
	struct ib_wc		i_recv_wc[RDS_IB_WC_MAX];

	/* To control the number of wrs from fastreg */
	atomic_t		i_fastreg_wrs;
	atomic_t		i_fastreg_inuse_count;

	/* interrupt handling */
	struct tasklet_struct	i_send_tasklet;
	struct tasklet_struct	i_recv_tasklet;

	/* tx */
	struct rds_ib_work_ring	i_send_ring;
	struct rm_data_op	*i_data_op;
	struct rds_header	**i_send_hdrs;
	dma_addr_t		*i_send_hdrs_dma;
	struct rds_ib_send_work *i_sends;
	atomic_t		i_signaled_sends;

	/* rx */
	struct mutex		i_recv_mutex;
	struct rds_ib_work_ring	i_recv_ring;
	struct rds_ib_incoming	*i_ibinc;
	u32			i_recv_data_rem;
	struct rds_header	**i_recv_hdrs;
	dma_addr_t		*i_recv_hdrs_dma;
	struct rds_ib_recv_work *i_recvs;
	u64			i_ack_recv;	/* last ACK received */
	struct rds_ib_refill_cache i_cache_incs;
	struct rds_ib_refill_cache i_cache_frags;
	atomic_t		i_cache_allocs;

	/* sending acks */
	unsigned long		i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t		i_ack_next;	/* next ACK to send */
#else
	spinlock_t		i_ack_lock;	/* protect i_ack_next */
	u64			i_ack_next;	/* next ACK to send */
#endif
	struct rds_header	*i_ack;
	struct ib_send_wr	i_ack_wr;
	struct ib_sge		i_ack_sge;
	dma_addr_t		i_ack_dma;
	unsigned long		i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to the remote.
	 * Rather than protect them using a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg.
	 */
	atomic_t		i_credits;

	/* Protocol version specific information */
	unsigned int		i_flowctl:1;	/* enable/disable flow ctl */

	/* Batched completions */
	unsigned int		i_unsignaled_wrs;

	/* Endpoint role in connection */
	bool			i_active_side;
	atomic_t		i_cq_quiesce;

	/* Send/Recv vectors */
	int			i_scq_vector;
	int			i_rcq_vector;
	u8			i_sl;
};

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)	((v) >> 16)
#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)	((v) << 16)

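/* Sketch of the credit packing described above (illustrative helper, not
 * part of this header): send credits occupy the low 16 bits and posted
 * recv credits the high 16 bits of one atomic_t, so both can be updated
 * together with a single cmpxchg.
 */
static inline void rds_ib_example_add_send_credits(atomic_t *credits,
						   unsigned int send)
{
	unsigned int oldval, newval;

	do {
		oldval = atomic_read(credits);
		newval = oldval + IB_SET_SEND_CREDITS(send);
	} while (atomic_cmpxchg(credits, oldval, newval) != oldval);
}
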
struct rds_ib_ipaddr {
	struct list_head	list;
	__be32			ipaddr;
	struct rcu_head		rcu;
};

enum {
	RDS_IB_MR_8K_POOL,
	RDS_IB_MR_1M_POOL,
};

struct rds_ib_device {
	struct list_head	list;
	struct list_head	ipaddr_list;
	struct list_head	conn_list;
	struct ib_device	*dev;
	struct ib_pd		*pd;
	u8			odp_capable:1;

	unsigned int		max_mrs;
	struct rds_ib_mr_pool	*mr_1m_pool;
	struct rds_ib_mr_pool	*mr_8k_pool;
	unsigned int		max_8k_mrs;
	unsigned int		max_1m_mrs;
	int			max_sge;
	unsigned int		max_wrs;
	unsigned int		max_initiator_depth;
	unsigned int		max_responder_resources;
	spinlock_t		spinlock;	/* protect the above */
	refcount_t		refcount;
	struct work_struct	free_work;
	int			*vector_load;
};

#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)

/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT	0
#define IB_ACK_REQUESTED	1

/* Magic WR_ID for ACKs */
#define RDS_IB_ACK_WR_ID	(~(u64) 0)

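/* Completion demultiplexing sketch (hypothetical helper): ACK sends carry
 * the magic wr_id above, so a send-completion handler can distinguish them
 * from regular send-ring entries.
 */
static inline bool rds_ib_example_wc_is_ack(const struct ib_wc *wc)
{
	return wc->wr_id == RDS_IB_ACK_WR_ID;
}
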
struct rds_ib_statistics {
	uint64_t	s_ib_connect_raced;
	uint64_t	s_ib_listen_closed_stale;
	uint64_t	s_ib_evt_handler_call;
	uint64_t	s_ib_tasklet_call;
	uint64_t	s_ib_tx_cq_event;
	uint64_t	s_ib_tx_ring_full;
	uint64_t	s_ib_tx_throttle;
	uint64_t	s_ib_tx_sg_mapping_failure;
	uint64_t	s_ib_tx_stalled;
	uint64_t	s_ib_tx_credit_updates;
	uint64_t	s_ib_rx_cq_event;
	uint64_t	s_ib_rx_ring_empty;
	uint64_t	s_ib_rx_refill_from_cq;
	uint64_t	s_ib_rx_refill_from_thread;
	uint64_t	s_ib_rx_alloc_limit;
	uint64_t	s_ib_rx_total_frags;
	uint64_t	s_ib_rx_total_incs;
	uint64_t	s_ib_rx_credit_updates;
	uint64_t	s_ib_ack_sent;
	uint64_t	s_ib_ack_send_failure;
	uint64_t	s_ib_ack_send_delayed;
	uint64_t	s_ib_ack_send_piggybacked;
	uint64_t	s_ib_ack_received;
	uint64_t	s_ib_rdma_mr_8k_alloc;
	uint64_t	s_ib_rdma_mr_8k_free;
	uint64_t	s_ib_rdma_mr_8k_used;
	uint64_t	s_ib_rdma_mr_8k_pool_flush;
	uint64_t	s_ib_rdma_mr_8k_pool_wait;
	uint64_t	s_ib_rdma_mr_8k_pool_depleted;
	uint64_t	s_ib_rdma_mr_1m_alloc;
	uint64_t	s_ib_rdma_mr_1m_free;
	uint64_t	s_ib_rdma_mr_1m_used;
	uint64_t	s_ib_rdma_mr_1m_pool_flush;
	uint64_t	s_ib_rdma_mr_1m_pool_wait;
	uint64_t	s_ib_rdma_mr_1m_pool_depleted;
	uint64_t	s_ib_rdma_mr_8k_reused;
	uint64_t	s_ib_rdma_mr_1m_reused;
	uint64_t	s_ib_atomic_cswp;
	uint64_t	s_ib_atomic_fadd;
	uint64_t	s_ib_recv_added_to_cache;
	uint64_t	s_ib_recv_removed_from_cache;
};

extern struct workqueue_struct *rds_ib_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define it.
 */
static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
					      struct scatterlist *sglist,
					      unsigned int sg_dma_len,
					      int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
					   sg_dma_len(sg), direction);
	}
}
#define ib_dma_sync_sg_for_cpu	rds_ib_dma_sync_sg_for_cpu

static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
						 struct scatterlist *sglist,
						 unsigned int sg_dma_len,
						 int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_device(dev, sg_dma_address(sg),
					      sg_dma_len(sg), direction);
	}
}
#define ib_dma_sync_sg_for_device	rds_ib_dma_sync_sg_for_device

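/* Usage sketch (hypothetical helper): sync a received fragment for CPU
 * access before parsing it, then hand it back to the device.
 */
static inline void rds_ib_example_sync_frag(struct ib_device *dev,
					    struct rds_page_frag *frag)
{
	ib_dma_sync_sg_for_cpu(dev, &frag->f_sg, 1, DMA_FROM_DEVICE);
	/* ... inspect or copy the fragment payload here ... */
	ib_dma_sync_sg_for_device(dev, &frag->f_sg, 1, DMA_FROM_DEVICE);
}
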

/* ib.c */
extern struct rds_transport rds_ib_transport;
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device);
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
extern struct ib_client rds_ib_client;

extern unsigned int rds_ib_retry_count;

extern spinlock_t ib_nodev_conns_lock;
extern struct list_head ib_nodev_conns;

/* ib_cm.c */
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_ib_conn_free(void *arg);
int rds_ib_conn_path_connect(struct rds_conn_path *cp);
void rds_ib_conn_path_shutdown(struct rds_conn_path *cp);
void rds_ib_state_change(struct sock *sk);
int rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
__printf(2, 3)
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event, bool isv6);
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id, bool isv6);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);

#define rds_ib_conn_error(conn, fmt...) \
	__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)

/* ib_rdma.c */
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev,
			 struct in6_addr *ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_destroy_nodev_conns(void);
void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);

/* ib_recv.c */
int rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv_path(struct rds_conn_path *conn);
int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp);
void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc,
			     struct rds_ib_ack_state *state);
void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
void rds_ib_attempt_ack(struct rds_ib_connection *ic);
void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required);

/* ib_ring.c */
void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
int rds_ib_ring_low(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;

/* ib_send.c */
void rds_ib_xmit_path_complete(struct rds_conn_path *cp);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted, int max_posted);
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);

/* ib_stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
#define rds_ib_stats_add(member, count) \
		rds_stats_add_which(rds_ib_stats, member, count)
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);

/* ib_sysctl.c */
int rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
extern unsigned long rds_ib_sysctl_max_unsig_wrs;
extern unsigned long rds_ib_sysctl_max_unsig_bytes;
extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;

#endif
459