xref: /OK3568_Linux_fs/kernel/drivers/infiniband/sw/rxe/rxe_verbs.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
4*4882a593Smuzhiyun  * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #ifndef RXE_VERBS_H
8*4882a593Smuzhiyun #define RXE_VERBS_H
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <linux/interrupt.h>
11*4882a593Smuzhiyun #include <linux/workqueue.h>
12*4882a593Smuzhiyun #include <rdma/rdma_user_rxe.h>
13*4882a593Smuzhiyun #include "rxe_pool.h"
14*4882a593Smuzhiyun #include "rxe_task.h"
15*4882a593Smuzhiyun #include "rxe_hw_counters.h"
16*4882a593Smuzhiyun 
/* Compare two partition keys per the IBA matching rule: the low 15 bits
 * (the base P_Key) must be equal and non-zero, and at least one of the
 * two keys must be a full-membership key (bit 15 set).
 * Returns 1 on a match, 0 otherwise.
 */
static inline int pkey_match(u16 key1, u16 key2)
{
	u16 base1 = key1 & 0x7fff;
	u16 base2 = key2 & 0x7fff;

	if (base1 == 0 || base1 != base2)
		return 0;

	return ((key1 | key2) & 0x8000) ? 1 : 0;
}
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun /* Return >0 if psn_a > psn_b
25*4882a593Smuzhiyun  *	   0 if psn_a == psn_b
26*4882a593Smuzhiyun  *	  <0 if psn_a < psn_b
27*4882a593Smuzhiyun  */
psn_compare(u32 psn_a,u32 psn_b)28*4882a593Smuzhiyun static inline int psn_compare(u32 psn_a, u32 psn_b)
29*4882a593Smuzhiyun {
30*4882a593Smuzhiyun 	s32 diff;
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun 	diff = (psn_a - psn_b) << 8;
33*4882a593Smuzhiyun 	return diff;
34*4882a593Smuzhiyun }
35*4882a593Smuzhiyun 
/* rxe wrapper around the core verbs user context object. */
struct rxe_ucontext {
	struct ib_ucontext ibuc;
	struct rxe_pool_entry	pelem;	/* membership in the device's uc_pool */
};
40*4882a593Smuzhiyun 
/* rxe wrapper around the core verbs protection domain object. */
struct rxe_pd {
	struct ib_pd            ibpd;
	struct rxe_pool_entry	pelem;	/* membership in the device's pd_pool */
};
45*4882a593Smuzhiyun 
/* rxe address handle: core ib_ah plus a cached rxe address vector. */
struct rxe_ah {
	struct ib_ah		ibah;
	struct rxe_pool_entry	pelem;	/* membership in the device's ah_pool */
	struct rxe_pd		*pd;	/* owning protection domain */
	struct rxe_av		av;	/* resolved address vector for this AH */
};
52*4882a593Smuzhiyun 
/* Completion queue element. The union lets one CQE slot hold either the
 * kernel (ib_wc) or the uverbs/user-space (ib_uverbs_wc) completion layout.
 */
struct rxe_cqe {
	union {
		struct ib_wc		ibwc;
		struct ib_uverbs_wc	uibwc;
	};
};
59*4882a593Smuzhiyun 
/* rxe completion queue: a circular rxe_queue of rxe_cqe entries drained
 * by a tasklet.
 */
struct rxe_cq {
	struct ib_cq		ibcq;
	struct rxe_pool_entry	pelem;	/* membership in the device's cq_pool */
	struct rxe_queue	*queue;	/* ring buffer holding the CQEs */
	spinlock_t		cq_lock;
	u8			notify;	/* pending notification flags (IB_CQ_* semantics — see cq code) */
	bool			is_dying;	/* set on destroy to stop completion delivery */
	int			is_user;	/* nonzero when the queue is shared with user space */
	struct tasklet_struct	comp_task;	/* defers ib_cq completion callbacks */
};
70*4882a593Smuzhiyun 
/* Lifecycle of a send work request as it moves through the requester
 * and completer.
 */
enum wqe_state {
	wqe_state_posted,	/* on the SQ, not yet picked up by the requester */
	wqe_state_processing,	/* requester is generating packets for it */
	wqe_state_pending,	/* sent, waiting for acknowledgement (RC) */
	wqe_state_done,		/* completed successfully */
	wqe_state_error,	/* completed in error */
};
78*4882a593Smuzhiyun 
/* Send queue: negotiated capacity limits plus the shared ring buffer. */
struct rxe_sq {
	int			max_wr;		/* max outstanding work requests */
	int			max_sge;	/* max scatter/gather entries per WR */
	int			max_inline;	/* max inline data bytes per WR */
	spinlock_t		sq_lock; /* guard queue */
	struct rxe_queue	*queue;
};
86*4882a593Smuzhiyun 
/* Receive queue. Producer (post_recv) and consumer (responder) run
 * concurrently, hence the two independent locks.
 */
struct rxe_rq {
	int			max_wr;		/* max outstanding work requests */
	int			max_sge;	/* max scatter/gather entries per WR */
	spinlock_t		producer_lock; /* guard queue producer */
	spinlock_t		consumer_lock; /* guard queue consumer */
	struct rxe_queue	*queue;
};
94*4882a593Smuzhiyun 
/* Shared receive queue: an rxe_rq shared by several QPs. */
struct rxe_srq {
	struct ib_srq		ibsrq;
	struct rxe_pool_entry	pelem;	/* membership in the device's srq_pool */
	struct rxe_pd		*pd;	/* owning protection domain */
	struct rxe_rq		rq;	/* the shared receive queue itself */
	u32			srq_num;

	int			limit;	/* low-watermark for the SRQ limit event */
	int			error;	/* nonzero once the SRQ has entered error state */
};
105*4882a593Smuzhiyun 
/* Software QP state, tracked separately for the requester (req.state)
 * and responder (resp.state) sides; mirrors the IB QP state machine.
 */
enum rxe_qp_state {
	QP_STATE_RESET,
	QP_STATE_INIT,
	QP_STATE_READY,
	QP_STATE_DRAIN,		/* req only */
	QP_STATE_DRAINED,	/* req only */
	QP_STATE_ERROR
};
114*4882a593Smuzhiyun 
/* Requester-side state for a QP: tracks progress through the send queue
 * and the conditions the requester task may be waiting on.
 */
struct rxe_req_info {
	enum rxe_qp_state	state;		/* requester's view of the QP state */
	int			wqe_index;	/* current position in the send queue */
	u32			psn;		/* next PSN to send */
	int			opcode;		/* opcode of the last packet generated */
	atomic_t		rd_atomic;	/* remaining RDMA read/atomic credits */
	int			wait_fence;	/* stalled on a fenced WQE */
	int			need_rd_atomic;	/* stalled waiting for a rd_atomic credit */
	int			wait_psn;	/* stalled waiting for PSN window to open */
	int			need_retry;	/* a retry has been requested */
	int			noack_pkts;	/* packets sent since last ack request */
	struct rxe_task		task;		/* deferred-work context running the requester */
};
128*4882a593Smuzhiyun 
/* Completer-side state for a QP: processes responses/acks and retires
 * send WQEs, including retry bookkeeping.
 */
struct rxe_comp_info {
	u32			psn;		/* next PSN expected in a response */
	int			opcode;		/* opcode of the last response processed */
	int			timeout;	/* retransmit timer has fired */
	int			timeout_retry;	/* a timeout-driven retry is in progress */
	int			started_retry;	/* retry sequence has started */
	u32			retry_cnt;	/* remaining transport retries */
	u32			rnr_retry;	/* remaining RNR NAK retries */
	struct rxe_task		task;		/* deferred-work context running the completer */
};
139*4882a593Smuzhiyun 
/* Replay state of a responder read/atomic resource (see struct resp_res). */
enum rdatm_res_state {
	rdatm_res_state_next,
	rdatm_res_state_new,
	rdatm_res_state_replay,
};
145*4882a593Smuzhiyun 
/* One responder resource: saved context for an inbound RDMA read or
 * atomic operation so duplicate requests can be replayed.
 */
struct resp_res {
	int			type;		/* distinguishes the union member in use */
	int			replay;		/* this resource is being replayed */
	u32			first_psn;	/* PSN range covered by the operation */
	u32			last_psn;
	u32			cur_psn;	/* replay progress within the range */
	enum rdatm_res_state	state;

	union {
		struct {
			struct sk_buff	*skb;	/* saved atomic response packet */
		} atomic;
		struct {
			struct rxe_mem	*mr;	/* MR being read from */
			u64		va_org;	/* original start VA of the read */
			u32		rkey;
			u32		length;	/* total length of the read */
			u64		va;	/* current VA while (re)playing */
			u32		resid;	/* bytes remaining */
		} read;
	};
};
168*4882a593Smuzhiyun 
/* Responder-side state for a QP: tracks the inbound request stream and
 * the resources needed to answer (and replay) reads/atomics.
 */
struct rxe_resp_info {
	enum rxe_qp_state	state;		/* responder's view of the QP state */
	u32			msn;		/* message sequence number */
	u32			psn;		/* next expected PSN */
	u32			ack_psn;	/* PSN up to which acks have been generated */
	int			opcode;		/* opcode of the last request processed */
	int			drop_msg;	/* current message is being dropped */
	int			goto_error;	/* transition to error state requested */
	int			sent_psn_nak;	/* a PSN-sequence NAK has been sent */
	enum ib_wc_status	status;		/* completion status for the current WQE */
	u8			aeth_syndrome;	/* syndrome to place in the next AETH */

	/* Receive only */
	struct rxe_recv_wqe	*wqe;		/* receive WQE consumed by the current message */

	/* RDMA read / atomic only */
	u64			va;		/* current virtual address in the target MR */
	struct rxe_mem		*mr;		/* target memory region */
	u32			resid;		/* bytes remaining in the operation */
	u32			rkey;
	u32			length;
	u64			atomic_orig;	/* original value returned by an atomic op */

	/* SRQ only */
	struct {
		struct rxe_recv_wqe	wqe;	/* local copy of the WQE taken from the SRQ */
		struct ib_sge		sge[RXE_MAX_SGE];
	} srq_wqe;

	/* Responder resources. It's a circular list where the oldest
	 * resource is dropped first.
	 */
	struct resp_res		*resources;
	unsigned int		res_head;
	unsigned int		res_tail;
	struct resp_res		*res;		/* resource currently being processed */
	struct rxe_task		task;		/* deferred-work context running the responder */
};
207*4882a593Smuzhiyun 
/* rxe queue pair: ties together the send/receive queues, the per-role
 * state machines (req/comp/resp), the UDP socket used for transmission,
 * and the RC timers.
 */
struct rxe_qp {
	struct rxe_pool_entry	pelem;	/* membership in the device's qp_pool */
	struct ib_qp		ibqp;
	struct ib_qp_attr	attr;	/* cached verbs attributes */
	unsigned int		valid;	/* nonzero once the QP is fully initialized */
	unsigned int		mtu;	/* path MTU in bytes */
	int			is_user;	/* nonzero when queues are shared with user space */

	struct rxe_pd		*pd;
	struct rxe_srq		*srq;	/* NULL unless attached to a shared RQ */
	struct rxe_cq		*scq;	/* send completion queue */
	struct rxe_cq		*rcq;	/* receive completion queue */

	enum ib_sig_type	sq_sig_type;	/* all-signaled vs. per-WR signaling */

	struct rxe_sq		sq;
	struct rxe_rq		rq;	/* unused when srq is set */

	struct socket		*sk;	/* UDP socket packets are sent on */
	u32			dst_cookie;	/* cached route validity cookie */
	u16			src_port;	/* UDP source port for this QP */

	struct rxe_av		pri_av;	/* primary path address vector */
	struct rxe_av		alt_av;	/* alternate path address vector */

	/* list of mcast groups qp has joined (for cleanup) */
	struct list_head	grp_list;
	spinlock_t		grp_lock; /* guard grp_list */

	struct sk_buff_head	req_pkts;	/* inbound request packets for the responder */
	struct sk_buff_head	resp_pkts;	/* inbound response packets for the completer */
	struct sk_buff_head	send_pkts;	/* packets queued for transmission */

	struct rxe_req_info	req;
	struct rxe_comp_info	comp;
	struct rxe_resp_info	resp;

	atomic_t		ssn;		/* send sequence number */
	atomic_t		skb_out;	/* skbs in flight on the wire */
	int			need_req_skb;	/* requester is waiting for skb_out to drain */

	/* Timer for retranmitting packet when ACKs have been lost. RC
	 * only. The requester sets it when it is not already
	 * started. The responder resets it whenever an ack is
	 * received.
	 */
	struct timer_list retrans_timer;
	u64 qp_timeout_jiffies;	/* retransmit timeout converted to jiffies */

	/* Timer for handling RNR NAKS. */
	struct timer_list rnr_nak_timer;

	spinlock_t		state_lock; /* guard requester and completer */

	struct execute_work	cleanup_work;	/* deferred teardown work item */
};
264*4882a593Smuzhiyun 
/* Lifecycle state of a memory region/window. */
enum rxe_mem_state {
	RXE_MEM_STATE_ZOMBIE,	/* deregistered while still referenced */
	RXE_MEM_STATE_INVALID,	/* invalidated; keys may not be used */
	RXE_MEM_STATE_FREE,	/* allocated but not yet bound to memory */
	RXE_MEM_STATE_VALID,	/* usable for data transfer */
};
271*4882a593Smuzhiyun 
/* Kind of mapping a struct rxe_mem describes. */
enum rxe_mem_type {
	RXE_MEM_TYPE_NONE,
	RXE_MEM_TYPE_DMA,	/* full DMA access (no translation table) */
	RXE_MEM_TYPE_MR,	/* user-registered memory region */
	RXE_MEM_TYPE_FMR,	/* fast memory region */
	RXE_MEM_TYPE_MW,	/* memory window */
};
279*4882a593Smuzhiyun 
/* Number of physical-buffer descriptors that fit in one page-sized map. */
#define RXE_BUF_PER_MAP		(PAGE_SIZE / sizeof(struct rxe_phys_buf))

/* One physically contiguous chunk of a memory region. */
struct rxe_phys_buf {
	u64      addr;
	u64      size;
};

/* A page-sized array of physical buffers; rxe_mem keeps a table of these. */
struct rxe_map {
	struct rxe_phys_buf	buf[RXE_BUF_PER_MAP];
};
290*4882a593Smuzhiyun 
/* Software memory region/window. Translation from iova to physical
 * buffers goes through a two-level table: map[] points at page-sized
 * rxe_map blocks, each holding RXE_BUF_PER_MAP rxe_phys_buf entries.
 */
struct rxe_mem {
	struct rxe_pool_entry	pelem;	/* membership in mr_pool or mw_pool */
	union {
		struct ib_mr		ibmr;	/* when type is an MR variant */
		struct ib_mw		ibmw;	/* when type is RXE_MEM_TYPE_MW */
	};

	struct ib_umem		*umem;	/* pinned user pages backing an MR (NULL for DMA type) */

	enum rxe_mem_state	state;
	enum rxe_mem_type	type;
	u64			va;	/* user virtual address the region was registered at */
	u64			iova;	/* I/O virtual address used in wire requests */
	size_t			length;	/* registered length in bytes */
	u32			offset;	/* offset of va within the first buffer */
	int			access;	/* IB_ACCESS_* permission flags */

	/* shift/mask pairs for locating a buffer from an iova */
	int			page_shift;
	int			page_mask;
	int			map_shift;
	int			map_mask;

	u32			num_buf;	/* buffers currently described */
	u32			nbuf;		/* next buffer slot to fill during build */

	u32			max_buf;	/* capacity of the translation table */
	u32			num_map;	/* number of rxe_map blocks allocated */

	struct rxe_map		**map;	/* first-level table of map blocks */
};
321*4882a593Smuzhiyun 
/* A multicast group: the set of QPs attached to one MGID on a device. */
struct rxe_mc_grp {
	struct rxe_pool_entry	pelem;	/* membership in the device's mc_grp_pool */
	spinlock_t		mcg_lock; /* guard group */
	struct rxe_dev		*rxe;
	struct list_head	qp_list;	/* rxe_mc_elem links for attached QPs */
	union ib_gid		mgid;		/* multicast GID identifying the group */
	int			num_qp;		/* number of attached QPs */
	u32			qkey;
	u16			pkey;
};
332*4882a593Smuzhiyun 
/* Link object joining one QP to one multicast group; sits on both the
 * group's qp_list and the QP's grp_list.
 */
struct rxe_mc_elem {
	struct rxe_pool_entry	pelem;	/* membership in the device's mc_elem_pool */
	struct list_head	qp_list;	/* link in grp->qp_list */
	struct list_head	grp_list;	/* link in qp->grp_list */
	struct rxe_qp		*qp;
	struct rxe_mc_grp	*grp;
};
340*4882a593Smuzhiyun 
/* Per-port state (rxe devices expose a single port). */
struct rxe_port {
	struct ib_port_attr	attr;
	__be64			port_guid;
	__be64			subnet_prefix;
	spinlock_t		port_lock; /* guard port */
	unsigned int		mtu_cap;	/* max MTU supported by the underlying netdev */
	/* special QPs */
	u32			qp_smi_index;
	u32			qp_gsi_index;
};
351*4882a593Smuzhiyun 
/* The rxe device: embeds the core ib_device and owns the object pools,
 * the backing net_device, mmap bookkeeping, and stats counters.
 */
struct rxe_dev {
	struct ib_device	ib_dev;
	struct ib_device_attr	attr;
	int			max_ucontext;
	int			max_inline_data;
	struct mutex	usdev_lock;	/* serializes user/admin device operations */

	struct net_device	*ndev;	/* Ethernet device packets are sent through */

	int			xmit_errors;	/* count of failed transmits */

	/* per-object-type allocation pools */
	struct rxe_pool		uc_pool;
	struct rxe_pool		pd_pool;
	struct rxe_pool		ah_pool;
	struct rxe_pool		srq_pool;
	struct rxe_pool		qp_pool;
	struct rxe_pool		cq_pool;
	struct rxe_pool		mr_pool;
	struct rxe_pool		mw_pool;
	struct rxe_pool		mc_grp_pool;
	struct rxe_pool		mc_elem_pool;

	spinlock_t		pending_lock; /* guard pending_mmaps */
	struct list_head	pending_mmaps;	/* queue buffers waiting to be mmap'ed */

	spinlock_t		mmap_offset_lock; /* guard mmap_offset */
	u64			mmap_offset;	/* next offset handed out for mmap */

	atomic64_t		stats_counters[RXE_NUM_OF_COUNTERS];	/* see rxe_counter_inc() */

	struct rxe_port		port;	/* the single port of this device */
	struct crypto_shash	*tfm;	/* CRC32 transform used for ICRC computation */
};
385*4882a593Smuzhiyun 
/* Atomically bump the per-device software statistics counter @index. */
static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index)
{
	atomic64_inc(&rxe->stats_counters[index]);
}
390*4882a593Smuzhiyun 
to_rdev(struct ib_device * dev)391*4882a593Smuzhiyun static inline struct rxe_dev *to_rdev(struct ib_device *dev)
392*4882a593Smuzhiyun {
393*4882a593Smuzhiyun 	return dev ? container_of(dev, struct rxe_dev, ib_dev) : NULL;
394*4882a593Smuzhiyun }
395*4882a593Smuzhiyun 
to_ruc(struct ib_ucontext * uc)396*4882a593Smuzhiyun static inline struct rxe_ucontext *to_ruc(struct ib_ucontext *uc)
397*4882a593Smuzhiyun {
398*4882a593Smuzhiyun 	return uc ? container_of(uc, struct rxe_ucontext, ibuc) : NULL;
399*4882a593Smuzhiyun }
400*4882a593Smuzhiyun 
to_rpd(struct ib_pd * pd)401*4882a593Smuzhiyun static inline struct rxe_pd *to_rpd(struct ib_pd *pd)
402*4882a593Smuzhiyun {
403*4882a593Smuzhiyun 	return pd ? container_of(pd, struct rxe_pd, ibpd) : NULL;
404*4882a593Smuzhiyun }
405*4882a593Smuzhiyun 
to_rah(struct ib_ah * ah)406*4882a593Smuzhiyun static inline struct rxe_ah *to_rah(struct ib_ah *ah)
407*4882a593Smuzhiyun {
408*4882a593Smuzhiyun 	return ah ? container_of(ah, struct rxe_ah, ibah) : NULL;
409*4882a593Smuzhiyun }
410*4882a593Smuzhiyun 
to_rsrq(struct ib_srq * srq)411*4882a593Smuzhiyun static inline struct rxe_srq *to_rsrq(struct ib_srq *srq)
412*4882a593Smuzhiyun {
413*4882a593Smuzhiyun 	return srq ? container_of(srq, struct rxe_srq, ibsrq) : NULL;
414*4882a593Smuzhiyun }
415*4882a593Smuzhiyun 
to_rqp(struct ib_qp * qp)416*4882a593Smuzhiyun static inline struct rxe_qp *to_rqp(struct ib_qp *qp)
417*4882a593Smuzhiyun {
418*4882a593Smuzhiyun 	return qp ? container_of(qp, struct rxe_qp, ibqp) : NULL;
419*4882a593Smuzhiyun }
420*4882a593Smuzhiyun 
to_rcq(struct ib_cq * cq)421*4882a593Smuzhiyun static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
422*4882a593Smuzhiyun {
423*4882a593Smuzhiyun 	return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
424*4882a593Smuzhiyun }
425*4882a593Smuzhiyun 
to_rmr(struct ib_mr * mr)426*4882a593Smuzhiyun static inline struct rxe_mem *to_rmr(struct ib_mr *mr)
427*4882a593Smuzhiyun {
428*4882a593Smuzhiyun 	return mr ? container_of(mr, struct rxe_mem, ibmr) : NULL;
429*4882a593Smuzhiyun }
430*4882a593Smuzhiyun 
to_rmw(struct ib_mw * mw)431*4882a593Smuzhiyun static inline struct rxe_mem *to_rmw(struct ib_mw *mw)
432*4882a593Smuzhiyun {
433*4882a593Smuzhiyun 	return mw ? container_of(mw, struct rxe_mem, ibmw) : NULL;
434*4882a593Smuzhiyun }
435*4882a593Smuzhiyun 
/* Return the protection domain the MR was registered under. */
static inline struct rxe_pd *mr_pd(struct rxe_mem *mr)
{
	return to_rpd(mr->ibmr.pd);
}
440*4882a593Smuzhiyun 
/* Return the local key (lkey) of the MR. */
static inline u32 mr_lkey(struct rxe_mem *mr)
{
	return mr->ibmr.lkey;
}
445*4882a593Smuzhiyun 
/* Return the remote key (rkey) of the MR. */
static inline u32 mr_rkey(struct rxe_mem *mr)
{
	return mr->ibmr.rkey;
}
450*4882a593Smuzhiyun 
451*4882a593Smuzhiyun int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
452*4882a593Smuzhiyun 
453*4882a593Smuzhiyun void rxe_mc_cleanup(struct rxe_pool_entry *arg);
454*4882a593Smuzhiyun 
455*4882a593Smuzhiyun #endif /* RXE_VERBS_H */
456