xref: /OK3568_Linux_fs/kernel/drivers/infiniband/sw/siw/siw_verbs.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/xarray.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include "siw.h"
#include "siw_verbs.h"
#include "siw_mem.h"

static int ib_qp_state_to_siw_qp_state[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = SIW_QP_STATE_IDLE,
	[IB_QPS_INIT] = SIW_QP_STATE_IDLE,
	[IB_QPS_RTR] = SIW_QP_STATE_RTR,
	[IB_QPS_RTS] = SIW_QP_STATE_RTS,
	[IB_QPS_SQD] = SIW_QP_STATE_CLOSING,
	[IB_QPS_SQE] = SIW_QP_STATE_TERMINATE,
	[IB_QPS_ERR] = SIW_QP_STATE_ERROR
};

static char ib_qp_state_to_string[IB_QPS_ERR + 1][sizeof("RESET")] = {
	[IB_QPS_RESET] = "RESET", [IB_QPS_INIT] = "INIT", [IB_QPS_RTR] = "RTR",
	[IB_QPS_RTS] = "RTS",     [IB_QPS_SQD] = "SQD",   [IB_QPS_SQE] = "SQE",
	[IB_QPS_ERR] = "ERR"
};

void siw_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct siw_user_mmap_entry *entry = to_siw_mmap_entry(rdma_entry);

	kfree(entry);
}

int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
{
	struct siw_ucontext *uctx = to_siw_ctx(ctx);
	size_t size = vma->vm_end - vma->vm_start;
	struct rdma_user_mmap_entry *rdma_entry;
	struct siw_user_mmap_entry *entry;
	int rv = -EINVAL;

	/*
	 * Must be page aligned
	 */
	if (vma->vm_start & (PAGE_SIZE - 1)) {
		pr_warn("siw: mmap not page aligned\n");
		return -EINVAL;
	}
	rdma_entry = rdma_user_mmap_entry_get(&uctx->base_ucontext, vma);
	if (!rdma_entry) {
		siw_dbg(&uctx->sdev->base_dev, "mmap lookup failed: %lu, %#zx\n",
			vma->vm_pgoff, size);
		return -EINVAL;
	}
	entry = to_siw_mmap_entry(rdma_entry);

	rv = remap_vmalloc_range(vma, entry->address, 0);
	if (rv) {
		pr_warn("remap_vmalloc_range failed: %lu, %zu\n", vma->vm_pgoff,
			size);
		goto out;
	}
out:
	rdma_user_mmap_entry_put(rdma_entry);

	return rv;
}

int siw_alloc_ucontext(struct ib_ucontext *base_ctx, struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(base_ctx->device);
	struct siw_ucontext *ctx = to_siw_ctx(base_ctx);
	struct siw_uresp_alloc_ctx uresp = {};
	int rv;

	if (atomic_inc_return(&sdev->num_ctx) > SIW_MAX_CONTEXT) {
		rv = -ENOMEM;
		goto err_out;
	}
	ctx->sdev = sdev;

	uresp.dev_id = sdev->vendor_part_id;

	if (udata->outlen < sizeof(uresp)) {
		rv = -EINVAL;
		goto err_out;
	}
	rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (rv)
		goto err_out;

	siw_dbg(base_ctx->device, "success. now %d context(s)\n",
		atomic_read(&sdev->num_ctx));

	return 0;

err_out:
	atomic_dec(&sdev->num_ctx);
	siw_dbg(base_ctx->device, "failure %d. now %d context(s)\n", rv,
		atomic_read(&sdev->num_ctx));

	return rv;
}

void siw_dealloc_ucontext(struct ib_ucontext *base_ctx)
{
	struct siw_ucontext *uctx = to_siw_ctx(base_ctx);

	atomic_dec(&uctx->sdev->num_ctx);
}

int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
		     struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(base_dev);

	if (udata->inlen || udata->outlen)
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));

	/* Revisit atomic caps if RFC 7306 gets supported */
	attr->atomic_cap = 0;
	attr->device_cap_flags =
		IB_DEVICE_MEM_MGT_EXTENSIONS | IB_DEVICE_ALLOW_USER_UNREG;
	attr->max_cq = sdev->attrs.max_cq;
	attr->max_cqe = sdev->attrs.max_cqe;
	attr->max_fast_reg_page_list_len = SIW_MAX_SGE_PBL;
	attr->max_mr = sdev->attrs.max_mr;
	attr->max_mw = sdev->attrs.max_mw;
	attr->max_mr_size = ~0ull;
	attr->max_pd = sdev->attrs.max_pd;
	attr->max_qp = sdev->attrs.max_qp;
	attr->max_qp_init_rd_atom = sdev->attrs.max_ird;
	attr->max_qp_rd_atom = sdev->attrs.max_ord;
	attr->max_qp_wr = sdev->attrs.max_qp_wr;
	attr->max_recv_sge = sdev->attrs.max_sge;
	attr->max_res_rd_atom = sdev->attrs.max_qp * sdev->attrs.max_ird;
	attr->max_send_sge = sdev->attrs.max_sge;
	attr->max_sge_rd = sdev->attrs.max_sge_rd;
	attr->max_srq = sdev->attrs.max_srq;
	attr->max_srq_sge = sdev->attrs.max_srq_sge;
	attr->max_srq_wr = sdev->attrs.max_srq_wr;
	attr->page_size_cap = PAGE_SIZE;
	attr->vendor_id = SIW_VENDOR_ID;
	attr->vendor_part_id = sdev->vendor_part_id;

	memcpy(&attr->sys_image_guid, sdev->netdev->dev_addr, 6);

	return 0;
}

int siw_query_port(struct ib_device *base_dev, u8 port,
		   struct ib_port_attr *attr)
{
	struct siw_device *sdev = to_siw_dev(base_dev);
	int rv;

	memset(attr, 0, sizeof(*attr));

	rv = ib_get_eth_speed(base_dev, port, &attr->active_speed,
			 &attr->active_width);
	attr->gid_tbl_len = 1;
	attr->max_msg_sz = -1;
	attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
	attr->active_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
	attr->phys_state = sdev->state == IB_PORT_ACTIVE ?
		IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
	attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
	attr->state = sdev->state;
	/*
	 * All zero
	 *
	 * attr->lid = 0;
	 * attr->bad_pkey_cntr = 0;
	 * attr->qkey_viol_cntr = 0;
	 * attr->sm_lid = 0;
	 * attr->lmc = 0;
	 * attr->max_vl_num = 0;
	 * attr->sm_sl = 0;
	 * attr->subnet_timeout = 0;
	 * attr->init_type_reply = 0;
	 */
	return rv;
}

int siw_get_port_immutable(struct ib_device *base_dev, u8 port,
			   struct ib_port_immutable *port_immutable)
{
	struct ib_port_attr attr;
	int rv = siw_query_port(base_dev, port, &attr);

	if (rv)
		return rv;

	port_immutable->gid_tbl_len = attr.gid_tbl_len;
	port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	return 0;
}

int siw_query_gid(struct ib_device *base_dev, u8 port, int idx,
		  union ib_gid *gid)
{
	struct siw_device *sdev = to_siw_dev(base_dev);

	/* subnet_prefix == interface_id == 0; */
	memset(gid, 0, sizeof(*gid));
	memcpy(&gid->raw[0], sdev->netdev->dev_addr, 6);

	return 0;
}

int siw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(pd->device);

	if (atomic_inc_return(&sdev->num_pd) > SIW_MAX_PD) {
		atomic_dec(&sdev->num_pd);
		return -ENOMEM;
	}
	siw_dbg_pd(pd, "now %d PD(s)\n", atomic_read(&sdev->num_pd));

	return 0;
}

int siw_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(pd->device);

	siw_dbg_pd(pd, "free PD\n");
	atomic_dec(&sdev->num_pd);
	return 0;
}

void siw_qp_get_ref(struct ib_qp *base_qp)
{
	siw_qp_get(to_siw_qp(base_qp));
}

void siw_qp_put_ref(struct ib_qp *base_qp)
{
	siw_qp_put(to_siw_qp(base_qp));
}

static struct rdma_user_mmap_entry *
siw_mmap_entry_insert(struct siw_ucontext *uctx,
		      void *address, size_t length,
		      u64 *offset)
{
	struct siw_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	int rv;

	*offset = SIW_INVAL_UOBJ_KEY;
	if (!entry)
		return NULL;

	entry->address = address;

	rv = rdma_user_mmap_entry_insert(&uctx->base_ucontext,
					 &entry->rdma_entry,
					 length);
	if (rv) {
		kfree(entry);
		return NULL;
	}

	*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return &entry->rdma_entry;
}
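
/*
 * Note (added for clarity, not part of the original source): the offset
 * returned via @offset is handed back to userland as sq_key/rq_key/cq_key
 * in the create responses below and is expected to serve as the mmap()
 * offset, which lets siw_mmap() find this entry again through
 * rdma_user_mmap_entry_get(). Assumed userland side, for illustration:
 *
 *	sq = mmap(NULL, sq_len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		  ctx_fd, uresp.sq_key);
 */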

/*
 * siw_create_qp()
 *
 * Create QP of requested size on given device.
 *
 * @pd:		Protection Domain
 * @attrs:	Initial QP attributes.
 * @udata:	used to provide QP ID, SQ and RQ size back to user.
 */

struct ib_qp *siw_create_qp(struct ib_pd *pd,
			    struct ib_qp_init_attr *attrs,
			    struct ib_udata *udata)
{
	struct siw_qp *qp = NULL;
	struct ib_device *base_dev = pd->device;
	struct siw_device *sdev = to_siw_dev(base_dev);
	struct siw_ucontext *uctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);
	unsigned long flags;
	int num_sqe, num_rqe, rv = 0;
	size_t length;

	siw_dbg(base_dev, "create new QP\n");

	if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
		siw_dbg(base_dev, "too many QP's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (attrs->qp_type != IB_QPT_RC) {
		siw_dbg(base_dev, "only RC QP's supported\n");
		rv = -EOPNOTSUPP;
		goto err_out;
	}
	if ((attrs->cap.max_send_wr > SIW_MAX_QP_WR) ||
	    (attrs->cap.max_recv_wr > SIW_MAX_QP_WR) ||
	    (attrs->cap.max_send_sge > SIW_MAX_SGE) ||
	    (attrs->cap.max_recv_sge > SIW_MAX_SGE)) {
		siw_dbg(base_dev, "QP size error\n");
		rv = -EINVAL;
		goto err_out;
	}
	if (attrs->cap.max_inline_data > SIW_MAX_INLINE) {
		siw_dbg(base_dev, "max inline send: %d > %d\n",
			attrs->cap.max_inline_data, (int)SIW_MAX_INLINE);
		rv = -EINVAL;
		goto err_out;
	}
	/*
	 * NOTE: zero-element SGLs in SQ and RQ WQEs are allowed,
	 * but the QP must be able to hold at least one WQE (SQ + RQ)
	 */
	if (attrs->cap.max_send_wr + attrs->cap.max_recv_wr == 0) {
		siw_dbg(base_dev, "QP must have send or receive queue\n");
		rv = -EINVAL;
		goto err_out;
	}

	if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {
		siw_dbg(base_dev, "send CQ or receive CQ invalid\n");
		rv = -EINVAL;
		goto err_out;
	}
	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		rv = -ENOMEM;
		goto err_out;
	}
	init_rwsem(&qp->state_lock);
	spin_lock_init(&qp->sq_lock);
	spin_lock_init(&qp->rq_lock);
	spin_lock_init(&qp->orq_lock);

	rv = siw_qp_add(sdev, qp);
	if (rv)
		goto err_out;

	num_sqe = attrs->cap.max_send_wr;
	num_rqe = attrs->cap.max_recv_wr;

	/* All queue indices are derived from modulo operations
	 * on a free running 'get' (consumer) and 'put' (producer)
	 * unsigned counter. Having queue sizes at power of two
	 * avoids handling counter wrap around.
	 */
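	/* Illustration (added note, not from the original source): with a
	 * power-of-two size, 'put % size' equals 'put & (size - 1)', and
	 * since 2^32 is a multiple of the queue size, indices stay
	 * consistent when the free running u32 counter wraps, e.g. for
	 * size = 8: put = 0xfffffffe -> idx 6, put = 0xffffffff -> idx 7,
	 * put = 0 (wrapped) -> idx 0.
	 */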
	if (num_sqe)
		num_sqe = roundup_pow_of_two(num_sqe);
	else {
		/* Zero sized SQ is not supported */
		rv = -EINVAL;
		goto err_out_xa;
	}
	if (num_rqe)
		num_rqe = roundup_pow_of_two(num_rqe);

	if (udata)
		qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe));
	else
		qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));

	if (qp->sendq == NULL) {
		rv = -ENOMEM;
		goto err_out_xa;
	}
	if (attrs->sq_sig_type != IB_SIGNAL_REQ_WR) {
		if (attrs->sq_sig_type == IB_SIGNAL_ALL_WR)
			qp->attrs.flags |= SIW_SIGNAL_ALL_WR;
		else {
			rv = -EINVAL;
			goto err_out_xa;
		}
	}
	qp->pd = pd;
	qp->scq = to_siw_cq(attrs->send_cq);
	qp->rcq = to_siw_cq(attrs->recv_cq);

	if (attrs->srq) {
		/*
		 * SRQ support.
		 * Verbs 6.3.7: ignore RQ size, if SRQ present
		 * Verbs 6.3.5: do not check PD of SRQ against PD of QP
		 */
		qp->srq = to_siw_srq(attrs->srq);
		qp->attrs.rq_size = 0;
		siw_dbg(base_dev, "QP [%u]: SRQ attached\n",
			qp->base_qp.qp_num);
	} else if (num_rqe) {
		if (udata)
			qp->recvq =
				vmalloc_user(num_rqe * sizeof(struct siw_rqe));
		else
			qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));

		if (qp->recvq == NULL) {
			rv = -ENOMEM;
			goto err_out_xa;
		}
		qp->attrs.rq_size = num_rqe;
	}
	qp->attrs.sq_size = num_sqe;
	qp->attrs.sq_max_sges = attrs->cap.max_send_sge;
	qp->attrs.rq_max_sges = attrs->cap.max_recv_sge;

	/* Make those two tunables fixed for now. */
	qp->tx_ctx.gso_seg_limit = 1;
	qp->tx_ctx.zcopy_tx = zcopy_tx;

	qp->attrs.state = SIW_QP_STATE_IDLE;

	if (udata) {
		struct siw_uresp_create_qp uresp = {};

		uresp.num_sqe = num_sqe;
		uresp.num_rqe = num_rqe;
		uresp.qp_id = qp_id(qp);

		if (qp->sendq) {
			length = num_sqe * sizeof(struct siw_sqe);
			qp->sq_entry =
				siw_mmap_entry_insert(uctx, qp->sendq,
						      length, &uresp.sq_key);
			if (!qp->sq_entry) {
				rv = -ENOMEM;
				goto err_out_xa;
			}
		}

		if (qp->recvq) {
			length = num_rqe * sizeof(struct siw_rqe);
			qp->rq_entry =
				siw_mmap_entry_insert(uctx, qp->recvq,
						      length, &uresp.rq_key);
			if (!qp->rq_entry) {
				uresp.sq_key = SIW_INVAL_UOBJ_KEY;
				rv = -ENOMEM;
				goto err_out_xa;
			}
		}

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out_xa;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out_xa;
	}
	qp->tx_cpu = siw_get_tx_cpu(sdev);
	if (qp->tx_cpu < 0) {
		rv = -EINVAL;
		goto err_out_xa;
	}
	INIT_LIST_HEAD(&qp->devq);
	spin_lock_irqsave(&sdev->lock, flags);
	list_add_tail(&qp->devq, &sdev->qp_list);
	spin_unlock_irqrestore(&sdev->lock, flags);

	return &qp->base_qp;

err_out_xa:
	xa_erase(&sdev->qp_xa, qp_id(qp));
err_out:
	if (qp) {
		if (uctx) {
			rdma_user_mmap_entry_remove(qp->sq_entry);
			rdma_user_mmap_entry_remove(qp->rq_entry);
		}
		vfree(qp->sendq);
		vfree(qp->recvq);
		kfree(qp);
	}
	atomic_dec(&sdev->num_qp);

	return ERR_PTR(rv);
}

/*
 * Minimum siw_query_qp() verb interface.
 *
 * @qp_attr_mask is not used but all available information is provided
 */
int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
		 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct siw_qp *qp;
	struct siw_device *sdev;

	if (base_qp && qp_attr && qp_init_attr) {
		qp = to_siw_qp(base_qp);
		sdev = to_siw_dev(base_qp->device);
	} else {
		return -EINVAL;
	}
	qp_attr->cap.max_inline_data = SIW_MAX_INLINE;
	qp_attr->cap.max_send_wr = qp->attrs.sq_size;
	qp_attr->cap.max_send_sge = qp->attrs.sq_max_sges;
	qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
	qp_attr->cap.max_recv_sge = qp->attrs.rq_max_sges;
	qp_attr->path_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
	qp_attr->max_rd_atomic = qp->attrs.irq_size;
	qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;

	qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ;

	qp_init_attr->qp_type = base_qp->qp_type;
	qp_init_attr->send_cq = base_qp->send_cq;
	qp_init_attr->recv_cq = base_qp->recv_cq;
	qp_init_attr->srq = base_qp->srq;

	qp_init_attr->cap = qp_attr->cap;

	return 0;
}

int siw_verbs_modify_qp(struct ib_qp *base_qp, struct ib_qp_attr *attr,
			int attr_mask, struct ib_udata *udata)
{
	struct siw_qp_attrs new_attrs;
	enum siw_qp_attr_mask siw_attr_mask = 0;
	struct siw_qp *qp = to_siw_qp(base_qp);
	int rv = 0;

	if (!attr_mask)
		return 0;

	memset(&new_attrs, 0, sizeof(new_attrs));

	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		siw_attr_mask = SIW_QP_ATTR_ACCESS_FLAGS;

		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
			new_attrs.flags |= SIW_RDMA_READ_ENABLED;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
			new_attrs.flags |= SIW_RDMA_WRITE_ENABLED;
		if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
			new_attrs.flags |= SIW_RDMA_BIND_ENABLED;
	}
	if (attr_mask & IB_QP_STATE) {
		siw_dbg_qp(qp, "desired IB QP state: %s\n",
			   ib_qp_state_to_string[attr->qp_state]);

		new_attrs.state = ib_qp_state_to_siw_qp_state[attr->qp_state];

		if (new_attrs.state > SIW_QP_STATE_RTS)
			qp->tx_ctx.tx_suspend = 1;

		siw_attr_mask |= SIW_QP_ATTR_STATE;
	}
	if (!siw_attr_mask)
		goto out;

	down_write(&qp->state_lock);

	rv = siw_qp_modify(qp, &new_attrs, siw_attr_mask);

	up_write(&qp->state_lock);
out:
	return rv;
}

int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
{
	struct siw_qp *qp = to_siw_qp(base_qp);
	struct siw_ucontext *uctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);
	struct siw_qp_attrs qp_attrs;

	siw_dbg_qp(qp, "state %d\n", qp->attrs.state);

	/*
	 * Mark QP as in process of destruction to prevent from
	 * any async callbacks to RDMA core
	 */
	qp->attrs.flags |= SIW_QP_IN_DESTROY;
	qp->rx_stream.rx_suspend = 1;

	if (uctx) {
		rdma_user_mmap_entry_remove(qp->sq_entry);
		rdma_user_mmap_entry_remove(qp->rq_entry);
	}

	down_write(&qp->state_lock);

	qp_attrs.state = SIW_QP_STATE_ERROR;
	siw_qp_modify(qp, &qp_attrs, SIW_QP_ATTR_STATE);

	if (qp->cep) {
		siw_cep_put(qp->cep);
		qp->cep = NULL;
	}
	up_write(&qp->state_lock);

	kfree(qp->tx_ctx.mpa_crc_hd);
	kfree(qp->rx_stream.mpa_crc_hd);

	qp->scq = qp->rcq = NULL;

	siw_qp_put(qp);

	return 0;
}

/*
 * siw_copy_inline_sgl()
 *
 * Prepare sgl of inlined data for sending. For userland callers,
 * the function checks if the given buffer addresses and lengths are
 * within process context bounds.
 * Data from all provided sge's are copied together into the wqe,
 * referenced by a single sge.
 */
static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
			       struct siw_sqe *sqe)
{
	struct ib_sge *core_sge = core_wr->sg_list;
	void *kbuf = &sqe->sge[1];
	int num_sge = core_wr->num_sge, bytes = 0;

	sqe->sge[0].laddr = (uintptr_t)kbuf;
	sqe->sge[0].lkey = 0;

	while (num_sge--) {
		if (!core_sge->length) {
			core_sge++;
			continue;
		}
		bytes += core_sge->length;
		if (bytes > SIW_MAX_INLINE) {
			bytes = -EINVAL;
			break;
		}
		memcpy(kbuf, (void *)(uintptr_t)core_sge->addr,
		       core_sge->length);

		kbuf += core_sge->length;
		core_sge++;
	}
	sqe->sge[0].length = bytes > 0 ? bytes : 0;
	sqe->num_sge = bytes > 0 ? 1 : 0;

	return bytes;
}
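
/*
 * Added note (not in the original source): the return value is the number
 * of inlined bytes, 0 for an all zero-length SGL, or -EINVAL on overflow.
 * The callers in siw_post_send() treat this differently: the SEND path
 * rejects rv <= 0, while the RDMA WRITE path only rejects rv < 0, since a
 * zero-length inline WRITE is permitted.
 */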

/* Complete SQ WR's without processing */
static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr,
			   const struct ib_send_wr **bad_wr)
{
	struct siw_sqe sqe = {};
	int rv = 0;

	while (wr) {
		sqe.id = wr->wr_id;
		sqe.opcode = wr->opcode;
		rv = siw_sqe_complete(qp, &sqe, 0, SIW_WC_WR_FLUSH_ERR);
		if (rv) {
			if (bad_wr)
				*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	return rv;
}

/* Complete RQ WR's without processing */
static int siw_rq_flush_wr(struct siw_qp *qp, const struct ib_recv_wr *wr,
			   const struct ib_recv_wr **bad_wr)
{
	struct siw_rqe rqe = {};
	int rv = 0;

	while (wr) {
		rqe.id = wr->wr_id;
		rv = siw_rqe_complete(qp, &rqe, 0, 0, SIW_WC_WR_FLUSH_ERR);
		if (rv) {
			if (bad_wr)
				*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	return rv;
}

/*
 * siw_post_send()
 *
 * Post a list of S-WR's to a SQ.
 *
 * @base_qp:	Base QP contained in siw QP
 * @wr:		Null terminated list of user WR's
 * @bad_wr:	Points to failing WR in case of synchronous failure.
 */
int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
		  const struct ib_send_wr **bad_wr)
{
	struct siw_qp *qp = to_siw_qp(base_qp);
	struct siw_wqe *wqe = tx_wqe(qp);

	unsigned long flags;
	int rv = 0;

	if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) {
		siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
		*bad_wr = wr;
		return -EINVAL;
	}

	/*
	 * Try to acquire QP state lock. Must be non-blocking
	 * to accommodate kernel clients' needs.
	 */
	if (!down_read_trylock(&qp->state_lock)) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * ERROR state is final, so we can be sure
			 * this state will not change as long as the QP
			 * exists.
			 *
			 * This handles an ib_drain_sq() call with
			 * a concurrent request to set the QP state
			 * to ERROR.
			 */
			rv = siw_sq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP locked, state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		return rv;
	}
	if (unlikely(qp->attrs.state != SIW_QP_STATE_RTS)) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * Immediately flush this WR to CQ, if QP
			 * is in ERROR state. SQ is guaranteed to
			 * be empty, so WR completes in-order.
			 *
			 * Typically triggered by ib_drain_sq().
			 */
			rv = siw_sq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP out of state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		up_read(&qp->state_lock);
		return rv;
	}
	spin_lock_irqsave(&qp->sq_lock, flags);

	while (wr) {
		u32 idx = qp->sq_put % qp->attrs.sq_size;
		struct siw_sqe *sqe = &qp->sendq[idx];

		if (sqe->flags) {
			siw_dbg_qp(qp, "sq full\n");
			rv = -ENOMEM;
			break;
		}
		if (wr->num_sge > qp->attrs.sq_max_sges) {
			siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge);
			rv = -EINVAL;
			break;
		}
		sqe->id = wr->wr_id;

		if ((wr->send_flags & IB_SEND_SIGNALED) ||
		    (qp->attrs.flags & SIW_SIGNAL_ALL_WR))
			sqe->flags |= SIW_WQE_SIGNALLED;

		if (wr->send_flags & IB_SEND_FENCE)
			sqe->flags |= SIW_WQE_READ_FENCE;

		switch (wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_INV:
			if (wr->send_flags & IB_SEND_SOLICITED)
				sqe->flags |= SIW_WQE_SOLICITED;

			if (!(wr->send_flags & IB_SEND_INLINE)) {
				siw_copy_sgl(wr->sg_list, sqe->sge,
					     wr->num_sge);
				sqe->num_sge = wr->num_sge;
			} else {
				rv = siw_copy_inline_sgl(wr, sqe);
				if (rv <= 0) {
					rv = -EINVAL;
					break;
				}
				sqe->flags |= SIW_WQE_INLINE;
				sqe->num_sge = 1;
			}
			if (wr->opcode == IB_WR_SEND)
				sqe->opcode = SIW_OP_SEND;
			else {
				sqe->opcode = SIW_OP_SEND_REMOTE_INV;
				sqe->rkey = wr->ex.invalidate_rkey;
			}
			break;

		case IB_WR_RDMA_READ_WITH_INV:
		case IB_WR_RDMA_READ:
			/*
			 * iWarp restricts RREAD sink to SGL containing
			 * 1 SGE only. We could relax to SGL with multiple
			 * elements referring to the SAME ltag or even
			 * sending a private per-rreq tag referring to a
			 * checked local sgl with MULTIPLE ltag's.
			 */
			if (unlikely(wr->num_sge != 1)) {
				rv = -EINVAL;
				break;
			}
			siw_copy_sgl(wr->sg_list, &sqe->sge[0], 1);
			/*
			 * NOTE: zero length RREAD is allowed!
			 */
			sqe->raddr = rdma_wr(wr)->remote_addr;
			sqe->rkey = rdma_wr(wr)->rkey;
			sqe->num_sge = 1;

			if (wr->opcode == IB_WR_RDMA_READ)
				sqe->opcode = SIW_OP_READ;
			else
				sqe->opcode = SIW_OP_READ_LOCAL_INV;
			break;

		case IB_WR_RDMA_WRITE:
			if (!(wr->send_flags & IB_SEND_INLINE)) {
				siw_copy_sgl(wr->sg_list, &sqe->sge[0],
					     wr->num_sge);
				sqe->num_sge = wr->num_sge;
			} else {
				rv = siw_copy_inline_sgl(wr, sqe);
				if (unlikely(rv < 0)) {
					rv = -EINVAL;
					break;
				}
				sqe->flags |= SIW_WQE_INLINE;
				sqe->num_sge = 1;
			}
			sqe->raddr = rdma_wr(wr)->remote_addr;
			sqe->rkey = rdma_wr(wr)->rkey;
			sqe->opcode = SIW_OP_WRITE;
			break;

		case IB_WR_REG_MR:
			sqe->base_mr = (uintptr_t)reg_wr(wr)->mr;
			sqe->rkey = reg_wr(wr)->key;
			sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK;
			sqe->opcode = SIW_OP_REG_MR;
			break;

		case IB_WR_LOCAL_INV:
			sqe->rkey = wr->ex.invalidate_rkey;
			sqe->opcode = SIW_OP_INVAL_STAG;
			break;

		default:
			siw_dbg_qp(qp, "ib wr type %d unsupported\n",
				   wr->opcode);
			rv = -EINVAL;
			break;
		}
		siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
			   sqe->opcode, sqe->flags,
			   (void *)(uintptr_t)sqe->id);

		if (unlikely(rv < 0))
			break;

		/* make SQE only valid after completely written */
		smp_wmb();
		sqe->flags |= SIW_WQE_VALID;
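		/*
		 * Added note (not in the original source): this store is
		 * assumed to pair with a matching read ordering on the
		 * consumer side (SQ processing), which must observe
		 * SIW_WQE_VALID only after all other SQE fields.
		 */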

		qp->sq_put++;
		wr = wr->next;
	}

	/*
	 * Send directly if SQ processing is not in progress.
	 * Eventual immediate errors (rv < 0) do not affect the involved
	 * RI resources (Verbs, 8.3.1) and thus do not prevent from SQ
	 * processing, if new work is already pending. But rv must be passed
	 * to caller.
	 */
	if (wqe->wr_status != SIW_WR_IDLE) {
		spin_unlock_irqrestore(&qp->sq_lock, flags);
		goto skip_direct_sending;
	}
	rv = siw_activate_tx(qp);
	spin_unlock_irqrestore(&qp->sq_lock, flags);

	if (rv <= 0)
		goto skip_direct_sending;

	if (rdma_is_kernel_res(&qp->base_qp.res)) {
		rv = siw_sq_start(qp);
	} else {
		qp->tx_ctx.in_syscall = 1;

		if (siw_qp_sq_process(qp) != 0 && !(qp->tx_ctx.tx_suspend))
			siw_qp_cm_drop(qp, 0);

		qp->tx_ctx.in_syscall = 0;
	}
skip_direct_sending:

	up_read(&qp->state_lock);

	if (rv >= 0)
		return 0;
	/*
	 * Immediate error
	 */
	siw_dbg_qp(qp, "error %d\n", rv);

	*bad_wr = wr;
	return rv;
}

/*
 * siw_post_receive()
 *
 * Post a list of R-WR's to a RQ.
 *
 * @base_qp:	Base QP contained in siw QP
 * @wr:		Null terminated list of user WR's
 * @bad_wr:	Points to failing WR in case of synchronous failure.
 */
int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
		     const struct ib_recv_wr **bad_wr)
{
	struct siw_qp *qp = to_siw_qp(base_qp);
	unsigned long flags;
	int rv = 0;

	if (qp->srq || qp->attrs.rq_size == 0) {
		*bad_wr = wr;
		return -EINVAL;
	}
	if (!rdma_is_kernel_res(&qp->base_qp.res)) {
		siw_dbg_qp(qp, "no kernel post_recv for user mapped rq\n");
		*bad_wr = wr;
		return -EINVAL;
	}

	/*
	 * Try to acquire QP state lock. Must be non-blocking
	 * to accommodate kernel clients' needs.
	 */
	if (!down_read_trylock(&qp->state_lock)) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * ERROR state is final, so we can be sure
			 * this state will not change as long as the QP
			 * exists.
			 *
			 * This handles an ib_drain_rq() call with
			 * a concurrent request to set the QP state
			 * to ERROR.
			 */
			rv = siw_rq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP locked, state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		return rv;
	}
	if (qp->attrs.state > SIW_QP_STATE_RTS) {
		if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * Immediately flush this WR to CQ, if QP
			 * is in ERROR state. RQ is guaranteed to
			 * be empty, so WR completes in-order.
			 *
			 * Typically triggered by ib_drain_rq().
			 */
			rv = siw_rq_flush_wr(qp, wr, bad_wr);
		} else {
			siw_dbg_qp(qp, "QP out of state %d\n",
				   qp->attrs.state);
			*bad_wr = wr;
			rv = -ENOTCONN;
		}
		up_read(&qp->state_lock);
		return rv;
	}
	/*
	 * Serialize potentially multiple producers.
	 * Not needed for single threaded consumer side.
	 */
	spin_lock_irqsave(&qp->rq_lock, flags);

	while (wr) {
		u32 idx = qp->rq_put % qp->attrs.rq_size;
		struct siw_rqe *rqe = &qp->recvq[idx];

		if (rqe->flags) {
			siw_dbg_qp(qp, "RQ full\n");
			rv = -ENOMEM;
			break;
		}
		if (wr->num_sge > qp->attrs.rq_max_sges) {
			siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge);
			rv = -EINVAL;
			break;
		}
		rqe->id = wr->wr_id;
		rqe->num_sge = wr->num_sge;
		siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge);

		/* make sure RQE is completely written before valid */
		smp_wmb();

		rqe->flags = SIW_WQE_VALID;

		qp->rq_put++;
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->rq_lock, flags);

	up_read(&qp->state_lock);

	if (rv < 0) {
		siw_dbg_qp(qp, "error %d\n", rv);
		*bad_wr = wr;
	}
	return rv > 0 ? 0 : rv;
}

int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
{
	struct siw_cq *cq = to_siw_cq(base_cq);
	struct siw_device *sdev = to_siw_dev(base_cq->device);
	struct siw_ucontext *ctx =
		rdma_udata_to_drv_context(udata, struct siw_ucontext,
					  base_ucontext);

	siw_dbg_cq(cq, "free CQ resources\n");

	siw_cq_flush(cq);

	if (ctx)
		rdma_user_mmap_entry_remove(cq->cq_entry);

	atomic_dec(&sdev->num_cq);

	vfree(cq->queue);
	return 0;
}

/*
 * siw_create_cq()
 *
 * Populate CQ of requested size
 *
 * @base_cq: CQ as allocated by RDMA midlayer
 * @attr: Initial CQ attributes
 * @udata: relates to user context
 */

int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
		  struct ib_udata *udata)
{
	struct siw_device *sdev = to_siw_dev(base_cq->device);
	struct siw_cq *cq = to_siw_cq(base_cq);
	int rv, size = attr->cqe;

	if (atomic_inc_return(&sdev->num_cq) > SIW_MAX_CQ) {
		siw_dbg(base_cq->device, "too many CQ's\n");
		rv = -ENOMEM;
		goto err_out;
	}
	if (size < 1 || size > sdev->attrs.max_cqe) {
		siw_dbg(base_cq->device, "CQ size error: %d\n", size);
		rv = -EINVAL;
		goto err_out;
	}
	size = roundup_pow_of_two(size);
	cq->base_cq.cqe = size;
	cq->num_cqe = size;

	if (udata)
		cq->queue = vmalloc_user(size * sizeof(struct siw_cqe) +
					 sizeof(struct siw_cq_ctrl));
	else
		cq->queue = vzalloc(size * sizeof(struct siw_cqe) +
				    sizeof(struct siw_cq_ctrl));

	if (cq->queue == NULL) {
		rv = -ENOMEM;
		goto err_out;
	}
	get_random_bytes(&cq->id, 4);
	siw_dbg(base_cq->device, "new CQ [%u]\n", cq->id);

	spin_lock_init(&cq->lock);

	cq->notify = (struct siw_cq_ctrl *)&cq->queue[size];

	if (udata) {
		struct siw_uresp_create_cq uresp = {};
		struct siw_ucontext *ctx =
			rdma_udata_to_drv_context(udata, struct siw_ucontext,
						  base_ucontext);
		size_t length = size * sizeof(struct siw_cqe) +
			sizeof(struct siw_cq_ctrl);

		cq->cq_entry =
			siw_mmap_entry_insert(ctx, cq->queue,
					      length, &uresp.cq_key);
		if (!cq->cq_entry) {
			rv = -ENOMEM;
			goto err_out;
		}

		uresp.cq_id = cq->id;
		uresp.num_cqe = size;

		if (udata->outlen < sizeof(uresp)) {
			rv = -EINVAL;
			goto err_out;
		}
		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (rv)
			goto err_out;
	}
	return 0;

err_out:
	siw_dbg(base_cq->device, "CQ creation failed: %d\n", rv);
1165*4882a593Smuzhiyun 
1166*4882a593Smuzhiyun 	if (cq && cq->queue) {
1167*4882a593Smuzhiyun 		struct siw_ucontext *ctx =
1168*4882a593Smuzhiyun 			rdma_udata_to_drv_context(udata, struct siw_ucontext,
1169*4882a593Smuzhiyun 						  base_ucontext);
1170*4882a593Smuzhiyun 		if (ctx)
1171*4882a593Smuzhiyun 			rdma_user_mmap_entry_remove(cq->cq_entry);
1172*4882a593Smuzhiyun 		vfree(cq->queue);
1173*4882a593Smuzhiyun 	}
1174*4882a593Smuzhiyun 	atomic_dec(&sdev->num_cq);
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun 	return rv;
1177*4882a593Smuzhiyun }
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun /*
1180*4882a593Smuzhiyun  * siw_poll_cq()
1181*4882a593Smuzhiyun  *
1182*4882a593Smuzhiyun  * Reap CQ entries if available and copy work completion status into
1183*4882a593Smuzhiyun  * array of WC's provided by caller. Returns number of reaped CQE's.
1184*4882a593Smuzhiyun  *
1185*4882a593Smuzhiyun  * @base_cq:	Base CQ contained in siw CQ.
1186*4882a593Smuzhiyun  * @num_cqe:	Maximum number of CQE's to reap.
1187*4882a593Smuzhiyun  * @wc:		Array of work completions to be filled by siw.
1188*4882a593Smuzhiyun  */
siw_poll_cq(struct ib_cq * base_cq,int num_cqe,struct ib_wc * wc)1189*4882a593Smuzhiyun int siw_poll_cq(struct ib_cq *base_cq, int num_cqe, struct ib_wc *wc)
1190*4882a593Smuzhiyun {
1191*4882a593Smuzhiyun 	struct siw_cq *cq = to_siw_cq(base_cq);
1192*4882a593Smuzhiyun 	int i;
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 	for (i = 0; i < num_cqe; i++) {
1195*4882a593Smuzhiyun 		if (!siw_reap_cqe(cq, wc))
1196*4882a593Smuzhiyun 			break;
1197*4882a593Smuzhiyun 		wc++;
1198*4882a593Smuzhiyun 	}
1199*4882a593Smuzhiyun 	return i;
1200*4882a593Smuzhiyun }
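
/*
 * Minimal in-kernel usage sketch (assumed consumer code, not part of
 * this file); the verbs core dispatches ib_poll_cq() to this function:
 *
 *	struct ib_wc wc[8];
 *	int i, n = ib_poll_cq(cq, 8, wc);
 *
 *	for (i = 0; i < n; i++)
 *		process_wc(&wc[i]);
 *
 * (process_wc() is a hypothetical consumer helper.)
 */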
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun /*
1203*4882a593Smuzhiyun  * siw_req_notify_cq()
1204*4882a593Smuzhiyun  *
1205*4882a593Smuzhiyun  * Request notification for new CQE's added to that CQ.
1206*4882a593Smuzhiyun  * Defined flags:
1207*4882a593Smuzhiyun  * o IB_CQ_SOLICITED lets siw trigger a notification
1208*4882a593Smuzhiyun  *   event only if a CQE with the solicited flag set enters the CQ.
1209*4882a593Smuzhiyun  * o IB_CQ_NEXT_COMP lets siw trigger a notification
1210*4882a593Smuzhiyun  *   event whenever a CQE enters the CQ.
1211*4882a593Smuzhiyun  * o IB_CQ_REPORT_MISSED_EVENTS: the return value provides the
1212*4882a593Smuzhiyun  *   number of CQE's not yet reaped, regardless of their notification
1213*4882a593Smuzhiyun  *   type and of the current or new CQ notification setting.
1214*4882a593Smuzhiyun  *
1215*4882a593Smuzhiyun  * @base_cq:	Base CQ contained in siw CQ.
1216*4882a593Smuzhiyun  * @flags:	Requested notification flags.
1217*4882a593Smuzhiyun  */
1218*4882a593Smuzhiyun int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags)
1219*4882a593Smuzhiyun {
1220*4882a593Smuzhiyun 	struct siw_cq *cq = to_siw_cq(base_cq);
1221*4882a593Smuzhiyun 
1222*4882a593Smuzhiyun 	siw_dbg_cq(cq, "flags: 0x%02x\n", flags);
1223*4882a593Smuzhiyun 
1224*4882a593Smuzhiyun 	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
1225*4882a593Smuzhiyun 		/*
1226*4882a593Smuzhiyun 		 * Enable CQ event for next solicited completion
1227*4882a593Smuzhiyun 		 * and make it visible to all associated producers.
1228*4882a593Smuzhiyun 		 */
1229*4882a593Smuzhiyun 		smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED);
1230*4882a593Smuzhiyun 	else
1231*4882a593Smuzhiyun 		/*
1232*4882a593Smuzhiyun 		 * Enable CQ event for any signalled completion
1233*4882a593Smuzhiyun 		 * and make it visible to all associated producers.
1234*4882a593Smuzhiyun 		 */
1235*4882a593Smuzhiyun 		smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL);
1236*4882a593Smuzhiyun 
1237*4882a593Smuzhiyun 	if (flags & IB_CQ_REPORT_MISSED_EVENTS)
1238*4882a593Smuzhiyun 		return cq->cq_put - cq->cq_get;
1239*4882a593Smuzhiyun 
1240*4882a593Smuzhiyun 	return 0;
1241*4882a593Smuzhiyun }
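
/*
 * Note: smp_store_mb() above publishes the new arming state with a
 * full memory barrier, so a producer appending a CQE afterwards is
 * guaranteed to observe it. The matching test-and-disarm runs on the
 * CQE producer path outside this file, conceptually:
 *
 *	if (READ_ONCE(cq->notify->flags) & SIW_NOTIFY_ALL) {
 *		WRITE_ONCE(cq->notify->flags, SIW_NOTIFY_NOT);
 *		...deliver the CQ completion event...
 *	}
 */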
1242*4882a593Smuzhiyun 
1243*4882a593Smuzhiyun /*
1244*4882a593Smuzhiyun  * siw_dereg_mr()
1245*4882a593Smuzhiyun  *
1246*4882a593Smuzhiyun  * Release Memory Region.
1247*4882a593Smuzhiyun  *
1248*4882a593Smuzhiyun  * @base_mr: Base MR contained in siw MR.
1249*4882a593Smuzhiyun  * @udata: points to user context, unused.
1250*4882a593Smuzhiyun  */
1251*4882a593Smuzhiyun int siw_dereg_mr(struct ib_mr *base_mr, struct ib_udata *udata)
1252*4882a593Smuzhiyun {
1253*4882a593Smuzhiyun 	struct siw_mr *mr = to_siw_mr(base_mr);
1254*4882a593Smuzhiyun 	struct siw_device *sdev = to_siw_dev(base_mr->device);
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun 	siw_dbg_mem(mr->mem, "deregister MR\n");
1257*4882a593Smuzhiyun 
1258*4882a593Smuzhiyun 	atomic_dec(&sdev->num_mr);
1259*4882a593Smuzhiyun 
1260*4882a593Smuzhiyun 	siw_mr_drop_mem(mr);
1261*4882a593Smuzhiyun 	kfree_rcu(mr, rcu);
1262*4882a593Smuzhiyun 
1263*4882a593Smuzhiyun 	return 0;
1264*4882a593Smuzhiyun }
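
/*
 * Note: kfree_rcu() defers the actual kfree() until an RCU grace
 * period has elapsed, so any reader still dereferencing the MR inside
 * an RCU read-side critical section remains safe.
 */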
1265*4882a593Smuzhiyun 
1266*4882a593Smuzhiyun /*
1267*4882a593Smuzhiyun  * siw_reg_user_mr()
1268*4882a593Smuzhiyun  *
1269*4882a593Smuzhiyun  * Register Memory Region.
1270*4882a593Smuzhiyun  *
1271*4882a593Smuzhiyun  * @pd:		Protection Domain
1272*4882a593Smuzhiyun  * @start:	starting address of MR (virtual address)
1273*4882a593Smuzhiyun  * @len:	length of MR in bytes
1274*4882a593Smuzhiyun  * @rnic_va:	not used by siw
1275*4882a593Smuzhiyun  * @rights:	MR access rights
1276*4882a593Smuzhiyun  * @udata:	user buffer to communicate STag and Key.
1277*4882a593Smuzhiyun  */
1278*4882a593Smuzhiyun struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
1279*4882a593Smuzhiyun 			      u64 rnic_va, int rights, struct ib_udata *udata)
1280*4882a593Smuzhiyun {
1281*4882a593Smuzhiyun 	struct siw_mr *mr = NULL;
1282*4882a593Smuzhiyun 	struct siw_umem *umem = NULL;
1283*4882a593Smuzhiyun 	struct siw_ureq_reg_mr ureq;
1284*4882a593Smuzhiyun 	struct siw_device *sdev = to_siw_dev(pd->device);
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun 	unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK);
1287*4882a593Smuzhiyun 	int rv;
1288*4882a593Smuzhiyun 
1289*4882a593Smuzhiyun 	siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n",
1290*4882a593Smuzhiyun 		   (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
1291*4882a593Smuzhiyun 		   (unsigned long long)len);
1292*4882a593Smuzhiyun 
1293*4882a593Smuzhiyun 	if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
1294*4882a593Smuzhiyun 		siw_dbg_pd(pd, "too many mr's\n");
1295*4882a593Smuzhiyun 		rv = -ENOMEM;
1296*4882a593Smuzhiyun 		goto err_out;
1297*4882a593Smuzhiyun 	}
1298*4882a593Smuzhiyun 	if (!len) {
1299*4882a593Smuzhiyun 		rv = -EINVAL;
1300*4882a593Smuzhiyun 		goto err_out;
1301*4882a593Smuzhiyun 	}
1302*4882a593Smuzhiyun 	if (mem_limit != RLIM_INFINITY) {
1303*4882a593Smuzhiyun 		unsigned long num_pages =
1304*4882a593Smuzhiyun 			(PAGE_ALIGN(len + (start & ~PAGE_MASK))) >> PAGE_SHIFT;
1305*4882a593Smuzhiyun 		mem_limit >>= PAGE_SHIFT;
1306*4882a593Smuzhiyun 
1307*4882a593Smuzhiyun 		if (num_pages > mem_limit - current->mm->locked_vm) {
1308*4882a593Smuzhiyun 			siw_dbg_pd(pd, "pages req %lu, max %lu, lock %lu\n",
1309*4882a593Smuzhiyun 				   num_pages, mem_limit,
1310*4882a593Smuzhiyun 				   current->mm->locked_vm);
1311*4882a593Smuzhiyun 			rv = -ENOMEM;
1312*4882a593Smuzhiyun 			goto err_out;
1313*4882a593Smuzhiyun 		}
1314*4882a593Smuzhiyun 	}
1315*4882a593Smuzhiyun 	umem = siw_umem_get(start, len, ib_access_writable(rights));
1316*4882a593Smuzhiyun 	if (IS_ERR(umem)) {
1317*4882a593Smuzhiyun 		rv = PTR_ERR(umem);
1318*4882a593Smuzhiyun 		siw_dbg_pd(pd, "getting user memory failed: %d\n", rv);
1319*4882a593Smuzhiyun 		umem = NULL;
1320*4882a593Smuzhiyun 		goto err_out;
1321*4882a593Smuzhiyun 	}
1322*4882a593Smuzhiyun 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1323*4882a593Smuzhiyun 	if (!mr) {
1324*4882a593Smuzhiyun 		rv = -ENOMEM;
1325*4882a593Smuzhiyun 		goto err_out;
1326*4882a593Smuzhiyun 	}
1327*4882a593Smuzhiyun 	rv = siw_mr_add_mem(mr, pd, umem, start, len, rights);
1328*4882a593Smuzhiyun 	if (rv)
1329*4882a593Smuzhiyun 		goto err_out;
1330*4882a593Smuzhiyun 
1331*4882a593Smuzhiyun 	if (udata) {
1332*4882a593Smuzhiyun 		struct siw_uresp_reg_mr uresp = {};
1333*4882a593Smuzhiyun 		struct siw_mem *mem = mr->mem;
1334*4882a593Smuzhiyun 
1335*4882a593Smuzhiyun 		if (udata->inlen < sizeof(ureq)) {
1336*4882a593Smuzhiyun 			rv = -EINVAL;
1337*4882a593Smuzhiyun 			goto err_out;
1338*4882a593Smuzhiyun 		}
1339*4882a593Smuzhiyun 		rv = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
1340*4882a593Smuzhiyun 		if (rv)
1341*4882a593Smuzhiyun 			goto err_out;
1342*4882a593Smuzhiyun 
1343*4882a593Smuzhiyun 		mr->base_mr.lkey |= ureq.stag_key;
1344*4882a593Smuzhiyun 		mr->base_mr.rkey |= ureq.stag_key;
1345*4882a593Smuzhiyun 		mem->stag |= ureq.stag_key;
1346*4882a593Smuzhiyun 		uresp.stag = mem->stag;
1347*4882a593Smuzhiyun 
1348*4882a593Smuzhiyun 		if (udata->outlen < sizeof(uresp)) {
1349*4882a593Smuzhiyun 			rv = -EINVAL;
1350*4882a593Smuzhiyun 			goto err_out;
1351*4882a593Smuzhiyun 		}
1352*4882a593Smuzhiyun 		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1353*4882a593Smuzhiyun 		if (rv)
1354*4882a593Smuzhiyun 			goto err_out;
1355*4882a593Smuzhiyun 	}
1356*4882a593Smuzhiyun 	mr->mem->stag_valid = 1;
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun 	return &mr->base_mr;
1359*4882a593Smuzhiyun 
1360*4882a593Smuzhiyun err_out:
1361*4882a593Smuzhiyun 	atomic_dec(&sdev->num_mr);
1362*4882a593Smuzhiyun 	if (mr) {
1363*4882a593Smuzhiyun 		if (mr->mem)
1364*4882a593Smuzhiyun 			siw_mr_drop_mem(mr);
1365*4882a593Smuzhiyun 		kfree_rcu(mr, rcu);
1366*4882a593Smuzhiyun 	} else {
1367*4882a593Smuzhiyun 		if (umem)
1368*4882a593Smuzhiyun 			siw_umem_release(umem, false);
1369*4882a593Smuzhiyun 	}
1370*4882a593Smuzhiyun 	return ERR_PTR(rv);
1371*4882a593Smuzhiyun }
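
/*
 * Worked example for the RLIMIT_MEMLOCK check above, assuming 4K
 * pages: start = 0x1003 and len = 0x2000 give
 * start & ~PAGE_MASK = 0x3 and PAGE_ALIGN(0x2000 + 0x3) = 0x3000,
 * hence num_pages = 3; an unaligned 8KiB registration pins three pages.
 */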
1372*4882a593Smuzhiyun 
1373*4882a593Smuzhiyun struct ib_mr *siw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
1374*4882a593Smuzhiyun 			   u32 max_sge)
1375*4882a593Smuzhiyun {
1376*4882a593Smuzhiyun 	struct siw_device *sdev = to_siw_dev(pd->device);
1377*4882a593Smuzhiyun 	struct siw_mr *mr = NULL;
1378*4882a593Smuzhiyun 	struct siw_pbl *pbl = NULL;
1379*4882a593Smuzhiyun 	int rv;
1380*4882a593Smuzhiyun 
1381*4882a593Smuzhiyun 	if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
1382*4882a593Smuzhiyun 		siw_dbg_pd(pd, "too many mr's\n");
1383*4882a593Smuzhiyun 		rv = -ENOMEM;
1384*4882a593Smuzhiyun 		goto err_out;
1385*4882a593Smuzhiyun 	}
1386*4882a593Smuzhiyun 	if (mr_type != IB_MR_TYPE_MEM_REG) {
1387*4882a593Smuzhiyun 		siw_dbg_pd(pd, "mr type %d unsupported\n", mr_type);
1388*4882a593Smuzhiyun 		rv = -EOPNOTSUPP;
1389*4882a593Smuzhiyun 		goto err_out;
1390*4882a593Smuzhiyun 	}
1391*4882a593Smuzhiyun 	if (max_sge > SIW_MAX_SGE_PBL) {
1392*4882a593Smuzhiyun 		siw_dbg_pd(pd, "too many sge's: %d\n", max_sge);
1393*4882a593Smuzhiyun 		rv = -ENOMEM;
1394*4882a593Smuzhiyun 		goto err_out;
1395*4882a593Smuzhiyun 	}
1396*4882a593Smuzhiyun 	pbl = siw_pbl_alloc(max_sge);
1397*4882a593Smuzhiyun 	if (IS_ERR(pbl)) {
1398*4882a593Smuzhiyun 		rv = PTR_ERR(pbl);
1399*4882a593Smuzhiyun 		siw_dbg_pd(pd, "pbl allocation failed: %d\n", rv);
1400*4882a593Smuzhiyun 		pbl = NULL;
1401*4882a593Smuzhiyun 		goto err_out;
1402*4882a593Smuzhiyun 	}
1403*4882a593Smuzhiyun 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1404*4882a593Smuzhiyun 	if (!mr) {
1405*4882a593Smuzhiyun 		rv = -ENOMEM;
1406*4882a593Smuzhiyun 		goto err_out;
1407*4882a593Smuzhiyun 	}
1408*4882a593Smuzhiyun 	rv = siw_mr_add_mem(mr, pd, pbl, 0, max_sge * PAGE_SIZE, 0);
1409*4882a593Smuzhiyun 	if (rv)
1410*4882a593Smuzhiyun 		goto err_out;
1411*4882a593Smuzhiyun 
1412*4882a593Smuzhiyun 	mr->mem->is_pbl = 1;
1413*4882a593Smuzhiyun 
1414*4882a593Smuzhiyun 	siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag);
1415*4882a593Smuzhiyun 
1416*4882a593Smuzhiyun 	return &mr->base_mr;
1417*4882a593Smuzhiyun 
1418*4882a593Smuzhiyun err_out:
1419*4882a593Smuzhiyun 	atomic_dec(&sdev->num_mr);
1420*4882a593Smuzhiyun 
1421*4882a593Smuzhiyun 	if (!mr) {
1422*4882a593Smuzhiyun 		kfree(pbl);
1423*4882a593Smuzhiyun 	} else {
1424*4882a593Smuzhiyun 		if (mr->mem)
1425*4882a593Smuzhiyun 			siw_mr_drop_mem(mr);
1426*4882a593Smuzhiyun 		kfree_rcu(mr, rcu);
1427*4882a593Smuzhiyun 	}
1428*4882a593Smuzhiyun 	siw_dbg_pd(pd, "failed: %d\n", rv);
1429*4882a593Smuzhiyun 
1430*4882a593Smuzhiyun 	return ERR_PTR(rv);
1431*4882a593Smuzhiyun }
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun /* Just used to count number of pages being mapped */
1434*4882a593Smuzhiyun static int siw_set_pbl_page(struct ib_mr *base_mr, u64 buf_addr)
1435*4882a593Smuzhiyun {
1436*4882a593Smuzhiyun 	return 0;
1437*4882a593Smuzhiyun }
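
/*
 * Note: ib_sg_to_pages() demands a per-page callback and returns the
 * number of SG entries it consumed; siw already built its PBL in
 * siw_map_mr_sg(), so the callback exists only to drive that count.
 */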
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
1440*4882a593Smuzhiyun 		  unsigned int *sg_off)
1441*4882a593Smuzhiyun {
1442*4882a593Smuzhiyun 	struct scatterlist *slp;
1443*4882a593Smuzhiyun 	struct siw_mr *mr = to_siw_mr(base_mr);
1444*4882a593Smuzhiyun 	struct siw_mem *mem = mr->mem;
1445*4882a593Smuzhiyun 	struct siw_pbl *pbl = mem->pbl;
1446*4882a593Smuzhiyun 	struct siw_pble *pble;
1447*4882a593Smuzhiyun 	unsigned long pbl_size;
1448*4882a593Smuzhiyun 	int i, rv;
1449*4882a593Smuzhiyun 
1450*4882a593Smuzhiyun 	if (!pbl) {
1451*4882a593Smuzhiyun 		siw_dbg_mem(mem, "no PBL allocated\n");
1452*4882a593Smuzhiyun 		return -EINVAL;
1453*4882a593Smuzhiyun 	}
1454*4882a593Smuzhiyun 	pble = pbl->pbe;
1455*4882a593Smuzhiyun 
1456*4882a593Smuzhiyun 	if (pbl->max_buf < num_sle) {
1457*4882a593Smuzhiyun 		siw_dbg_mem(mem, "too many SGE's: %d > %d\n",
1458*4882a593Smuzhiyun 			    num_sle, mem->pbl->max_buf);
1459*4882a593Smuzhiyun 		return -ENOMEM;
1460*4882a593Smuzhiyun 	}
1461*4882a593Smuzhiyun 	for_each_sg(sl, slp, num_sle, i) {
1462*4882a593Smuzhiyun 		if (sg_dma_len(slp) == 0) {
1463*4882a593Smuzhiyun 			siw_dbg_mem(mem, "empty SGE\n");
1464*4882a593Smuzhiyun 			return -EINVAL;
1465*4882a593Smuzhiyun 		}
1466*4882a593Smuzhiyun 		if (i == 0) {
1467*4882a593Smuzhiyun 			pble->addr = sg_dma_address(slp);
1468*4882a593Smuzhiyun 			pble->size = sg_dma_len(slp);
1469*4882a593Smuzhiyun 			pble->pbl_off = 0;
1470*4882a593Smuzhiyun 			pbl_size = pble->size;
1471*4882a593Smuzhiyun 			pbl->num_buf = 1;
1472*4882a593Smuzhiyun 		} else {
1473*4882a593Smuzhiyun 			/* Merge PBL entries if adjacent */
1474*4882a593Smuzhiyun 			if (pble->addr + pble->size == sg_dma_address(slp)) {
1475*4882a593Smuzhiyun 				pble->size += sg_dma_len(slp);
1476*4882a593Smuzhiyun 			} else {
1477*4882a593Smuzhiyun 				pble++;
1478*4882a593Smuzhiyun 				pbl->num_buf++;
1479*4882a593Smuzhiyun 				pble->addr = sg_dma_address(slp);
1480*4882a593Smuzhiyun 				pble->size = sg_dma_len(slp);
1481*4882a593Smuzhiyun 				pble->pbl_off = pbl_size;
1482*4882a593Smuzhiyun 			}
1483*4882a593Smuzhiyun 			pbl_size += sg_dma_len(slp);
1484*4882a593Smuzhiyun 		}
1485*4882a593Smuzhiyun 		siw_dbg_mem(mem,
1486*4882a593Smuzhiyun 			"sge[%d], size %u, addr 0x%p, total %lu\n",
1487*4882a593Smuzhiyun 			i, pble->size, (void *)(uintptr_t)pble->addr,
1488*4882a593Smuzhiyun 			pbl_size);
1489*4882a593Smuzhiyun 	}
1490*4882a593Smuzhiyun 	rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page);
1491*4882a593Smuzhiyun 	if (rv > 0) {
1492*4882a593Smuzhiyun 		mem->len = base_mr->length;
1493*4882a593Smuzhiyun 		mem->va = base_mr->iova;
1494*4882a593Smuzhiyun 		siw_dbg_mem(mem,
1495*4882a593Smuzhiyun 			"%llu bytes, start 0x%pK, %u SLE to %u entries\n",
1496*4882a593Smuzhiyun 			mem->len, (void *)(uintptr_t)mem->va, num_sle,
1497*4882a593Smuzhiyun 			pbl->num_buf);
1498*4882a593Smuzhiyun 	}
1499*4882a593Smuzhiyun 	return rv;
1500*4882a593Smuzhiyun }
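
/*
 * Example of the adjacency merge above: DMA segments
 * [0x10000, len 0x1000] and [0x11000, len 0x1000] satisfy
 * pble->addr + pble->size == sg_dma_address(slp), so both collapse
 * into one PBL entry [0x10000, len 0x2000]. A discontiguous segment
 * instead opens a new entry with pbl_off set to the bytes mapped so
 * far.
 */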
1501*4882a593Smuzhiyun 
1502*4882a593Smuzhiyun /*
1503*4882a593Smuzhiyun  * siw_get_dma_mr()
1504*4882a593Smuzhiyun  *
1505*4882a593Smuzhiyun  * Create an (empty) DMA memory region with no umem attached.
1506*4882a593Smuzhiyun  */
1507*4882a593Smuzhiyun struct ib_mr *siw_get_dma_mr(struct ib_pd *pd, int rights)
1508*4882a593Smuzhiyun {
1509*4882a593Smuzhiyun 	struct siw_device *sdev = to_siw_dev(pd->device);
1510*4882a593Smuzhiyun 	struct siw_mr *mr = NULL;
1511*4882a593Smuzhiyun 	int rv;
1512*4882a593Smuzhiyun 
1513*4882a593Smuzhiyun 	if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
1514*4882a593Smuzhiyun 		siw_dbg_pd(pd, "too many mr's\n");
1515*4882a593Smuzhiyun 		rv = -ENOMEM;
1516*4882a593Smuzhiyun 		goto err_out;
1517*4882a593Smuzhiyun 	}
1518*4882a593Smuzhiyun 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1519*4882a593Smuzhiyun 	if (!mr) {
1520*4882a593Smuzhiyun 		rv = -ENOMEM;
1521*4882a593Smuzhiyun 		goto err_out;
1522*4882a593Smuzhiyun 	}
1523*4882a593Smuzhiyun 	rv = siw_mr_add_mem(mr, pd, NULL, 0, ULONG_MAX, rights);
1524*4882a593Smuzhiyun 	if (rv)
1525*4882a593Smuzhiyun 		goto err_out;
1526*4882a593Smuzhiyun 
1527*4882a593Smuzhiyun 	mr->mem->stag_valid = 1;
1528*4882a593Smuzhiyun 
1529*4882a593Smuzhiyun 	siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag);
1530*4882a593Smuzhiyun 
1531*4882a593Smuzhiyun 	return &mr->base_mr;
1532*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun err_out:
1534*4882a593Smuzhiyun 	kfree(mr);
1536*4882a593Smuzhiyun 
1537*4882a593Smuzhiyun 	atomic_dec(&sdev->num_mr);
1538*4882a593Smuzhiyun 
1539*4882a593Smuzhiyun 	return ERR_PTR(rv);
1540*4882a593Smuzhiyun }
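
/*
 * Note: with no umem attached and len = ULONG_MAX, the MR effectively
 * spans the whole address space; for a software RDMA device "DMA"
 * access is plain memory access, so only the access rights need to be
 * recorded.
 */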
1541*4882a593Smuzhiyun 
1542*4882a593Smuzhiyun /*
1543*4882a593Smuzhiyun  * siw_create_srq()
1544*4882a593Smuzhiyun  *
1545*4882a593Smuzhiyun  * Create Shared Receive Queue of attributes @init_attrs
1546*4882a593Smuzhiyun  * within protection domain given by @pd.
1547*4882a593Smuzhiyun  *
1548*4882a593Smuzhiyun  * @base_srq:	Base SRQ contained in siw SRQ.
1549*4882a593Smuzhiyun  * @init_attrs:	SRQ init attributes.
1550*4882a593Smuzhiyun  * @udata:	points to user context
1551*4882a593Smuzhiyun  */
1552*4882a593Smuzhiyun int siw_create_srq(struct ib_srq *base_srq,
1553*4882a593Smuzhiyun 		   struct ib_srq_init_attr *init_attrs, struct ib_udata *udata)
1554*4882a593Smuzhiyun {
1555*4882a593Smuzhiyun 	struct siw_srq *srq = to_siw_srq(base_srq);
1556*4882a593Smuzhiyun 	struct ib_srq_attr *attrs = &init_attrs->attr;
1557*4882a593Smuzhiyun 	struct siw_device *sdev = to_siw_dev(base_srq->device);
1558*4882a593Smuzhiyun 	struct siw_ucontext *ctx =
1559*4882a593Smuzhiyun 		rdma_udata_to_drv_context(udata, struct siw_ucontext,
1560*4882a593Smuzhiyun 					  base_ucontext);
1561*4882a593Smuzhiyun 	int rv;
1562*4882a593Smuzhiyun 
1563*4882a593Smuzhiyun 	if (atomic_inc_return(&sdev->num_srq) > SIW_MAX_SRQ) {
1564*4882a593Smuzhiyun 		siw_dbg_pd(base_srq->pd, "too many SRQ's\n");
1565*4882a593Smuzhiyun 		rv = -ENOMEM;
1566*4882a593Smuzhiyun 		goto err_out;
1567*4882a593Smuzhiyun 	}
1568*4882a593Smuzhiyun 	if (attrs->max_wr == 0 || attrs->max_wr > SIW_MAX_SRQ_WR ||
1569*4882a593Smuzhiyun 	    attrs->max_sge > SIW_MAX_SGE || attrs->srq_limit > attrs->max_wr) {
1570*4882a593Smuzhiyun 		rv = -EINVAL;
1571*4882a593Smuzhiyun 		goto err_out;
1572*4882a593Smuzhiyun 	}
1573*4882a593Smuzhiyun 	srq->max_sge = attrs->max_sge;
1574*4882a593Smuzhiyun 	srq->num_rqe = roundup_pow_of_two(attrs->max_wr);
1575*4882a593Smuzhiyun 	srq->limit = attrs->srq_limit;
1576*4882a593Smuzhiyun 	if (srq->limit)
1577*4882a593Smuzhiyun 		srq->armed = true;
1578*4882a593Smuzhiyun 
1579*4882a593Smuzhiyun 	srq->is_kernel_res = !udata;
1580*4882a593Smuzhiyun 
1581*4882a593Smuzhiyun 	if (udata)
1582*4882a593Smuzhiyun 		srq->recvq =
1583*4882a593Smuzhiyun 			vmalloc_user(srq->num_rqe * sizeof(struct siw_rqe));
1584*4882a593Smuzhiyun 	else
1585*4882a593Smuzhiyun 		srq->recvq = vzalloc(srq->num_rqe * sizeof(struct siw_rqe));
1586*4882a593Smuzhiyun 
1587*4882a593Smuzhiyun 	if (srq->recvq == NULL) {
1588*4882a593Smuzhiyun 		rv = -ENOMEM;
1589*4882a593Smuzhiyun 		goto err_out;
1590*4882a593Smuzhiyun 	}
1591*4882a593Smuzhiyun 	if (udata) {
1592*4882a593Smuzhiyun 		struct siw_uresp_create_srq uresp = {};
1593*4882a593Smuzhiyun 		size_t length = srq->num_rqe * sizeof(struct siw_rqe);
1594*4882a593Smuzhiyun 
1595*4882a593Smuzhiyun 		srq->srq_entry =
1596*4882a593Smuzhiyun 			siw_mmap_entry_insert(ctx, srq->recvq,
1597*4882a593Smuzhiyun 					      length, &uresp.srq_key);
1598*4882a593Smuzhiyun 		if (!srq->srq_entry) {
1599*4882a593Smuzhiyun 			rv = -ENOMEM;
1600*4882a593Smuzhiyun 			goto err_out;
1601*4882a593Smuzhiyun 		}
1602*4882a593Smuzhiyun 
1603*4882a593Smuzhiyun 		uresp.num_rqe = srq->num_rqe;
1604*4882a593Smuzhiyun 
1605*4882a593Smuzhiyun 		if (udata->outlen < sizeof(uresp)) {
1606*4882a593Smuzhiyun 			rv = -EINVAL;
1607*4882a593Smuzhiyun 			goto err_out;
1608*4882a593Smuzhiyun 		}
1609*4882a593Smuzhiyun 		rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1610*4882a593Smuzhiyun 		if (rv)
1611*4882a593Smuzhiyun 			goto err_out;
1612*4882a593Smuzhiyun 	}
1613*4882a593Smuzhiyun 	spin_lock_init(&srq->lock);
1614*4882a593Smuzhiyun 
1615*4882a593Smuzhiyun 	siw_dbg_pd(base_srq->pd, "[SRQ]: success\n");
1616*4882a593Smuzhiyun 
1617*4882a593Smuzhiyun 	return 0;
1618*4882a593Smuzhiyun 
1619*4882a593Smuzhiyun err_out:
1620*4882a593Smuzhiyun 	if (srq->recvq) {
1621*4882a593Smuzhiyun 		if (ctx)
1622*4882a593Smuzhiyun 			rdma_user_mmap_entry_remove(srq->srq_entry);
1623*4882a593Smuzhiyun 		vfree(srq->recvq);
1624*4882a593Smuzhiyun 	}
1625*4882a593Smuzhiyun 	atomic_dec(&sdev->num_srq);
1626*4882a593Smuzhiyun 
1627*4882a593Smuzhiyun 	return rv;
1628*4882a593Smuzhiyun }
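
/*
 * Design note on the allocation split above: vmalloc_user() returns
 * zeroed memory suitable for remap_vmalloc_range() when the ring must
 * be mapped into userspace, while plain vzalloc() suffices for an SRQ
 * that stays kernel-private.
 */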
1629*4882a593Smuzhiyun 
1630*4882a593Smuzhiyun /*
1631*4882a593Smuzhiyun  * siw_modify_srq()
1632*4882a593Smuzhiyun  *
1633*4882a593Smuzhiyun  * Modify SRQ. The caller may resize SRQ and/or set/reset notification
1634*4882a593Smuzhiyun  * limit and (re)arm IB_EVENT_SRQ_LIMIT_REACHED notification.
1635*4882a593Smuzhiyun  *
1636*4882a593Smuzhiyun  * NOTE: it is unclear whether the RDMA core allows changing the MAX_SGE
1637*4882a593Smuzhiyun  * parameter; siw_modify_srq() does not check the attrs->max_sge param.
1638*4882a593Smuzhiyun  */
1639*4882a593Smuzhiyun int siw_modify_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs,
1640*4882a593Smuzhiyun 		   enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
1641*4882a593Smuzhiyun {
1642*4882a593Smuzhiyun 	struct siw_srq *srq = to_siw_srq(base_srq);
1643*4882a593Smuzhiyun 	unsigned long flags;
1644*4882a593Smuzhiyun 	int rv = 0;
1645*4882a593Smuzhiyun 
1646*4882a593Smuzhiyun 	spin_lock_irqsave(&srq->lock, flags);
1647*4882a593Smuzhiyun 
1648*4882a593Smuzhiyun 	if (attr_mask & IB_SRQ_MAX_WR) {
1649*4882a593Smuzhiyun 		/* resize request not yet supported */
1650*4882a593Smuzhiyun 		rv = -EOPNOTSUPP;
1651*4882a593Smuzhiyun 		goto out;
1652*4882a593Smuzhiyun 	}
1653*4882a593Smuzhiyun 	if (attr_mask & IB_SRQ_LIMIT) {
1654*4882a593Smuzhiyun 		if (attrs->srq_limit) {
1655*4882a593Smuzhiyun 			if (unlikely(attrs->srq_limit > srq->num_rqe)) {
1656*4882a593Smuzhiyun 				rv = -EINVAL;
1657*4882a593Smuzhiyun 				goto out;
1658*4882a593Smuzhiyun 			}
1659*4882a593Smuzhiyun 			srq->armed = true;
1660*4882a593Smuzhiyun 		} else {
1661*4882a593Smuzhiyun 			srq->armed = false;
1662*4882a593Smuzhiyun 		}
1663*4882a593Smuzhiyun 		srq->limit = attrs->srq_limit;
1664*4882a593Smuzhiyun 	}
1665*4882a593Smuzhiyun out:
1666*4882a593Smuzhiyun 	spin_unlock_irqrestore(&srq->lock, flags);
1667*4882a593Smuzhiyun 
1668*4882a593Smuzhiyun 	return rv;
1669*4882a593Smuzhiyun }
1670*4882a593Smuzhiyun 
1671*4882a593Smuzhiyun /*
1672*4882a593Smuzhiyun  * siw_query_srq()
1673*4882a593Smuzhiyun  *
1674*4882a593Smuzhiyun  * Query SRQ attributes.
1675*4882a593Smuzhiyun  */
1676*4882a593Smuzhiyun int siw_query_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs)
1677*4882a593Smuzhiyun {
1678*4882a593Smuzhiyun 	struct siw_srq *srq = to_siw_srq(base_srq);
1679*4882a593Smuzhiyun 	unsigned long flags;
1680*4882a593Smuzhiyun 
1681*4882a593Smuzhiyun 	spin_lock_irqsave(&srq->lock, flags);
1682*4882a593Smuzhiyun 
1683*4882a593Smuzhiyun 	attrs->max_wr = srq->num_rqe;
1684*4882a593Smuzhiyun 	attrs->max_sge = srq->max_sge;
1685*4882a593Smuzhiyun 	attrs->srq_limit = srq->limit;
1686*4882a593Smuzhiyun 
1687*4882a593Smuzhiyun 	spin_unlock_irqrestore(&srq->lock, flags);
1688*4882a593Smuzhiyun 
1689*4882a593Smuzhiyun 	return 0;
1690*4882a593Smuzhiyun }
1691*4882a593Smuzhiyun 
1692*4882a593Smuzhiyun /*
1693*4882a593Smuzhiyun  * siw_destroy_srq()
1694*4882a593Smuzhiyun  *
1695*4882a593Smuzhiyun  * Destroy SRQ.
1696*4882a593Smuzhiyun  * It is assumed that the SRQ is not referenced by any
1697*4882a593Smuzhiyun  * QP anymore - the code trusts the RDMA core environment to keep track
1698*4882a593Smuzhiyun  * of QP references.
1699*4882a593Smuzhiyun  */
1700*4882a593Smuzhiyun int siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata)
1701*4882a593Smuzhiyun {
1702*4882a593Smuzhiyun 	struct siw_srq *srq = to_siw_srq(base_srq);
1703*4882a593Smuzhiyun 	struct siw_device *sdev = to_siw_dev(base_srq->device);
1704*4882a593Smuzhiyun 	struct siw_ucontext *ctx =
1705*4882a593Smuzhiyun 		rdma_udata_to_drv_context(udata, struct siw_ucontext,
1706*4882a593Smuzhiyun 					  base_ucontext);
1707*4882a593Smuzhiyun 
1708*4882a593Smuzhiyun 	if (ctx)
1709*4882a593Smuzhiyun 		rdma_user_mmap_entry_remove(srq->srq_entry);
1710*4882a593Smuzhiyun 	vfree(srq->recvq);
1711*4882a593Smuzhiyun 	atomic_dec(&sdev->num_srq);
1712*4882a593Smuzhiyun 	return 0;
1713*4882a593Smuzhiyun }
1714*4882a593Smuzhiyun 
1715*4882a593Smuzhiyun /*
1716*4882a593Smuzhiyun  * siw_post_srq_recv()
1717*4882a593Smuzhiyun  *
1718*4882a593Smuzhiyun  * Post a list of receive queue elements to SRQ.
1719*4882a593Smuzhiyun  * NOTE: The function does not check or lock a certain SRQ state
1720*4882a593Smuzhiyun  *       during the post operation. The code simply trusts the
1721*4882a593Smuzhiyun  *       RDMA core environment.
1722*4882a593Smuzhiyun  *
1723*4882a593Smuzhiyun  * @base_srq:	Base SRQ contained in siw SRQ
1724*4882a593Smuzhiyun  * @wr:		List of R-WR's
1725*4882a593Smuzhiyun  * @bad_wr:	Updated to failing WR if posting fails.
1726*4882a593Smuzhiyun  */
1727*4882a593Smuzhiyun int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
1728*4882a593Smuzhiyun 		      const struct ib_recv_wr **bad_wr)
1729*4882a593Smuzhiyun {
1730*4882a593Smuzhiyun 	struct siw_srq *srq = to_siw_srq(base_srq);
1731*4882a593Smuzhiyun 	unsigned long flags;
1732*4882a593Smuzhiyun 	int rv = 0;
1733*4882a593Smuzhiyun 
1734*4882a593Smuzhiyun 	if (unlikely(!srq->is_kernel_res)) {
1735*4882a593Smuzhiyun 		siw_dbg_pd(base_srq->pd,
1736*4882a593Smuzhiyun 			   "[SRQ]: no kernel post_recv for mapped srq\n");
1737*4882a593Smuzhiyun 		rv = -EINVAL;
1738*4882a593Smuzhiyun 		goto out;
1739*4882a593Smuzhiyun 	}
1740*4882a593Smuzhiyun 	/*
1741*4882a593Smuzhiyun 	 * Serialize potentially multiple producers and
1742*4882a593Smuzhiyun 	 * potentially multiple consumers.
1743*4882a593Smuzhiyun 	 */
1745*4882a593Smuzhiyun 	spin_lock_irqsave(&srq->lock, flags);
1746*4882a593Smuzhiyun 
1747*4882a593Smuzhiyun 	while (wr) {
1748*4882a593Smuzhiyun 		u32 idx = srq->rq_put % srq->num_rqe;
1749*4882a593Smuzhiyun 		struct siw_rqe *rqe = &srq->recvq[idx];
1750*4882a593Smuzhiyun 
1751*4882a593Smuzhiyun 		if (rqe->flags) {
1752*4882a593Smuzhiyun 			siw_dbg_pd(base_srq->pd, "SRQ full\n");
1753*4882a593Smuzhiyun 			rv = -ENOMEM;
1754*4882a593Smuzhiyun 			break;
1755*4882a593Smuzhiyun 		}
1756*4882a593Smuzhiyun 		if (unlikely(wr->num_sge > srq->max_sge)) {
1757*4882a593Smuzhiyun 			siw_dbg_pd(base_srq->pd,
1758*4882a593Smuzhiyun 				   "[SRQ]: too many sge's: %d\n", wr->num_sge);
1759*4882a593Smuzhiyun 			rv = -EINVAL;
1760*4882a593Smuzhiyun 			break;
1761*4882a593Smuzhiyun 		}
1762*4882a593Smuzhiyun 		rqe->id = wr->wr_id;
1763*4882a593Smuzhiyun 		rqe->num_sge = wr->num_sge;
1764*4882a593Smuzhiyun 		siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge);
1765*4882a593Smuzhiyun 
1766*4882a593Smuzhiyun 		/* Make sure S-RQE is completely written before valid */
1767*4882a593Smuzhiyun 		smp_wmb();
1768*4882a593Smuzhiyun 
1769*4882a593Smuzhiyun 		rqe->flags = SIW_WQE_VALID;
1770*4882a593Smuzhiyun 
1771*4882a593Smuzhiyun 		srq->rq_put++;
1772*4882a593Smuzhiyun 		wr = wr->next;
1773*4882a593Smuzhiyun 	}
1774*4882a593Smuzhiyun 	spin_unlock_irqrestore(&srq->lock, flags);
1775*4882a593Smuzhiyun out:
1776*4882a593Smuzhiyun 	if (unlikely(rv < 0)) {
1777*4882a593Smuzhiyun 		siw_dbg_pd(base_srq->pd, "[SRQ]: error %d\n", rv);
1778*4882a593Smuzhiyun 		*bad_wr = wr;
1779*4882a593Smuzhiyun 	}
1780*4882a593Smuzhiyun 	return rv;
1781*4882a593Smuzhiyun }
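
/*
 * Note on the ring arithmetic above: num_rqe was rounded up to a
 * power of two in siw_create_srq(), so rq_put % num_rqe reduces to a
 * cheap mask operation; e.g. num_rqe = 256 and rq_put = 260 select
 * slot 4. A slot becomes reusable once the consumer clears rqe->flags.
 */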
1782*4882a593Smuzhiyun 
1783*4882a593Smuzhiyun void siw_qp_event(struct siw_qp *qp, enum ib_event_type etype)
1784*4882a593Smuzhiyun {
1785*4882a593Smuzhiyun 	struct ib_event event;
1786*4882a593Smuzhiyun 	struct ib_qp *base_qp = &qp->base_qp;
1787*4882a593Smuzhiyun 
1788*4882a593Smuzhiyun 	/*
1789*4882a593Smuzhiyun 	 * Do not report asynchronous errors on QP which gets
1790*4882a593Smuzhiyun 	 * destroyed via verbs interface (siw_destroy_qp())
1791*4882a593Smuzhiyun 	 */
1792*4882a593Smuzhiyun 	if (qp->attrs.flags & SIW_QP_IN_DESTROY)
1793*4882a593Smuzhiyun 		return;
1794*4882a593Smuzhiyun 
1795*4882a593Smuzhiyun 	event.event = etype;
1796*4882a593Smuzhiyun 	event.device = base_qp->device;
1797*4882a593Smuzhiyun 	event.element.qp = base_qp;
1798*4882a593Smuzhiyun 
1799*4882a593Smuzhiyun 	if (base_qp->event_handler) {
1800*4882a593Smuzhiyun 		siw_dbg_qp(qp, "reporting event %d\n", etype);
1801*4882a593Smuzhiyun 		base_qp->event_handler(&event, base_qp->qp_context);
1802*4882a593Smuzhiyun 	}
1803*4882a593Smuzhiyun }
1804*4882a593Smuzhiyun 
1805*4882a593Smuzhiyun void siw_cq_event(struct siw_cq *cq, enum ib_event_type etype)
1806*4882a593Smuzhiyun {
1807*4882a593Smuzhiyun 	struct ib_event event;
1808*4882a593Smuzhiyun 	struct ib_cq *base_cq = &cq->base_cq;
1809*4882a593Smuzhiyun 
1810*4882a593Smuzhiyun 	event.event = etype;
1811*4882a593Smuzhiyun 	event.device = base_cq->device;
1812*4882a593Smuzhiyun 	event.element.cq = base_cq;
1813*4882a593Smuzhiyun 
1814*4882a593Smuzhiyun 	if (base_cq->event_handler) {
1815*4882a593Smuzhiyun 		siw_dbg_cq(cq, "reporting CQ event %d\n", etype);
1816*4882a593Smuzhiyun 		base_cq->event_handler(&event, base_cq->cq_context);
1817*4882a593Smuzhiyun 	}
1818*4882a593Smuzhiyun }
1819*4882a593Smuzhiyun 
1820*4882a593Smuzhiyun void siw_srq_event(struct siw_srq *srq, enum ib_event_type etype)
1821*4882a593Smuzhiyun {
1822*4882a593Smuzhiyun 	struct ib_event event;
1823*4882a593Smuzhiyun 	struct ib_srq *base_srq = &srq->base_srq;
1824*4882a593Smuzhiyun 
1825*4882a593Smuzhiyun 	event.event = etype;
1826*4882a593Smuzhiyun 	event.device = base_srq->device;
1827*4882a593Smuzhiyun 	event.element.srq = base_srq;
1828*4882a593Smuzhiyun 
1829*4882a593Smuzhiyun 	if (base_srq->event_handler) {
1830*4882a593Smuzhiyun 		siw_dbg_pd(srq->base_srq.pd,
1831*4882a593Smuzhiyun 			   "reporting SRQ event %d\n", etype);
1832*4882a593Smuzhiyun 		base_srq->event_handler(&event, base_srq->srq_context);
1833*4882a593Smuzhiyun 	}
1834*4882a593Smuzhiyun }
1835*4882a593Smuzhiyun 
1836*4882a593Smuzhiyun void siw_port_event(struct siw_device *sdev, u8 port, enum ib_event_type etype)
1837*4882a593Smuzhiyun {
1838*4882a593Smuzhiyun 	struct ib_event event;
1839*4882a593Smuzhiyun 
1840*4882a593Smuzhiyun 	event.event = etype;
1841*4882a593Smuzhiyun 	event.device = &sdev->base_dev;
1842*4882a593Smuzhiyun 	event.element.port_num = port;
1843*4882a593Smuzhiyun 
1844*4882a593Smuzhiyun 	siw_dbg(&sdev->base_dev, "reporting port event %d\n", etype);
1845*4882a593Smuzhiyun 
1846*4882a593Smuzhiyun 	ib_dispatch_event(&event);
1847*4882a593Smuzhiyun }