xref: /OK3568_Linux_fs/kernel/drivers/infiniband/hw/i40iw/i40iw_verbs.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*******************************************************************************
2*4882a593Smuzhiyun *
3*4882a593Smuzhiyun * Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * This software is available to you under a choice of one of two
6*4882a593Smuzhiyun * licenses.  You may choose to be licensed under the terms of the GNU
7*4882a593Smuzhiyun * General Public License (GPL) Version 2, available from the file
8*4882a593Smuzhiyun * COPYING in the main directory of this source tree, or the
9*4882a593Smuzhiyun * OpenFabrics.org BSD license below:
10*4882a593Smuzhiyun *
11*4882a593Smuzhiyun *   Redistribution and use in source and binary forms, with or
12*4882a593Smuzhiyun *   without modification, are permitted provided that the following
13*4882a593Smuzhiyun *   conditions are met:
14*4882a593Smuzhiyun *
15*4882a593Smuzhiyun *    - Redistributions of source code must retain the above
16*4882a593Smuzhiyun *	copyright notice, this list of conditions and the following
17*4882a593Smuzhiyun *	disclaimer.
18*4882a593Smuzhiyun *
19*4882a593Smuzhiyun *    - Redistributions in binary form must reproduce the above
20*4882a593Smuzhiyun *	copyright notice, this list of conditions and the following
21*4882a593Smuzhiyun *	disclaimer in the documentation and/or other materials
22*4882a593Smuzhiyun *	provided with the distribution.
23*4882a593Smuzhiyun *
24*4882a593Smuzhiyun * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25*4882a593Smuzhiyun * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26*4882a593Smuzhiyun * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27*4882a593Smuzhiyun * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28*4882a593Smuzhiyun * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29*4882a593Smuzhiyun * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30*4882a593Smuzhiyun * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31*4882a593Smuzhiyun * SOFTWARE.
32*4882a593Smuzhiyun *
33*4882a593Smuzhiyun *******************************************************************************/
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun #include <linux/module.h>
36*4882a593Smuzhiyun #include <linux/moduleparam.h>
37*4882a593Smuzhiyun #include <linux/random.h>
38*4882a593Smuzhiyun #include <linux/highmem.h>
39*4882a593Smuzhiyun #include <linux/time.h>
40*4882a593Smuzhiyun #include <linux/hugetlb.h>
41*4882a593Smuzhiyun #include <linux/irq.h>
42*4882a593Smuzhiyun #include <asm/byteorder.h>
43*4882a593Smuzhiyun #include <net/ip.h>
44*4882a593Smuzhiyun #include <rdma/ib_verbs.h>
45*4882a593Smuzhiyun #include <rdma/iw_cm.h>
46*4882a593Smuzhiyun #include <rdma/ib_user_verbs.h>
47*4882a593Smuzhiyun #include <rdma/ib_umem.h>
48*4882a593Smuzhiyun #include <rdma/uverbs_ioctl.h>
49*4882a593Smuzhiyun #include "i40iw.h"
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun /**
52*4882a593Smuzhiyun  * i40iw_query_device - get device attributes
53*4882a593Smuzhiyun  * @ibdev: device pointer from stack
54*4882a593Smuzhiyun  * @props: returning device attributes
55*4882a593Smuzhiyun  * @udata: user data
56*4882a593Smuzhiyun  */
57*4882a593Smuzhiyun static int i40iw_query_device(struct ib_device *ibdev,
58*4882a593Smuzhiyun 			      struct ib_device_attr *props,
59*4882a593Smuzhiyun 			      struct ib_udata *udata)
60*4882a593Smuzhiyun {
61*4882a593Smuzhiyun 	struct i40iw_device *iwdev = to_iwdev(ibdev);
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun 	if (udata->inlen || udata->outlen)
64*4882a593Smuzhiyun 		return -EINVAL;
65*4882a593Smuzhiyun 	memset(props, 0, sizeof(*props));
66*4882a593Smuzhiyun 	ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
67*4882a593Smuzhiyun 	props->fw_ver = i40iw_fw_major_ver(&iwdev->sc_dev) << 32 |
68*4882a593Smuzhiyun 			i40iw_fw_minor_ver(&iwdev->sc_dev);
69*4882a593Smuzhiyun 	props->device_cap_flags = iwdev->device_cap_flags;
70*4882a593Smuzhiyun 	props->vendor_id = iwdev->ldev->pcidev->vendor;
71*4882a593Smuzhiyun 	props->vendor_part_id = iwdev->ldev->pcidev->device;
72*4882a593Smuzhiyun 	props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
73*4882a593Smuzhiyun 	props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
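	/* Resource limits below are reported net of what is already in use. */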
74*4882a593Smuzhiyun 	props->max_qp = iwdev->max_qp - iwdev->used_qps;
75*4882a593Smuzhiyun 	props->max_qp_wr = I40IW_MAX_QP_WRS;
76*4882a593Smuzhiyun 	props->max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
77*4882a593Smuzhiyun 	props->max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
78*4882a593Smuzhiyun 	props->max_cq = iwdev->max_cq - iwdev->used_cqs;
79*4882a593Smuzhiyun 	props->max_cqe = iwdev->max_cqe;
80*4882a593Smuzhiyun 	props->max_mr = iwdev->max_mr - iwdev->used_mrs;
81*4882a593Smuzhiyun 	props->max_pd = iwdev->max_pd - iwdev->used_pds;
82*4882a593Smuzhiyun 	props->max_sge_rd = I40IW_MAX_SGE_RD;
83*4882a593Smuzhiyun 	props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
84*4882a593Smuzhiyun 	props->max_qp_init_rd_atom = props->max_qp_rd_atom;
85*4882a593Smuzhiyun 	props->atomic_cap = IB_ATOMIC_NONE;
86*4882a593Smuzhiyun 	props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
87*4882a593Smuzhiyun 	return 0;
88*4882a593Smuzhiyun }
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun /**
91*4882a593Smuzhiyun  * i40iw_query_port - get port attributes
92*4882a593Smuzhiyun  * @ibdev: device pointer from stack
93*4882a593Smuzhiyun  * @port: port number for query
94*4882a593Smuzhiyun  * @props: returning device attributes
95*4882a593Smuzhiyun  */
96*4882a593Smuzhiyun static int i40iw_query_port(struct ib_device *ibdev,
97*4882a593Smuzhiyun 			    u8 port,
98*4882a593Smuzhiyun 			    struct ib_port_attr *props)
99*4882a593Smuzhiyun {
100*4882a593Smuzhiyun 	props->lid = 1;
101*4882a593Smuzhiyun 	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
102*4882a593Smuzhiyun 		IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
103*4882a593Smuzhiyun 	props->gid_tbl_len = 1;
104*4882a593Smuzhiyun 	props->active_width = IB_WIDTH_4X;
105*4882a593Smuzhiyun 	props->active_speed = 1;
106*4882a593Smuzhiyun 	props->max_msg_sz = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
107*4882a593Smuzhiyun 	return 0;
108*4882a593Smuzhiyun }
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun /**
111*4882a593Smuzhiyun  * i40iw_alloc_ucontext - Allocate the user context data structure
112*4882a593Smuzhiyun  * @uctx: Uverbs context pointer from stack
113*4882a593Smuzhiyun  * @udata: user data
114*4882a593Smuzhiyun  *
115*4882a593Smuzhiyun  * This keeps track of all objects associated with a particular
116*4882a593Smuzhiyun  * user-mode client.
117*4882a593Smuzhiyun  */
118*4882a593Smuzhiyun static int i40iw_alloc_ucontext(struct ib_ucontext *uctx,
119*4882a593Smuzhiyun 				struct ib_udata *udata)
120*4882a593Smuzhiyun {
121*4882a593Smuzhiyun 	struct ib_device *ibdev = uctx->device;
122*4882a593Smuzhiyun 	struct i40iw_device *iwdev = to_iwdev(ibdev);
123*4882a593Smuzhiyun 	struct i40iw_alloc_ucontext_req req;
124*4882a593Smuzhiyun 	struct i40iw_alloc_ucontext_resp uresp = {};
125*4882a593Smuzhiyun 	struct i40iw_ucontext *ucontext = to_ucontext(uctx);
126*4882a593Smuzhiyun 
127*4882a593Smuzhiyun 	if (ib_copy_from_udata(&req, udata, sizeof(req)))
128*4882a593Smuzhiyun 		return -EINVAL;
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun 	if (req.userspace_ver < 4 || req.userspace_ver > I40IW_ABI_VER) {
131*4882a593Smuzhiyun 		i40iw_pr_err("Unsupported provider library version %u.\n", req.userspace_ver);
132*4882a593Smuzhiyun 		return -EINVAL;
133*4882a593Smuzhiyun 	}
134*4882a593Smuzhiyun 
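	/* Report driver limits back to user space and echo the accepted ABI version. */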
135*4882a593Smuzhiyun 	uresp.max_qps = iwdev->max_qp;
136*4882a593Smuzhiyun 	uresp.max_pds = iwdev->max_pd;
137*4882a593Smuzhiyun 	uresp.wq_size = iwdev->max_qp_wr * 2;
138*4882a593Smuzhiyun 	uresp.kernel_ver = req.userspace_ver;
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun 	ucontext->iwdev = iwdev;
141*4882a593Smuzhiyun 	ucontext->abi_ver = req.userspace_ver;
142*4882a593Smuzhiyun 
143*4882a593Smuzhiyun 	if (ib_copy_to_udata(udata, &uresp, sizeof(uresp)))
144*4882a593Smuzhiyun 		return -EFAULT;
145*4882a593Smuzhiyun 
146*4882a593Smuzhiyun 	INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
147*4882a593Smuzhiyun 	spin_lock_init(&ucontext->cq_reg_mem_list_lock);
148*4882a593Smuzhiyun 	INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
149*4882a593Smuzhiyun 	spin_lock_init(&ucontext->qp_reg_mem_list_lock);
150*4882a593Smuzhiyun 
151*4882a593Smuzhiyun 	return 0;
152*4882a593Smuzhiyun }
153*4882a593Smuzhiyun 
154*4882a593Smuzhiyun /**
155*4882a593Smuzhiyun  * i40iw_dealloc_ucontext - deallocate the user context data structure
156*4882a593Smuzhiyun  * @context: user context created during alloc
157*4882a593Smuzhiyun  */
158*4882a593Smuzhiyun static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
159*4882a593Smuzhiyun {
160*4882a593Smuzhiyun 	return;
161*4882a593Smuzhiyun }
162*4882a593Smuzhiyun 
163*4882a593Smuzhiyun /**
164*4882a593Smuzhiyun  * i40iw_mmap - user memory map
165*4882a593Smuzhiyun  * @context: context created during alloc
166*4882a593Smuzhiyun  * @vma: kernel info for user memory map
167*4882a593Smuzhiyun  */
168*4882a593Smuzhiyun static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
169*4882a593Smuzhiyun {
170*4882a593Smuzhiyun 	struct i40iw_ucontext *ucontext = to_ucontext(context);
171*4882a593Smuzhiyun 	u64 dbaddr;
172*4882a593Smuzhiyun 
173*4882a593Smuzhiyun 	if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
174*4882a593Smuzhiyun 		return -EINVAL;
175*4882a593Smuzhiyun 
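	/* Only the doorbell page (BAR0 + I40IW_DB_ADDR_OFFSET) is exposed to user space. */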
176*4882a593Smuzhiyun 	dbaddr = I40IW_DB_ADDR_OFFSET + pci_resource_start(ucontext->iwdev->ldev->pcidev, 0);
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun 	return rdma_user_mmap_io(context, vma, dbaddr >> PAGE_SHIFT, PAGE_SIZE,
179*4882a593Smuzhiyun 				 pgprot_noncached(vma->vm_page_prot), NULL);
180*4882a593Smuzhiyun }
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun /**
183*4882a593Smuzhiyun  * i40iw_alloc_push_page - allocate a push page for qp
184*4882a593Smuzhiyun  * @iwdev: iwarp device
185*4882a593Smuzhiyun  * @qp: hardware control qp
186*4882a593Smuzhiyun  */
187*4882a593Smuzhiyun static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
188*4882a593Smuzhiyun {
189*4882a593Smuzhiyun 	struct i40iw_cqp_request *cqp_request;
190*4882a593Smuzhiyun 	struct cqp_commands_info *cqp_info;
191*4882a593Smuzhiyun 	enum i40iw_status_code status;
192*4882a593Smuzhiyun 
193*4882a593Smuzhiyun 	if (qp->push_idx != I40IW_INVALID_PUSH_PAGE_INDEX)
194*4882a593Smuzhiyun 		return;
195*4882a593Smuzhiyun 
196*4882a593Smuzhiyun 	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
197*4882a593Smuzhiyun 	if (!cqp_request)
198*4882a593Smuzhiyun 		return;
199*4882a593Smuzhiyun 
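	/* Hold an extra reference so the request's completion info can still be
	 * read below; released by i40iw_put_cqp_request() at the end.
	 */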
200*4882a593Smuzhiyun 	atomic_inc(&cqp_request->refcount);
201*4882a593Smuzhiyun 
202*4882a593Smuzhiyun 	cqp_info = &cqp_request->info;
203*4882a593Smuzhiyun 	cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
204*4882a593Smuzhiyun 	cqp_info->post_sq = 1;
205*4882a593Smuzhiyun 
206*4882a593Smuzhiyun 	cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
207*4882a593Smuzhiyun 	cqp_info->in.u.manage_push_page.info.free_page = 0;
208*4882a593Smuzhiyun 	cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
209*4882a593Smuzhiyun 	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
210*4882a593Smuzhiyun 
211*4882a593Smuzhiyun 	status = i40iw_handle_cqp_op(iwdev, cqp_request);
212*4882a593Smuzhiyun 	if (!status)
213*4882a593Smuzhiyun 		qp->push_idx = cqp_request->compl_info.op_ret_val;
214*4882a593Smuzhiyun 	else
215*4882a593Smuzhiyun 		i40iw_pr_err("CQP-OP Push page fail");
216*4882a593Smuzhiyun 	i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
217*4882a593Smuzhiyun }
218*4882a593Smuzhiyun 
219*4882a593Smuzhiyun /**
220*4882a593Smuzhiyun  * i40iw_dealloc_push_page - free a push page for qp
221*4882a593Smuzhiyun  * @iwdev: iwarp device
222*4882a593Smuzhiyun  * @qp: hardware control qp
223*4882a593Smuzhiyun  */
224*4882a593Smuzhiyun static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
225*4882a593Smuzhiyun {
226*4882a593Smuzhiyun 	struct i40iw_cqp_request *cqp_request;
227*4882a593Smuzhiyun 	struct cqp_commands_info *cqp_info;
228*4882a593Smuzhiyun 	enum i40iw_status_code status;
229*4882a593Smuzhiyun 
230*4882a593Smuzhiyun 	if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX)
231*4882a593Smuzhiyun 		return;
232*4882a593Smuzhiyun 
233*4882a593Smuzhiyun 	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
234*4882a593Smuzhiyun 	if (!cqp_request)
235*4882a593Smuzhiyun 		return;
236*4882a593Smuzhiyun 
237*4882a593Smuzhiyun 	cqp_info = &cqp_request->info;
238*4882a593Smuzhiyun 	cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
239*4882a593Smuzhiyun 	cqp_info->post_sq = 1;
240*4882a593Smuzhiyun 
241*4882a593Smuzhiyun 	cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
242*4882a593Smuzhiyun 	cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
243*4882a593Smuzhiyun 	cqp_info->in.u.manage_push_page.info.free_page = 1;
244*4882a593Smuzhiyun 	cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
245*4882a593Smuzhiyun 	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
246*4882a593Smuzhiyun 
247*4882a593Smuzhiyun 	status = i40iw_handle_cqp_op(iwdev, cqp_request);
248*4882a593Smuzhiyun 	if (!status)
249*4882a593Smuzhiyun 		qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
250*4882a593Smuzhiyun 	else
251*4882a593Smuzhiyun 		i40iw_pr_err("CQP-OP Push page fail");
252*4882a593Smuzhiyun }
253*4882a593Smuzhiyun 
254*4882a593Smuzhiyun /**
255*4882a593Smuzhiyun  * i40iw_alloc_pd - allocate protection domain
256*4882a593Smuzhiyun  * @pd: PD pointer
257*4882a593Smuzhiyun  * @udata: user data
258*4882a593Smuzhiyun  */
259*4882a593Smuzhiyun static int i40iw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
260*4882a593Smuzhiyun {
261*4882a593Smuzhiyun 	struct i40iw_pd *iwpd = to_iwpd(pd);
262*4882a593Smuzhiyun 	struct i40iw_device *iwdev = to_iwdev(pd->device);
263*4882a593Smuzhiyun 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
264*4882a593Smuzhiyun 	struct i40iw_alloc_pd_resp uresp;
265*4882a593Smuzhiyun 	struct i40iw_sc_pd *sc_pd;
266*4882a593Smuzhiyun 	u32 pd_id = 0;
267*4882a593Smuzhiyun 	int err;
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun 	if (iwdev->closing)
270*4882a593Smuzhiyun 		return -ENODEV;
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun 	err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
273*4882a593Smuzhiyun 				   iwdev->max_pd, &pd_id, &iwdev->next_pd);
274*4882a593Smuzhiyun 	if (err) {
275*4882a593Smuzhiyun 		i40iw_pr_err("alloc resource failed\n");
276*4882a593Smuzhiyun 		return err;
277*4882a593Smuzhiyun 	}
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun 	sc_pd = &iwpd->sc_pd;
280*4882a593Smuzhiyun 
281*4882a593Smuzhiyun 	if (udata) {
282*4882a593Smuzhiyun 		struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
283*4882a593Smuzhiyun 			udata, struct i40iw_ucontext, ibucontext);
284*4882a593Smuzhiyun 		dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
285*4882a593Smuzhiyun 		memset(&uresp, 0, sizeof(uresp));
286*4882a593Smuzhiyun 		uresp.pd_id = pd_id;
287*4882a593Smuzhiyun 		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
288*4882a593Smuzhiyun 			err = -EFAULT;
289*4882a593Smuzhiyun 			goto error;
290*4882a593Smuzhiyun 		}
291*4882a593Smuzhiyun 	} else {
292*4882a593Smuzhiyun 		dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, -1);
293*4882a593Smuzhiyun 	}
294*4882a593Smuzhiyun 
295*4882a593Smuzhiyun 	i40iw_add_pdusecount(iwpd);
296*4882a593Smuzhiyun 	return 0;
297*4882a593Smuzhiyun 
298*4882a593Smuzhiyun error:
299*4882a593Smuzhiyun 	i40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id);
300*4882a593Smuzhiyun 	return err;
301*4882a593Smuzhiyun }
302*4882a593Smuzhiyun 
303*4882a593Smuzhiyun /**
304*4882a593Smuzhiyun  * i40iw_dealloc_pd - deallocate pd
305*4882a593Smuzhiyun  * @ibpd: ptr of pd to be deallocated
306*4882a593Smuzhiyun  * @udata: user data or null for kernel object
307*4882a593Smuzhiyun  */
308*4882a593Smuzhiyun static int i40iw_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
309*4882a593Smuzhiyun {
310*4882a593Smuzhiyun 	struct i40iw_pd *iwpd = to_iwpd(ibpd);
311*4882a593Smuzhiyun 	struct i40iw_device *iwdev = to_iwdev(ibpd->device);
312*4882a593Smuzhiyun 
313*4882a593Smuzhiyun 	i40iw_rem_pdusecount(iwpd, iwdev);
314*4882a593Smuzhiyun 	return 0;
315*4882a593Smuzhiyun }
316*4882a593Smuzhiyun 
317*4882a593Smuzhiyun /**
318*4882a593Smuzhiyun  * i40iw_get_pbl - Retrieve pbl from a list given a virtual
319*4882a593Smuzhiyun  * address
320*4882a593Smuzhiyun  * @va: user virtual address
321*4882a593Smuzhiyun  * @pbl_list: pbl list to search in (QP's or CQ's)
322*4882a593Smuzhiyun  */
323*4882a593Smuzhiyun static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
324*4882a593Smuzhiyun 				       struct list_head *pbl_list)
325*4882a593Smuzhiyun {
326*4882a593Smuzhiyun 	struct i40iw_pbl *iwpbl;
327*4882a593Smuzhiyun 
328*4882a593Smuzhiyun 	list_for_each_entry(iwpbl, pbl_list, list) {
329*4882a593Smuzhiyun 		if (iwpbl->user_base == va) {
330*4882a593Smuzhiyun 			iwpbl->on_list = false;
331*4882a593Smuzhiyun 			list_del(&iwpbl->list);
332*4882a593Smuzhiyun 			return iwpbl;
333*4882a593Smuzhiyun 		}
334*4882a593Smuzhiyun 	}
335*4882a593Smuzhiyun 	return NULL;
336*4882a593Smuzhiyun }
337*4882a593Smuzhiyun 
338*4882a593Smuzhiyun /**
339*4882a593Smuzhiyun  * i40iw_free_qp_resources - free up memory resources for qp
341*4882a593Smuzhiyun  * @iwqp: qp ptr (user or kernel)
343*4882a593Smuzhiyun  */
344*4882a593Smuzhiyun void i40iw_free_qp_resources(struct i40iw_qp *iwqp)
345*4882a593Smuzhiyun {
346*4882a593Smuzhiyun 	struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
347*4882a593Smuzhiyun 	struct i40iw_device *iwdev = iwqp->iwdev;
348*4882a593Smuzhiyun 	u32 qp_num = iwqp->ibqp.qp_num;
349*4882a593Smuzhiyun 
350*4882a593Smuzhiyun 	i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
351*4882a593Smuzhiyun 	i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
352*4882a593Smuzhiyun 	if (qp_num)
353*4882a593Smuzhiyun 		i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
354*4882a593Smuzhiyun 	if (iwpbl->pbl_allocated)
355*4882a593Smuzhiyun 		i40iw_free_pble(iwdev->pble_rsrc, &iwpbl->pble_alloc);
356*4882a593Smuzhiyun 	i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
357*4882a593Smuzhiyun 	i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
358*4882a593Smuzhiyun 	kfree(iwqp->kqp.wrid_mem);
359*4882a593Smuzhiyun 	iwqp->kqp.wrid_mem = NULL;
360*4882a593Smuzhiyun 	kfree(iwqp);
361*4882a593Smuzhiyun }
362*4882a593Smuzhiyun 
363*4882a593Smuzhiyun /**
364*4882a593Smuzhiyun  * i40iw_clean_cqes - clean cq entries for qp
365*4882a593Smuzhiyun  * @iwqp: qp ptr (user or kernel)
366*4882a593Smuzhiyun  * @iwcq: cq ptr
367*4882a593Smuzhiyun  */
368*4882a593Smuzhiyun static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
369*4882a593Smuzhiyun {
370*4882a593Smuzhiyun 	struct i40iw_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
371*4882a593Smuzhiyun 
372*4882a593Smuzhiyun 	ukcq->ops.iw_cq_clean(&iwqp->sc_qp.qp_uk, ukcq);
373*4882a593Smuzhiyun }
374*4882a593Smuzhiyun 
375*4882a593Smuzhiyun /**
376*4882a593Smuzhiyun  * i40iw_destroy_qp - destroy qp
377*4882a593Smuzhiyun  * @ibqp: qp's ib pointer also to get to device's qp address
378*4882a593Smuzhiyun  */
379*4882a593Smuzhiyun static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
380*4882a593Smuzhiyun {
381*4882a593Smuzhiyun 	struct i40iw_qp *iwqp = to_iwqp(ibqp);
382*4882a593Smuzhiyun 	struct ib_qp_attr attr;
383*4882a593Smuzhiyun 	struct i40iw_device *iwdev = iwqp->iwdev;
384*4882a593Smuzhiyun 
385*4882a593Smuzhiyun 	memset(&attr, 0, sizeof(attr));
386*4882a593Smuzhiyun 
387*4882a593Smuzhiyun 	iwqp->destroyed = 1;
388*4882a593Smuzhiyun 
389*4882a593Smuzhiyun 	if (iwqp->ibqp_state >= IB_QPS_INIT && iwqp->ibqp_state < IB_QPS_RTS)
390*4882a593Smuzhiyun 		i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 0, 0, 0);
391*4882a593Smuzhiyun 
392*4882a593Smuzhiyun 	if (!iwqp->user_mode) {
393*4882a593Smuzhiyun 		if (iwqp->iwscq) {
394*4882a593Smuzhiyun 			i40iw_clean_cqes(iwqp, iwqp->iwscq);
395*4882a593Smuzhiyun 			if (iwqp->iwrcq != iwqp->iwscq)
396*4882a593Smuzhiyun 				i40iw_clean_cqes(iwqp, iwqp->iwrcq);
397*4882a593Smuzhiyun 		}
398*4882a593Smuzhiyun 	}
399*4882a593Smuzhiyun 
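	/* Move the QP to error, drop our reference and wait for the remaining
	 * holders to release theirs before tearing down HW and memory resources.
	 */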
400*4882a593Smuzhiyun 	attr.qp_state = IB_QPS_ERR;
401*4882a593Smuzhiyun 	i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
402*4882a593Smuzhiyun 	i40iw_qp_rem_ref(&iwqp->ibqp);
403*4882a593Smuzhiyun 	wait_for_completion(&iwqp->free_qp);
404*4882a593Smuzhiyun 	i40iw_cqp_qp_destroy_cmd(&iwdev->sc_dev, &iwqp->sc_qp);
405*4882a593Smuzhiyun 	i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
406*4882a593Smuzhiyun 	i40iw_free_qp_resources(iwqp);
407*4882a593Smuzhiyun 	i40iw_rem_devusecount(iwdev);
408*4882a593Smuzhiyun 
409*4882a593Smuzhiyun 	return 0;
410*4882a593Smuzhiyun }
411*4882a593Smuzhiyun 
412*4882a593Smuzhiyun /**
413*4882a593Smuzhiyun  * i40iw_setup_virt_qp - setup for allocation of virtual qp
414*4882a593Smuzhiyun  * @iwdev: iwarp device
415*4882a593Smuzhiyun  * @iwqp: qp ptr
416*4882a593Smuzhiyun  * @init_info: initialize info to return
417*4882a593Smuzhiyun  */
418*4882a593Smuzhiyun static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
419*4882a593Smuzhiyun 			       struct i40iw_qp *iwqp,
420*4882a593Smuzhiyun 			       struct i40iw_qp_init_info *init_info)
421*4882a593Smuzhiyun {
422*4882a593Smuzhiyun 	struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
423*4882a593Smuzhiyun 	struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
424*4882a593Smuzhiyun 
425*4882a593Smuzhiyun 	iwqp->page = qpmr->sq_page;
426*4882a593Smuzhiyun 	init_info->shadow_area_pa = cpu_to_le64(qpmr->shadow);
427*4882a593Smuzhiyun 	if (iwpbl->pbl_allocated) {
428*4882a593Smuzhiyun 		init_info->virtual_map = true;
429*4882a593Smuzhiyun 		init_info->sq_pa = qpmr->sq_pbl.idx;
430*4882a593Smuzhiyun 		init_info->rq_pa = qpmr->rq_pbl.idx;
431*4882a593Smuzhiyun 	} else {
432*4882a593Smuzhiyun 		init_info->sq_pa = qpmr->sq_pbl.addr;
433*4882a593Smuzhiyun 		init_info->rq_pa = qpmr->rq_pbl.addr;
434*4882a593Smuzhiyun 	}
435*4882a593Smuzhiyun 	return 0;
436*4882a593Smuzhiyun }
437*4882a593Smuzhiyun 
438*4882a593Smuzhiyun /**
439*4882a593Smuzhiyun  * i40iw_setup_kmode_qp - setup initialization for kernel mode qp
440*4882a593Smuzhiyun  * @iwdev: iwarp device
441*4882a593Smuzhiyun  * @iwqp: qp ptr (user or kernel)
442*4882a593Smuzhiyun  * @info: initialize info to return
443*4882a593Smuzhiyun  */
444*4882a593Smuzhiyun static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
445*4882a593Smuzhiyun 				struct i40iw_qp *iwqp,
446*4882a593Smuzhiyun 				struct i40iw_qp_init_info *info)
447*4882a593Smuzhiyun {
448*4882a593Smuzhiyun 	struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
449*4882a593Smuzhiyun 	u32 sqdepth, rqdepth;
450*4882a593Smuzhiyun 	u8 sqshift;
451*4882a593Smuzhiyun 	u32 size;
452*4882a593Smuzhiyun 	enum i40iw_status_code status;
453*4882a593Smuzhiyun 	struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
454*4882a593Smuzhiyun 
455*4882a593Smuzhiyun 	i40iw_get_wqe_shift(ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
456*4882a593Smuzhiyun 	status = i40iw_get_sqdepth(ukinfo->sq_size, sqshift, &sqdepth);
457*4882a593Smuzhiyun 	if (status)
458*4882a593Smuzhiyun 		return -ENOMEM;
459*4882a593Smuzhiyun 
460*4882a593Smuzhiyun 	status = i40iw_get_rqdepth(ukinfo->rq_size, I40IW_MAX_RQ_WQE_SHIFT, &rqdepth);
461*4882a593Smuzhiyun 	if (status)
462*4882a593Smuzhiyun 		return -ENOMEM;
463*4882a593Smuzhiyun 
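	/* wrid_mem holds the SQ work-request tracking array followed by one u64 wrid per RQ WQE. */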
464*4882a593Smuzhiyun 	size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
465*4882a593Smuzhiyun 	iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);
466*4882a593Smuzhiyun 
467*4882a593Smuzhiyun 	ukinfo->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)iwqp->kqp.wrid_mem;
468*4882a593Smuzhiyun 	if (!ukinfo->sq_wrtrk_array)
469*4882a593Smuzhiyun 		return -ENOMEM;
470*4882a593Smuzhiyun 
471*4882a593Smuzhiyun 	ukinfo->rq_wrid_array = (u64 *)&ukinfo->sq_wrtrk_array[sqdepth];
472*4882a593Smuzhiyun 
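	/* Single DMA allocation laid out as SQ WQEs, then RQ WQEs, then the shadow area. */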
473*4882a593Smuzhiyun 	size = (sqdepth + rqdepth) * I40IW_QP_WQE_MIN_SIZE;
474*4882a593Smuzhiyun 	size += (I40IW_SHADOW_AREA_SIZE << 3);
475*4882a593Smuzhiyun 
476*4882a593Smuzhiyun 	status = i40iw_allocate_dma_mem(iwdev->sc_dev.hw, mem, size, 256);
477*4882a593Smuzhiyun 	if (status) {
478*4882a593Smuzhiyun 		kfree(ukinfo->sq_wrtrk_array);
479*4882a593Smuzhiyun 		ukinfo->sq_wrtrk_array = NULL;
480*4882a593Smuzhiyun 		return -ENOMEM;
481*4882a593Smuzhiyun 	}
482*4882a593Smuzhiyun 
483*4882a593Smuzhiyun 	ukinfo->sq = mem->va;
484*4882a593Smuzhiyun 	info->sq_pa = mem->pa;
485*4882a593Smuzhiyun 
486*4882a593Smuzhiyun 	ukinfo->rq = &ukinfo->sq[sqdepth];
487*4882a593Smuzhiyun 	info->rq_pa = info->sq_pa + (sqdepth * I40IW_QP_WQE_MIN_SIZE);
488*4882a593Smuzhiyun 
489*4882a593Smuzhiyun 	ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
490*4882a593Smuzhiyun 	info->shadow_area_pa = info->rq_pa + (rqdepth * I40IW_QP_WQE_MIN_SIZE);
491*4882a593Smuzhiyun 
492*4882a593Smuzhiyun 	ukinfo->sq_size = sqdepth >> sqshift;
493*4882a593Smuzhiyun 	ukinfo->rq_size = rqdepth >> I40IW_MAX_RQ_WQE_SHIFT;
494*4882a593Smuzhiyun 	ukinfo->qp_id = iwqp->ibqp.qp_num;
495*4882a593Smuzhiyun 	return 0;
496*4882a593Smuzhiyun }
497*4882a593Smuzhiyun 
498*4882a593Smuzhiyun /**
499*4882a593Smuzhiyun  * i40iw_create_qp - create qp
500*4882a593Smuzhiyun  * @ibpd: ptr of pd
501*4882a593Smuzhiyun  * @init_attr: attributes for qp
502*4882a593Smuzhiyun  * @udata: user data for create qp
503*4882a593Smuzhiyun  */
504*4882a593Smuzhiyun static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
505*4882a593Smuzhiyun 				     struct ib_qp_init_attr *init_attr,
506*4882a593Smuzhiyun 				     struct ib_udata *udata)
507*4882a593Smuzhiyun {
508*4882a593Smuzhiyun 	struct i40iw_pd *iwpd = to_iwpd(ibpd);
509*4882a593Smuzhiyun 	struct i40iw_device *iwdev = to_iwdev(ibpd->device);
510*4882a593Smuzhiyun 	struct i40iw_cqp *iwcqp = &iwdev->cqp;
511*4882a593Smuzhiyun 	struct i40iw_qp *iwqp;
512*4882a593Smuzhiyun 	struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
513*4882a593Smuzhiyun 		udata, struct i40iw_ucontext, ibucontext);
514*4882a593Smuzhiyun 	struct i40iw_create_qp_req req;
515*4882a593Smuzhiyun 	struct i40iw_create_qp_resp uresp;
516*4882a593Smuzhiyun 	u32 qp_num = 0;
517*4882a593Smuzhiyun 	enum i40iw_status_code ret;
518*4882a593Smuzhiyun 	int err_code;
519*4882a593Smuzhiyun 	int sq_size;
520*4882a593Smuzhiyun 	int rq_size;
521*4882a593Smuzhiyun 	struct i40iw_sc_qp *qp;
522*4882a593Smuzhiyun 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
523*4882a593Smuzhiyun 	struct i40iw_qp_init_info init_info;
524*4882a593Smuzhiyun 	struct i40iw_create_qp_info *qp_info;
525*4882a593Smuzhiyun 	struct i40iw_cqp_request *cqp_request;
526*4882a593Smuzhiyun 	struct cqp_commands_info *cqp_info;
527*4882a593Smuzhiyun 
528*4882a593Smuzhiyun 	struct i40iw_qp_host_ctx_info *ctx_info;
529*4882a593Smuzhiyun 	struct i40iwarp_offload_info *iwarp_info;
530*4882a593Smuzhiyun 	unsigned long flags;
531*4882a593Smuzhiyun 
532*4882a593Smuzhiyun 	if (iwdev->closing)
533*4882a593Smuzhiyun 		return ERR_PTR(-ENODEV);
534*4882a593Smuzhiyun 
535*4882a593Smuzhiyun 	if (init_attr->create_flags)
536*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
537*4882a593Smuzhiyun 	if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
538*4882a593Smuzhiyun 		init_attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
539*4882a593Smuzhiyun 
540*4882a593Smuzhiyun 	if (init_attr->cap.max_send_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
541*4882a593Smuzhiyun 		init_attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
542*4882a593Smuzhiyun 
543*4882a593Smuzhiyun 	if (init_attr->cap.max_recv_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
544*4882a593Smuzhiyun 		init_attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
545*4882a593Smuzhiyun 
546*4882a593Smuzhiyun 	memset(&init_info, 0, sizeof(init_info));
547*4882a593Smuzhiyun 
548*4882a593Smuzhiyun 	sq_size = init_attr->cap.max_send_wr;
549*4882a593Smuzhiyun 	rq_size = init_attr->cap.max_recv_wr;
550*4882a593Smuzhiyun 
551*4882a593Smuzhiyun 	init_info.vsi = &iwdev->vsi;
552*4882a593Smuzhiyun 	init_info.qp_uk_init_info.sq_size = sq_size;
553*4882a593Smuzhiyun 	init_info.qp_uk_init_info.rq_size = rq_size;
554*4882a593Smuzhiyun 	init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
555*4882a593Smuzhiyun 	init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
556*4882a593Smuzhiyun 	init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
557*4882a593Smuzhiyun 
558*4882a593Smuzhiyun 	iwqp = kzalloc(sizeof(*iwqp), GFP_KERNEL);
559*4882a593Smuzhiyun 	if (!iwqp)
560*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
561*4882a593Smuzhiyun 
562*4882a593Smuzhiyun 	qp = &iwqp->sc_qp;
563*4882a593Smuzhiyun 	qp->back_qp = (void *)iwqp;
564*4882a593Smuzhiyun 	qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
565*4882a593Smuzhiyun 
566*4882a593Smuzhiyun 	iwqp->iwdev = iwdev;
567*4882a593Smuzhiyun 	iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;
568*4882a593Smuzhiyun 
569*4882a593Smuzhiyun 	if (i40iw_allocate_dma_mem(dev->hw,
570*4882a593Smuzhiyun 				   &iwqp->q2_ctx_mem,
571*4882a593Smuzhiyun 				   I40IW_Q2_BUFFER_SIZE + I40IW_QP_CTX_SIZE,
572*4882a593Smuzhiyun 				   256)) {
573*4882a593Smuzhiyun 		i40iw_pr_err("dma_mem failed\n");
574*4882a593Smuzhiyun 		err_code = -ENOMEM;
575*4882a593Smuzhiyun 		goto error;
576*4882a593Smuzhiyun 	}
577*4882a593Smuzhiyun 
578*4882a593Smuzhiyun 	init_info.q2 = iwqp->q2_ctx_mem.va;
579*4882a593Smuzhiyun 	init_info.q2_pa = iwqp->q2_ctx_mem.pa;
580*4882a593Smuzhiyun 
581*4882a593Smuzhiyun 	init_info.host_ctx = (void *)init_info.q2 + I40IW_Q2_BUFFER_SIZE;
582*4882a593Smuzhiyun 	init_info.host_ctx_pa = init_info.q2_pa + I40IW_Q2_BUFFER_SIZE;
583*4882a593Smuzhiyun 
584*4882a593Smuzhiyun 	err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_qps, iwdev->max_qp,
585*4882a593Smuzhiyun 					&qp_num, &iwdev->next_qp);
586*4882a593Smuzhiyun 	if (err_code) {
587*4882a593Smuzhiyun 		i40iw_pr_err("qp resource\n");
588*4882a593Smuzhiyun 		goto error;
589*4882a593Smuzhiyun 	}
590*4882a593Smuzhiyun 
591*4882a593Smuzhiyun 	iwqp->iwpd = iwpd;
592*4882a593Smuzhiyun 	iwqp->ibqp.qp_num = qp_num;
593*4882a593Smuzhiyun 	qp = &iwqp->sc_qp;
594*4882a593Smuzhiyun 	iwqp->iwscq = to_iwcq(init_attr->send_cq);
595*4882a593Smuzhiyun 	iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
596*4882a593Smuzhiyun 
597*4882a593Smuzhiyun 	iwqp->host_ctx.va = init_info.host_ctx;
598*4882a593Smuzhiyun 	iwqp->host_ctx.pa = init_info.host_ctx_pa;
599*4882a593Smuzhiyun 	iwqp->host_ctx.size = I40IW_QP_CTX_SIZE;
600*4882a593Smuzhiyun 
601*4882a593Smuzhiyun 	init_info.pd = &iwpd->sc_pd;
602*4882a593Smuzhiyun 	init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
603*4882a593Smuzhiyun 	iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
604*4882a593Smuzhiyun 
605*4882a593Smuzhiyun 	if (init_attr->qp_type != IB_QPT_RC) {
606*4882a593Smuzhiyun 		err_code = -EOPNOTSUPP;
607*4882a593Smuzhiyun 		goto error;
608*4882a593Smuzhiyun 	}
609*4882a593Smuzhiyun 	if (iwdev->push_mode)
610*4882a593Smuzhiyun 		i40iw_alloc_push_page(iwdev, qp);
611*4882a593Smuzhiyun 	if (udata) {
612*4882a593Smuzhiyun 		err_code = ib_copy_from_udata(&req, udata, sizeof(req));
613*4882a593Smuzhiyun 		if (err_code) {
614*4882a593Smuzhiyun 			i40iw_pr_err("ib_copy_from_data\n");
615*4882a593Smuzhiyun 			goto error;
616*4882a593Smuzhiyun 		}
617*4882a593Smuzhiyun 		iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
618*4882a593Smuzhiyun 		iwqp->user_mode = 1;
619*4882a593Smuzhiyun 
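		/* User-mode QP: look up the PBL of the previously registered WQE buffers by virtual address. */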
620*4882a593Smuzhiyun 		if (req.user_wqe_buffers) {
621*4882a593Smuzhiyun 			struct i40iw_pbl *iwpbl;
622*4882a593Smuzhiyun 
623*4882a593Smuzhiyun 			spin_lock_irqsave(
624*4882a593Smuzhiyun 			    &ucontext->qp_reg_mem_list_lock, flags);
625*4882a593Smuzhiyun 			iwpbl = i40iw_get_pbl(
626*4882a593Smuzhiyun 			    (unsigned long)req.user_wqe_buffers,
627*4882a593Smuzhiyun 			    &ucontext->qp_reg_mem_list);
628*4882a593Smuzhiyun 			spin_unlock_irqrestore(
629*4882a593Smuzhiyun 			    &ucontext->qp_reg_mem_list_lock, flags);
630*4882a593Smuzhiyun 
631*4882a593Smuzhiyun 			if (!iwpbl) {
632*4882a593Smuzhiyun 				err_code = -ENODATA;
633*4882a593Smuzhiyun 				i40iw_pr_err("no pbl info\n");
634*4882a593Smuzhiyun 				goto error;
635*4882a593Smuzhiyun 			}
636*4882a593Smuzhiyun 			memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl));
637*4882a593Smuzhiyun 		}
638*4882a593Smuzhiyun 		err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
639*4882a593Smuzhiyun 	} else {
640*4882a593Smuzhiyun 		err_code = i40iw_setup_kmode_qp(iwdev, iwqp, &init_info);
641*4882a593Smuzhiyun 	}
642*4882a593Smuzhiyun 
643*4882a593Smuzhiyun 	if (err_code) {
644*4882a593Smuzhiyun 		i40iw_pr_err("setup qp failed\n");
645*4882a593Smuzhiyun 		goto error;
646*4882a593Smuzhiyun 	}
647*4882a593Smuzhiyun 
648*4882a593Smuzhiyun 	init_info.type = I40IW_QP_TYPE_IWARP;
649*4882a593Smuzhiyun 	ret = dev->iw_priv_qp_ops->qp_init(qp, &init_info);
650*4882a593Smuzhiyun 	if (ret) {
651*4882a593Smuzhiyun 		err_code = -EPROTO;
652*4882a593Smuzhiyun 		i40iw_pr_err("qp_init fail\n");
653*4882a593Smuzhiyun 		goto error;
654*4882a593Smuzhiyun 	}
655*4882a593Smuzhiyun 	ctx_info = &iwqp->ctx_info;
656*4882a593Smuzhiyun 	iwarp_info = &iwqp->iwarp_info;
657*4882a593Smuzhiyun 	iwarp_info->rd_enable = true;
658*4882a593Smuzhiyun 	iwarp_info->wr_rdresp_en = true;
659*4882a593Smuzhiyun 	if (!iwqp->user_mode) {
660*4882a593Smuzhiyun 		iwarp_info->fast_reg_en = true;
661*4882a593Smuzhiyun 		iwarp_info->priv_mode_en = true;
662*4882a593Smuzhiyun 	}
663*4882a593Smuzhiyun 	iwarp_info->ddp_ver = 1;
664*4882a593Smuzhiyun 	iwarp_info->rdmap_ver = 1;
665*4882a593Smuzhiyun 
666*4882a593Smuzhiyun 	ctx_info->iwarp_info_valid = true;
667*4882a593Smuzhiyun 	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
668*4882a593Smuzhiyun 	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
669*4882a593Smuzhiyun 	if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX) {
670*4882a593Smuzhiyun 		ctx_info->push_mode_en = false;
671*4882a593Smuzhiyun 	} else {
672*4882a593Smuzhiyun 		ctx_info->push_mode_en = true;
673*4882a593Smuzhiyun 		ctx_info->push_idx = qp->push_idx;
674*4882a593Smuzhiyun 	}
675*4882a593Smuzhiyun 
676*4882a593Smuzhiyun 	ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
677*4882a593Smuzhiyun 					     (u64 *)iwqp->host_ctx.va,
678*4882a593Smuzhiyun 					     ctx_info);
679*4882a593Smuzhiyun 	ctx_info->iwarp_info_valid = false;
680*4882a593Smuzhiyun 	cqp_request = i40iw_get_cqp_request(iwcqp, true);
681*4882a593Smuzhiyun 	if (!cqp_request) {
682*4882a593Smuzhiyun 		err_code = -ENOMEM;
683*4882a593Smuzhiyun 		goto error;
684*4882a593Smuzhiyun 	}
685*4882a593Smuzhiyun 	cqp_info = &cqp_request->info;
686*4882a593Smuzhiyun 	qp_info = &cqp_request->info.in.u.qp_create.info;
687*4882a593Smuzhiyun 
688*4882a593Smuzhiyun 	memset(qp_info, 0, sizeof(*qp_info));
689*4882a593Smuzhiyun 
690*4882a593Smuzhiyun 	qp_info->cq_num_valid = true;
691*4882a593Smuzhiyun 	qp_info->next_iwarp_state = I40IW_QP_STATE_IDLE;
692*4882a593Smuzhiyun 
693*4882a593Smuzhiyun 	cqp_info->cqp_cmd = OP_QP_CREATE;
694*4882a593Smuzhiyun 	cqp_info->post_sq = 1;
695*4882a593Smuzhiyun 	cqp_info->in.u.qp_create.qp = qp;
696*4882a593Smuzhiyun 	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
697*4882a593Smuzhiyun 	ret = i40iw_handle_cqp_op(iwdev, cqp_request);
698*4882a593Smuzhiyun 	if (ret) {
699*4882a593Smuzhiyun 		i40iw_pr_err("CQP-OP QP create fail");
700*4882a593Smuzhiyun 		err_code = -EACCES;
701*4882a593Smuzhiyun 		goto error;
702*4882a593Smuzhiyun 	}
703*4882a593Smuzhiyun 
704*4882a593Smuzhiyun 	refcount_set(&iwqp->refcount, 1);
705*4882a593Smuzhiyun 	spin_lock_init(&iwqp->lock);
706*4882a593Smuzhiyun 	iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
707*4882a593Smuzhiyun 	iwdev->qp_table[qp_num] = iwqp;
708*4882a593Smuzhiyun 	i40iw_add_pdusecount(iwqp->iwpd);
709*4882a593Smuzhiyun 	i40iw_add_devusecount(iwdev);
710*4882a593Smuzhiyun 	if (udata) {
711*4882a593Smuzhiyun 		memset(&uresp, 0, sizeof(uresp));
712*4882a593Smuzhiyun 		uresp.actual_sq_size = sq_size;
713*4882a593Smuzhiyun 		uresp.actual_rq_size = rq_size;
714*4882a593Smuzhiyun 		uresp.qp_id = qp_num;
715*4882a593Smuzhiyun 		uresp.push_idx = qp->push_idx;
716*4882a593Smuzhiyun 		err_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
717*4882a593Smuzhiyun 		if (err_code) {
718*4882a593Smuzhiyun 			i40iw_pr_err("copy_to_udata failed\n");
719*4882a593Smuzhiyun 			i40iw_destroy_qp(&iwqp->ibqp, udata);
720*4882a593Smuzhiyun 			/* let the completion of the qp destroy free the qp */
721*4882a593Smuzhiyun 			return ERR_PTR(err_code);
722*4882a593Smuzhiyun 		}
723*4882a593Smuzhiyun 	}
724*4882a593Smuzhiyun 	init_completion(&iwqp->sq_drained);
725*4882a593Smuzhiyun 	init_completion(&iwqp->rq_drained);
726*4882a593Smuzhiyun 	init_completion(&iwqp->free_qp);
727*4882a593Smuzhiyun 
728*4882a593Smuzhiyun 	return &iwqp->ibqp;
729*4882a593Smuzhiyun error:
730*4882a593Smuzhiyun 	i40iw_free_qp_resources(iwqp);
731*4882a593Smuzhiyun 	return ERR_PTR(err_code);
732*4882a593Smuzhiyun }
733*4882a593Smuzhiyun 
734*4882a593Smuzhiyun /**
735*4882a593Smuzhiyun  * i40iw_query_qp - query qp attributes
736*4882a593Smuzhiyun  * @ibqp: qp pointer
737*4882a593Smuzhiyun  * @attr: attributes pointer
738*4882a593Smuzhiyun  * @attr_mask: Not used
739*4882a593Smuzhiyun  * @init_attr: qp attributes to return
740*4882a593Smuzhiyun  */
741*4882a593Smuzhiyun static int i40iw_query_qp(struct ib_qp *ibqp,
742*4882a593Smuzhiyun 			  struct ib_qp_attr *attr,
743*4882a593Smuzhiyun 			  int attr_mask,
744*4882a593Smuzhiyun 			  struct ib_qp_init_attr *init_attr)
745*4882a593Smuzhiyun {
746*4882a593Smuzhiyun 	struct i40iw_qp *iwqp = to_iwqp(ibqp);
747*4882a593Smuzhiyun 	struct i40iw_sc_qp *qp = &iwqp->sc_qp;
748*4882a593Smuzhiyun 
749*4882a593Smuzhiyun 	attr->qp_state = iwqp->ibqp_state;
750*4882a593Smuzhiyun 	attr->cur_qp_state = attr->qp_state;
751*4882a593Smuzhiyun 	attr->qp_access_flags = 0;
752*4882a593Smuzhiyun 	attr->cap.max_send_wr = qp->qp_uk.sq_size;
753*4882a593Smuzhiyun 	attr->cap.max_recv_wr = qp->qp_uk.rq_size;
754*4882a593Smuzhiyun 	attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
755*4882a593Smuzhiyun 	attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
756*4882a593Smuzhiyun 	attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
757*4882a593Smuzhiyun 	attr->port_num = 1;
758*4882a593Smuzhiyun 	init_attr->event_handler = iwqp->ibqp.event_handler;
759*4882a593Smuzhiyun 	init_attr->qp_context = iwqp->ibqp.qp_context;
760*4882a593Smuzhiyun 	init_attr->send_cq = iwqp->ibqp.send_cq;
761*4882a593Smuzhiyun 	init_attr->recv_cq = iwqp->ibqp.recv_cq;
762*4882a593Smuzhiyun 	init_attr->srq = iwqp->ibqp.srq;
763*4882a593Smuzhiyun 	init_attr->cap = attr->cap;
764*4882a593Smuzhiyun 	init_attr->port_num = 1;
765*4882a593Smuzhiyun 	return 0;
766*4882a593Smuzhiyun }
767*4882a593Smuzhiyun 
768*4882a593Smuzhiyun /**
769*4882a593Smuzhiyun  * i40iw_hw_modify_qp - setup cqp for modify qp
770*4882a593Smuzhiyun  * @iwdev: iwarp device
771*4882a593Smuzhiyun  * @iwqp: qp ptr (user or kernel)
772*4882a593Smuzhiyun  * @info: info for modify qp
773*4882a593Smuzhiyun  * @wait: flag to wait or not for modify qp completion
774*4882a593Smuzhiyun  */
775*4882a593Smuzhiyun void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
776*4882a593Smuzhiyun 			struct i40iw_modify_qp_info *info, bool wait)
777*4882a593Smuzhiyun {
778*4882a593Smuzhiyun 	struct i40iw_cqp_request *cqp_request;
779*4882a593Smuzhiyun 	struct cqp_commands_info *cqp_info;
780*4882a593Smuzhiyun 	struct i40iw_modify_qp_info *m_info;
781*4882a593Smuzhiyun 	struct i40iw_gen_ae_info ae_info;
782*4882a593Smuzhiyun 
783*4882a593Smuzhiyun 	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
784*4882a593Smuzhiyun 	if (!cqp_request)
785*4882a593Smuzhiyun 		return;
786*4882a593Smuzhiyun 
787*4882a593Smuzhiyun 	cqp_info = &cqp_request->info;
788*4882a593Smuzhiyun 	m_info = &cqp_info->in.u.qp_modify.info;
789*4882a593Smuzhiyun 	memcpy(m_info, info, sizeof(*m_info));
790*4882a593Smuzhiyun 	cqp_info->cqp_cmd = OP_QP_MODIFY;
791*4882a593Smuzhiyun 	cqp_info->post_sq = 1;
792*4882a593Smuzhiyun 	cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
793*4882a593Smuzhiyun 	cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
794*4882a593Smuzhiyun 	if (!i40iw_handle_cqp_op(iwdev, cqp_request))
795*4882a593Smuzhiyun 		return;
796*4882a593Smuzhiyun 
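	/* Modify QP failed: force the connection down based on the state that was requested. */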
797*4882a593Smuzhiyun 	switch (m_info->next_iwarp_state) {
798*4882a593Smuzhiyun 	case I40IW_QP_STATE_RTS:
799*4882a593Smuzhiyun 		if (iwqp->iwarp_state == I40IW_QP_STATE_IDLE)
800*4882a593Smuzhiyun 			i40iw_send_reset(iwqp->cm_node);
801*4882a593Smuzhiyun 		fallthrough;
802*4882a593Smuzhiyun 	case I40IW_QP_STATE_IDLE:
803*4882a593Smuzhiyun 	case I40IW_QP_STATE_TERMINATE:
804*4882a593Smuzhiyun 	case I40IW_QP_STATE_CLOSING:
805*4882a593Smuzhiyun 		ae_info.ae_code = I40IW_AE_BAD_CLOSE;
806*4882a593Smuzhiyun 		ae_info.ae_source = 0;
807*4882a593Smuzhiyun 		i40iw_gen_ae(iwdev, &iwqp->sc_qp, &ae_info, false);
808*4882a593Smuzhiyun 		break;
809*4882a593Smuzhiyun 	case I40IW_QP_STATE_ERROR:
810*4882a593Smuzhiyun 	default:
811*4882a593Smuzhiyun 		break;
812*4882a593Smuzhiyun 	}
813*4882a593Smuzhiyun }
814*4882a593Smuzhiyun 
815*4882a593Smuzhiyun /**
816*4882a593Smuzhiyun  * i40iw_modify_qp - modify qp request
817*4882a593Smuzhiyun  * @ibqp: qp's pointer for modify
818*4882a593Smuzhiyun  * @attr: access attributes
819*4882a593Smuzhiyun  * @attr_mask: state mask
820*4882a593Smuzhiyun  * @udata: user data
821*4882a593Smuzhiyun  */
822*4882a593Smuzhiyun int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
823*4882a593Smuzhiyun 		    int attr_mask, struct ib_udata *udata)
824*4882a593Smuzhiyun {
825*4882a593Smuzhiyun 	struct i40iw_qp *iwqp = to_iwqp(ibqp);
826*4882a593Smuzhiyun 	struct i40iw_device *iwdev = iwqp->iwdev;
827*4882a593Smuzhiyun 	struct i40iw_qp_host_ctx_info *ctx_info;
828*4882a593Smuzhiyun 	struct i40iwarp_offload_info *iwarp_info;
829*4882a593Smuzhiyun 	struct i40iw_modify_qp_info info;
830*4882a593Smuzhiyun 	u8 issue_modify_qp = 0;
831*4882a593Smuzhiyun 	u8 dont_wait = 0;
832*4882a593Smuzhiyun 	u32 err;
833*4882a593Smuzhiyun 	unsigned long flags;
834*4882a593Smuzhiyun 
835*4882a593Smuzhiyun 	memset(&info, 0, sizeof(info));
836*4882a593Smuzhiyun 	ctx_info = &iwqp->ctx_info;
837*4882a593Smuzhiyun 	iwarp_info = &iwqp->iwarp_info;
838*4882a593Smuzhiyun 
839*4882a593Smuzhiyun 	spin_lock_irqsave(&iwqp->lock, flags);
840*4882a593Smuzhiyun 
841*4882a593Smuzhiyun 	if (attr_mask & IB_QP_STATE) {
842*4882a593Smuzhiyun 		if (iwdev->closing && attr->qp_state != IB_QPS_ERR) {
843*4882a593Smuzhiyun 			err = -EINVAL;
844*4882a593Smuzhiyun 			goto exit;
845*4882a593Smuzhiyun 		}
846*4882a593Smuzhiyun 
847*4882a593Smuzhiyun 		switch (attr->qp_state) {
848*4882a593Smuzhiyun 		case IB_QPS_INIT:
849*4882a593Smuzhiyun 		case IB_QPS_RTR:
850*4882a593Smuzhiyun 			if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_IDLE) {
851*4882a593Smuzhiyun 				err = -EINVAL;
852*4882a593Smuzhiyun 				goto exit;
853*4882a593Smuzhiyun 			}
854*4882a593Smuzhiyun 			if (iwqp->iwarp_state == I40IW_QP_STATE_INVALID) {
855*4882a593Smuzhiyun 				info.next_iwarp_state = I40IW_QP_STATE_IDLE;
856*4882a593Smuzhiyun 				issue_modify_qp = 1;
857*4882a593Smuzhiyun 			}
858*4882a593Smuzhiyun 			break;
859*4882a593Smuzhiyun 		case IB_QPS_RTS:
860*4882a593Smuzhiyun 			if ((iwqp->iwarp_state > (u32)I40IW_QP_STATE_RTS) ||
861*4882a593Smuzhiyun 			    (!iwqp->cm_id)) {
862*4882a593Smuzhiyun 				err = -EINVAL;
863*4882a593Smuzhiyun 				goto exit;
864*4882a593Smuzhiyun 			}
865*4882a593Smuzhiyun 
866*4882a593Smuzhiyun 			issue_modify_qp = 1;
867*4882a593Smuzhiyun 			iwqp->hw_tcp_state = I40IW_TCP_STATE_ESTABLISHED;
868*4882a593Smuzhiyun 			iwqp->hte_added = 1;
869*4882a593Smuzhiyun 			info.next_iwarp_state = I40IW_QP_STATE_RTS;
870*4882a593Smuzhiyun 			info.tcp_ctx_valid = true;
871*4882a593Smuzhiyun 			info.ord_valid = true;
872*4882a593Smuzhiyun 			info.arp_cache_idx_valid = true;
873*4882a593Smuzhiyun 			info.cq_num_valid = true;
874*4882a593Smuzhiyun 			break;
875*4882a593Smuzhiyun 		case IB_QPS_SQD:
876*4882a593Smuzhiyun 			if (iwqp->hw_iwarp_state > (u32)I40IW_QP_STATE_RTS) {
877*4882a593Smuzhiyun 				err = 0;
878*4882a593Smuzhiyun 				goto exit;
879*4882a593Smuzhiyun 			}
880*4882a593Smuzhiyun 			if ((iwqp->iwarp_state == (u32)I40IW_QP_STATE_CLOSING) ||
881*4882a593Smuzhiyun 			    (iwqp->iwarp_state < (u32)I40IW_QP_STATE_RTS)) {
882*4882a593Smuzhiyun 				err = 0;
883*4882a593Smuzhiyun 				goto exit;
884*4882a593Smuzhiyun 			}
885*4882a593Smuzhiyun 			if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_CLOSING) {
886*4882a593Smuzhiyun 				err = -EINVAL;
887*4882a593Smuzhiyun 				goto exit;
888*4882a593Smuzhiyun 			}
889*4882a593Smuzhiyun 			info.next_iwarp_state = I40IW_QP_STATE_CLOSING;
890*4882a593Smuzhiyun 			issue_modify_qp = 1;
891*4882a593Smuzhiyun 			break;
892*4882a593Smuzhiyun 		case IB_QPS_SQE:
893*4882a593Smuzhiyun 			if (iwqp->iwarp_state >= (u32)I40IW_QP_STATE_TERMINATE) {
894*4882a593Smuzhiyun 				err = -EINVAL;
895*4882a593Smuzhiyun 				goto exit;
896*4882a593Smuzhiyun 			}
897*4882a593Smuzhiyun 			info.next_iwarp_state = I40IW_QP_STATE_TERMINATE;
898*4882a593Smuzhiyun 			issue_modify_qp = 1;
899*4882a593Smuzhiyun 			break;
900*4882a593Smuzhiyun 		case IB_QPS_ERR:
901*4882a593Smuzhiyun 		case IB_QPS_RESET:
902*4882a593Smuzhiyun 			if (iwqp->iwarp_state == (u32)I40IW_QP_STATE_ERROR) {
903*4882a593Smuzhiyun 				err = -EINVAL;
904*4882a593Smuzhiyun 				goto exit;
905*4882a593Smuzhiyun 			}
906*4882a593Smuzhiyun 			if (iwqp->sc_qp.term_flags)
907*4882a593Smuzhiyun 				i40iw_terminate_del_timer(&iwqp->sc_qp);
908*4882a593Smuzhiyun 			info.next_iwarp_state = I40IW_QP_STATE_ERROR;
909*4882a593Smuzhiyun 			if ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) &&
910*4882a593Smuzhiyun 			    iwdev->iw_status &&
911*4882a593Smuzhiyun 			    (iwqp->hw_tcp_state != I40IW_TCP_STATE_TIME_WAIT))
912*4882a593Smuzhiyun 				info.reset_tcp_conn = true;
913*4882a593Smuzhiyun 			else
914*4882a593Smuzhiyun 				dont_wait = 1;
915*4882a593Smuzhiyun 			issue_modify_qp = 1;
916*4882a593Smuzhiyun 			info.next_iwarp_state = I40IW_QP_STATE_ERROR;
917*4882a593Smuzhiyun 			break;
918*4882a593Smuzhiyun 		default:
919*4882a593Smuzhiyun 			err = -EINVAL;
920*4882a593Smuzhiyun 			goto exit;
921*4882a593Smuzhiyun 		}
922*4882a593Smuzhiyun 
923*4882a593Smuzhiyun 		iwqp->ibqp_state = attr->qp_state;
924*4882a593Smuzhiyun 
925*4882a593Smuzhiyun 	}
926*4882a593Smuzhiyun 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
927*4882a593Smuzhiyun 		ctx_info->iwarp_info_valid = true;
928*4882a593Smuzhiyun 		if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
929*4882a593Smuzhiyun 			iwarp_info->wr_rdresp_en = true;
930*4882a593Smuzhiyun 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
931*4882a593Smuzhiyun 			iwarp_info->wr_rdresp_en = true;
932*4882a593Smuzhiyun 		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
933*4882a593Smuzhiyun 			iwarp_info->rd_enable = true;
934*4882a593Smuzhiyun 		if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
935*4882a593Smuzhiyun 			iwarp_info->bind_en = true;
936*4882a593Smuzhiyun 
937*4882a593Smuzhiyun 		if (iwqp->user_mode) {
938*4882a593Smuzhiyun 			iwarp_info->rd_enable = true;
939*4882a593Smuzhiyun 			iwarp_info->wr_rdresp_en = true;
940*4882a593Smuzhiyun 			iwarp_info->priv_mode_en = false;
941*4882a593Smuzhiyun 		}
942*4882a593Smuzhiyun 	}
943*4882a593Smuzhiyun 
944*4882a593Smuzhiyun 	if (ctx_info->iwarp_info_valid) {
945*4882a593Smuzhiyun 		struct i40iw_sc_dev *dev = &iwdev->sc_dev;
946*4882a593Smuzhiyun 		int ret;
947*4882a593Smuzhiyun 
948*4882a593Smuzhiyun 		ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
949*4882a593Smuzhiyun 		ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
950*4882a593Smuzhiyun 		ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
951*4882a593Smuzhiyun 						     (u64 *)iwqp->host_ctx.va,
952*4882a593Smuzhiyun 						     ctx_info);
953*4882a593Smuzhiyun 		if (ret) {
954*4882a593Smuzhiyun 			i40iw_pr_err("setting QP context\n");
955*4882a593Smuzhiyun 			err = -EINVAL;
956*4882a593Smuzhiyun 			goto exit;
957*4882a593Smuzhiyun 		}
958*4882a593Smuzhiyun 	}
959*4882a593Smuzhiyun 
960*4882a593Smuzhiyun 	spin_unlock_irqrestore(&iwqp->lock, flags);
961*4882a593Smuzhiyun 
962*4882a593Smuzhiyun 	if (issue_modify_qp) {
963*4882a593Smuzhiyun 		i40iw_hw_modify_qp(iwdev, iwqp, &info, true);
964*4882a593Smuzhiyun 
965*4882a593Smuzhiyun 		spin_lock_irqsave(&iwqp->lock, flags);
966*4882a593Smuzhiyun 		iwqp->iwarp_state = info.next_iwarp_state;
967*4882a593Smuzhiyun 		spin_unlock_irqrestore(&iwqp->lock, flags);
968*4882a593Smuzhiyun 	}
969*4882a593Smuzhiyun 
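	/* When moving past RTS, either tear the connection down immediately
	 * (dont_wait) or start the close timer and let the CM finish the disconnect.
	 */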
970*4882a593Smuzhiyun 	if (issue_modify_qp && (iwqp->ibqp_state > IB_QPS_RTS)) {
971*4882a593Smuzhiyun 		if (dont_wait) {
972*4882a593Smuzhiyun 			if (iwqp->cm_id && iwqp->hw_tcp_state) {
973*4882a593Smuzhiyun 				spin_lock_irqsave(&iwqp->lock, flags);
974*4882a593Smuzhiyun 				iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
975*4882a593Smuzhiyun 				iwqp->last_aeq = I40IW_AE_RESET_SENT;
976*4882a593Smuzhiyun 				spin_unlock_irqrestore(&iwqp->lock, flags);
977*4882a593Smuzhiyun 				i40iw_cm_disconn(iwqp);
978*4882a593Smuzhiyun 			}
979*4882a593Smuzhiyun 		} else {
980*4882a593Smuzhiyun 			spin_lock_irqsave(&iwqp->lock, flags);
981*4882a593Smuzhiyun 			if (iwqp->cm_id) {
982*4882a593Smuzhiyun 				if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
983*4882a593Smuzhiyun 					iwqp->cm_id->add_ref(iwqp->cm_id);
984*4882a593Smuzhiyun 					i40iw_schedule_cm_timer(iwqp->cm_node,
985*4882a593Smuzhiyun 								(struct i40iw_puda_buf *)iwqp,
986*4882a593Smuzhiyun 								 I40IW_TIMER_TYPE_CLOSE, 1, 0);
987*4882a593Smuzhiyun 				}
988*4882a593Smuzhiyun 			}
989*4882a593Smuzhiyun 			spin_unlock_irqrestore(&iwqp->lock, flags);
990*4882a593Smuzhiyun 		}
991*4882a593Smuzhiyun 	}
992*4882a593Smuzhiyun 	return 0;
993*4882a593Smuzhiyun exit:
994*4882a593Smuzhiyun 	spin_unlock_irqrestore(&iwqp->lock, flags);
995*4882a593Smuzhiyun 	return err;
996*4882a593Smuzhiyun }
997*4882a593Smuzhiyun 
998*4882a593Smuzhiyun /**
999*4882a593Smuzhiyun  * cq_free_resources - free up resources for cq
1000*4882a593Smuzhiyun  * @iwdev: iwarp device
1001*4882a593Smuzhiyun  * @iwcq: cq ptr
1002*4882a593Smuzhiyun  */
1003*4882a593Smuzhiyun static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq)
1004*4882a593Smuzhiyun {
1005*4882a593Smuzhiyun 	struct i40iw_sc_cq *cq = &iwcq->sc_cq;
1006*4882a593Smuzhiyun 
1007*4882a593Smuzhiyun 	if (!iwcq->user_mode)
1008*4882a593Smuzhiyun 		i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwcq->kmem);
1009*4882a593Smuzhiyun 	i40iw_free_resource(iwdev, iwdev->allocated_cqs, cq->cq_uk.cq_id);
1010*4882a593Smuzhiyun }
1011*4882a593Smuzhiyun 
1012*4882a593Smuzhiyun /**
1013*4882a593Smuzhiyun  * i40iw_cq_wq_destroy - send cq destroy cqp
1014*4882a593Smuzhiyun  * @iwdev: iwarp device
1015*4882a593Smuzhiyun  * @cq: hardware control cq
1016*4882a593Smuzhiyun  */
1017*4882a593Smuzhiyun void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
1018*4882a593Smuzhiyun {
1019*4882a593Smuzhiyun 	enum i40iw_status_code status;
1020*4882a593Smuzhiyun 	struct i40iw_cqp_request *cqp_request;
1021*4882a593Smuzhiyun 	struct cqp_commands_info *cqp_info;
1022*4882a593Smuzhiyun 
1023*4882a593Smuzhiyun 	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1024*4882a593Smuzhiyun 	if (!cqp_request)
1025*4882a593Smuzhiyun 		return;
1026*4882a593Smuzhiyun 
1027*4882a593Smuzhiyun 	cqp_info = &cqp_request->info;
1028*4882a593Smuzhiyun 
1029*4882a593Smuzhiyun 	cqp_info->cqp_cmd = OP_CQ_DESTROY;
1030*4882a593Smuzhiyun 	cqp_info->post_sq = 1;
1031*4882a593Smuzhiyun 	cqp_info->in.u.cq_destroy.cq = cq;
1032*4882a593Smuzhiyun 	cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
1033*4882a593Smuzhiyun 	status = i40iw_handle_cqp_op(iwdev, cqp_request);
1034*4882a593Smuzhiyun 	if (status)
1035*4882a593Smuzhiyun 		i40iw_pr_err("CQP-OP Destroy CQ fail");
1036*4882a593Smuzhiyun }
1037*4882a593Smuzhiyun 
1038*4882a593Smuzhiyun /**
1039*4882a593Smuzhiyun  * i40iw_destroy_cq - destroy cq
1040*4882a593Smuzhiyun  * @ib_cq: cq pointer
1041*4882a593Smuzhiyun  * @udata: user data or NULL for kernel object
1042*4882a593Smuzhiyun  */
1043*4882a593Smuzhiyun static int i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
1044*4882a593Smuzhiyun {
1045*4882a593Smuzhiyun 	struct i40iw_cq *iwcq;
1046*4882a593Smuzhiyun 	struct i40iw_device *iwdev;
1047*4882a593Smuzhiyun 	struct i40iw_sc_cq *cq;
1048*4882a593Smuzhiyun 
1049*4882a593Smuzhiyun 	iwcq = to_iwcq(ib_cq);
1050*4882a593Smuzhiyun 	iwdev = to_iwdev(ib_cq->device);
1051*4882a593Smuzhiyun 	cq = &iwcq->sc_cq;
1052*4882a593Smuzhiyun 	i40iw_cq_wq_destroy(iwdev, cq);
1053*4882a593Smuzhiyun 	cq_free_resources(iwdev, iwcq);
1054*4882a593Smuzhiyun 	i40iw_rem_devusecount(iwdev);
1055*4882a593Smuzhiyun 	return 0;
1056*4882a593Smuzhiyun }
1057*4882a593Smuzhiyun 
1058*4882a593Smuzhiyun /**
1059*4882a593Smuzhiyun  * i40iw_create_cq - create cq
1060*4882a593Smuzhiyun  * @ibcq: CQ allocated
1061*4882a593Smuzhiyun  * @attr: attributes for cq
1062*4882a593Smuzhiyun  * @udata: user data
1063*4882a593Smuzhiyun  */
1064*4882a593Smuzhiyun static int i40iw_create_cq(struct ib_cq *ibcq,
1065*4882a593Smuzhiyun 			   const struct ib_cq_init_attr *attr,
1066*4882a593Smuzhiyun 			   struct ib_udata *udata)
1067*4882a593Smuzhiyun {
1068*4882a593Smuzhiyun 	struct ib_device *ibdev = ibcq->device;
1069*4882a593Smuzhiyun 	struct i40iw_device *iwdev = to_iwdev(ibdev);
1070*4882a593Smuzhiyun 	struct i40iw_cq *iwcq = to_iwcq(ibcq);
1071*4882a593Smuzhiyun 	struct i40iw_pbl *iwpbl;
1072*4882a593Smuzhiyun 	u32 cq_num = 0;
1073*4882a593Smuzhiyun 	struct i40iw_sc_cq *cq;
1074*4882a593Smuzhiyun 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
1075*4882a593Smuzhiyun 	struct i40iw_cq_init_info info = {};
1076*4882a593Smuzhiyun 	enum i40iw_status_code status;
1077*4882a593Smuzhiyun 	struct i40iw_cqp_request *cqp_request;
1078*4882a593Smuzhiyun 	struct cqp_commands_info *cqp_info;
1079*4882a593Smuzhiyun 	struct i40iw_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
1080*4882a593Smuzhiyun 	unsigned long flags;
1081*4882a593Smuzhiyun 	int err_code;
1082*4882a593Smuzhiyun 	int entries = attr->cqe;
1083*4882a593Smuzhiyun 
1084*4882a593Smuzhiyun 	if (iwdev->closing)
1085*4882a593Smuzhiyun 		return -ENODEV;
1086*4882a593Smuzhiyun 
1087*4882a593Smuzhiyun 	if (entries > iwdev->max_cqe)
1088*4882a593Smuzhiyun 		return -EINVAL;
1089*4882a593Smuzhiyun 
1090*4882a593Smuzhiyun 	err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_cqs,
1091*4882a593Smuzhiyun 					iwdev->max_cq, &cq_num,
1092*4882a593Smuzhiyun 					&iwdev->next_cq);
1093*4882a593Smuzhiyun 	if (err_code)
1094*4882a593Smuzhiyun 		return err_code;
1095*4882a593Smuzhiyun 
1096*4882a593Smuzhiyun 	cq = &iwcq->sc_cq;
1097*4882a593Smuzhiyun 	cq->back_cq = (void *)iwcq;
1098*4882a593Smuzhiyun 	spin_lock_init(&iwcq->lock);
1099*4882a593Smuzhiyun 
1100*4882a593Smuzhiyun 	info.dev = dev;
1101*4882a593Smuzhiyun 	ukinfo->cq_size = max(entries, 4);
1102*4882a593Smuzhiyun 	ukinfo->cq_id = cq_num;
1103*4882a593Smuzhiyun 	iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
1104*4882a593Smuzhiyun 	info.ceqe_mask = 0;
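	/*
	 * Map the requested completion vector to a CEQ id; an out-of-range
	 * vector falls back to CEQ 0 (info was zero-initialized above).
	 */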
1105*4882a593Smuzhiyun 	if (attr->comp_vector < iwdev->ceqs_count)
1106*4882a593Smuzhiyun 		info.ceq_id = attr->comp_vector;
1107*4882a593Smuzhiyun 	info.ceq_id_valid = true;
1108*4882a593Smuzhiyun 	info.ceqe_mask = 1;
1109*4882a593Smuzhiyun 	info.type = I40IW_CQ_TYPE_IWARP;
1110*4882a593Smuzhiyun 	if (udata) {
1111*4882a593Smuzhiyun 		struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
1112*4882a593Smuzhiyun 			udata, struct i40iw_ucontext, ibucontext);
1113*4882a593Smuzhiyun 		struct i40iw_create_cq_req req;
1114*4882a593Smuzhiyun 		struct i40iw_cq_mr *cqmr;
1115*4882a593Smuzhiyun 
1116*4882a593Smuzhiyun 		memset(&req, 0, sizeof(req));
1117*4882a593Smuzhiyun 		iwcq->user_mode = true;
1118*4882a593Smuzhiyun 		if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
1119*4882a593Smuzhiyun 			err_code = -EFAULT;
1120*4882a593Smuzhiyun 			goto cq_free_resources;
1121*4882a593Smuzhiyun 		}
1122*4882a593Smuzhiyun 
1123*4882a593Smuzhiyun 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1124*4882a593Smuzhiyun 		iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
1125*4882a593Smuzhiyun 				      &ucontext->cq_reg_mem_list);
1126*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1127*4882a593Smuzhiyun 		if (!iwpbl) {
1128*4882a593Smuzhiyun 			err_code = -EPROTO;
1129*4882a593Smuzhiyun 			goto cq_free_resources;
1130*4882a593Smuzhiyun 		}
1131*4882a593Smuzhiyun 
1132*4882a593Smuzhiyun 		iwcq->iwpbl = iwpbl;
1133*4882a593Smuzhiyun 		iwcq->cq_mem_size = 0;
1134*4882a593Smuzhiyun 		cqmr = &iwpbl->cq_mr;
1135*4882a593Smuzhiyun 		info.shadow_area_pa = cpu_to_le64(cqmr->shadow);
1136*4882a593Smuzhiyun 		if (iwpbl->pbl_allocated) {
1137*4882a593Smuzhiyun 			info.virtual_map = true;
1138*4882a593Smuzhiyun 			info.pbl_chunk_size = 1;
1139*4882a593Smuzhiyun 			info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
1140*4882a593Smuzhiyun 		} else {
1141*4882a593Smuzhiyun 			info.cq_base_pa = cqmr->cq_pbl.addr;
1142*4882a593Smuzhiyun 		}
1143*4882a593Smuzhiyun 	} else {
1144*4882a593Smuzhiyun 		/* Kmode allocations */
1145*4882a593Smuzhiyun 		int rsize;
1146*4882a593Smuzhiyun 		int shadow;
1147*4882a593Smuzhiyun 
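		/*
		 * Kernel-mode CQ: the ring is sized in CQE units, rounded up to
		 * a 256-byte boundary, with the shadow area appended after the
		 * ring (the shadow size constant is presumably in 8-byte units,
		 * hence the << 3 to convert to bytes).
		 */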
1148*4882a593Smuzhiyun 		rsize = info.cq_uk_init_info.cq_size * sizeof(struct i40iw_cqe);
1149*4882a593Smuzhiyun 		rsize = round_up(rsize, 256);
1150*4882a593Smuzhiyun 		shadow = I40IW_SHADOW_AREA_SIZE << 3;
1151*4882a593Smuzhiyun 		status = i40iw_allocate_dma_mem(dev->hw, &iwcq->kmem,
1152*4882a593Smuzhiyun 						rsize + shadow, 256);
1153*4882a593Smuzhiyun 		if (status) {
1154*4882a593Smuzhiyun 			err_code = -ENOMEM;
1155*4882a593Smuzhiyun 			goto cq_free_resources;
1156*4882a593Smuzhiyun 		}
1157*4882a593Smuzhiyun 		ukinfo->cq_base = iwcq->kmem.va;
1158*4882a593Smuzhiyun 		info.cq_base_pa = iwcq->kmem.pa;
1159*4882a593Smuzhiyun 		info.shadow_area_pa = info.cq_base_pa + rsize;
1160*4882a593Smuzhiyun 		ukinfo->shadow_area = iwcq->kmem.va + rsize;
1161*4882a593Smuzhiyun 	}
1162*4882a593Smuzhiyun 
1163*4882a593Smuzhiyun 	if (dev->iw_priv_cq_ops->cq_init(cq, &info)) {
1164*4882a593Smuzhiyun 		i40iw_pr_err("init cq fail\n");
1165*4882a593Smuzhiyun 		err_code = -EPROTO;
1166*4882a593Smuzhiyun 		goto cq_free_resources;
1167*4882a593Smuzhiyun 	}
1168*4882a593Smuzhiyun 
1169*4882a593Smuzhiyun 	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1170*4882a593Smuzhiyun 	if (!cqp_request) {
1171*4882a593Smuzhiyun 		err_code = -ENOMEM;
1172*4882a593Smuzhiyun 		goto cq_free_resources;
1173*4882a593Smuzhiyun 	}
1174*4882a593Smuzhiyun 
1175*4882a593Smuzhiyun 	cqp_info = &cqp_request->info;
1176*4882a593Smuzhiyun 	cqp_info->cqp_cmd = OP_CQ_CREATE;
1177*4882a593Smuzhiyun 	cqp_info->post_sq = 1;
1178*4882a593Smuzhiyun 	cqp_info->in.u.cq_create.cq = cq;
1179*4882a593Smuzhiyun 	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
1180*4882a593Smuzhiyun 	status = i40iw_handle_cqp_op(iwdev, cqp_request);
1181*4882a593Smuzhiyun 	if (status) {
1182*4882a593Smuzhiyun 		i40iw_pr_err("CQP-OP Create CQ fail");
1183*4882a593Smuzhiyun 		err_code = -EPROTO;
1184*4882a593Smuzhiyun 		goto cq_free_resources;
1185*4882a593Smuzhiyun 	}
1186*4882a593Smuzhiyun 
1187*4882a593Smuzhiyun 	if (udata) {
1188*4882a593Smuzhiyun 		struct i40iw_create_cq_resp resp;
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun 		memset(&resp, 0, sizeof(resp));
1191*4882a593Smuzhiyun 		resp.cq_id = info.cq_uk_init_info.cq_id;
1192*4882a593Smuzhiyun 		resp.cq_size = info.cq_uk_init_info.cq_size;
1193*4882a593Smuzhiyun 		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
1194*4882a593Smuzhiyun 			i40iw_pr_err("copy to user data fail\n");
1195*4882a593Smuzhiyun 			err_code = -EPROTO;
1196*4882a593Smuzhiyun 			goto cq_destroy;
1197*4882a593Smuzhiyun 		}
1198*4882a593Smuzhiyun 	}
1199*4882a593Smuzhiyun 
1200*4882a593Smuzhiyun 	i40iw_add_devusecount(iwdev);
1201*4882a593Smuzhiyun 	return 0;
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun cq_destroy:
1204*4882a593Smuzhiyun 	i40iw_cq_wq_destroy(iwdev, cq);
1205*4882a593Smuzhiyun cq_free_resources:
1206*4882a593Smuzhiyun 	cq_free_resources(iwdev, iwcq);
1207*4882a593Smuzhiyun 	return err_code;
1208*4882a593Smuzhiyun }
1209*4882a593Smuzhiyun 
1210*4882a593Smuzhiyun /**
1211*4882a593Smuzhiyun  * i40iw_get_user_access - get hw access from IB access
1212*4882a593Smuzhiyun  * @acc: IB access to return hw access
1213*4882a593Smuzhiyun  */
i40iw_get_user_access(int acc)1214*4882a593Smuzhiyun static inline u16 i40iw_get_user_access(int acc)
1215*4882a593Smuzhiyun {
1216*4882a593Smuzhiyun 	u16 access = 0;
1217*4882a593Smuzhiyun 
1218*4882a593Smuzhiyun 	access |= (acc & IB_ACCESS_LOCAL_WRITE) ? I40IW_ACCESS_FLAGS_LOCALWRITE : 0;
1219*4882a593Smuzhiyun 	access |= (acc & IB_ACCESS_REMOTE_WRITE) ? I40IW_ACCESS_FLAGS_REMOTEWRITE : 0;
1220*4882a593Smuzhiyun 	access |= (acc & IB_ACCESS_REMOTE_READ) ? I40IW_ACCESS_FLAGS_REMOTEREAD : 0;
1221*4882a593Smuzhiyun 	access |= (acc & IB_ACCESS_MW_BIND) ? I40IW_ACCESS_FLAGS_BIND_WINDOW : 0;
1222*4882a593Smuzhiyun 	return access;
1223*4882a593Smuzhiyun }
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun /**
1226*4882a593Smuzhiyun  * i40iw_free_stag - free stag resource
1227*4882a593Smuzhiyun  * @iwdev: iwarp device
1228*4882a593Smuzhiyun  * @stag: stag to free
1229*4882a593Smuzhiyun  */
i40iw_free_stag(struct i40iw_device * iwdev,u32 stag)1230*4882a593Smuzhiyun static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)
1231*4882a593Smuzhiyun {
1232*4882a593Smuzhiyun 	u32 stag_idx;
1233*4882a593Smuzhiyun 
1234*4882a593Smuzhiyun 	stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;
1235*4882a593Smuzhiyun 	i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx);
1236*4882a593Smuzhiyun 	i40iw_rem_devusecount(iwdev);
1237*4882a593Smuzhiyun }
1238*4882a593Smuzhiyun 
1239*4882a593Smuzhiyun /**
1240*4882a593Smuzhiyun  * i40iw_create_stag - create random stag
1241*4882a593Smuzhiyun  * @iwdev: iwarp device
1242*4882a593Smuzhiyun  */
i40iw_create_stag(struct i40iw_device * iwdev)1243*4882a593Smuzhiyun static u32 i40iw_create_stag(struct i40iw_device *iwdev)
1244*4882a593Smuzhiyun {
1245*4882a593Smuzhiyun 	u32 stag = 0;
1246*4882a593Smuzhiyun 	u32 stag_index = 0;
1247*4882a593Smuzhiyun 	u32 next_stag_index;
1248*4882a593Smuzhiyun 	u32 driver_key;
1249*4882a593Smuzhiyun 	u32 random;
1250*4882a593Smuzhiyun 	u8 consumer_key;
1251*4882a593Smuzhiyun 	int ret;
1252*4882a593Smuzhiyun 
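	/*
	 * The stag combines the allocated resource index (in the bits covered
	 * by mr_stagmask), a random driver key in the remaining bits, and a
	 * random consumer key added into the low byte.
	 */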
1253*4882a593Smuzhiyun 	get_random_bytes(&random, sizeof(random));
1254*4882a593Smuzhiyun 	consumer_key = (u8)random;
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun 	driver_key = random & ~iwdev->mr_stagmask;
1257*4882a593Smuzhiyun 	next_stag_index = (random & iwdev->mr_stagmask) >> 8;
1258*4882a593Smuzhiyun 	next_stag_index %= iwdev->max_mr;
1259*4882a593Smuzhiyun 
1260*4882a593Smuzhiyun 	ret = i40iw_alloc_resource(iwdev,
1261*4882a593Smuzhiyun 				   iwdev->allocated_mrs, iwdev->max_mr,
1262*4882a593Smuzhiyun 				   &stag_index, &next_stag_index);
1263*4882a593Smuzhiyun 	if (!ret) {
1264*4882a593Smuzhiyun 		stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT;
1265*4882a593Smuzhiyun 		stag |= driver_key;
1266*4882a593Smuzhiyun 		stag += (u32)consumer_key;
1267*4882a593Smuzhiyun 		i40iw_add_devusecount(iwdev);
1268*4882a593Smuzhiyun 	}
1269*4882a593Smuzhiyun 	return stag;
1270*4882a593Smuzhiyun }
1271*4882a593Smuzhiyun 
1272*4882a593Smuzhiyun /**
1273*4882a593Smuzhiyun  * i40iw_next_pbl_addr - Get next pbl address
1274*4882a593Smuzhiyun  * @pbl: pointer to a pble
1275*4882a593Smuzhiyun  * @pinfo: info pointer
1276*4882a593Smuzhiyun  * @idx: index
1277*4882a593Smuzhiyun  */
i40iw_next_pbl_addr(u64 * pbl,struct i40iw_pble_info ** pinfo,u32 * idx)1278*4882a593Smuzhiyun static inline u64 *i40iw_next_pbl_addr(u64 *pbl,
1279*4882a593Smuzhiyun 				       struct i40iw_pble_info **pinfo,
1280*4882a593Smuzhiyun 				       u32 *idx)
1281*4882a593Smuzhiyun {
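	/*
	 * Advance within the current pble chunk; once the chunk described by
	 * *pinfo is exhausted, move to the next leaf and restart at its base
	 * address. pinfo is NULL for level-1 allocations, which use a single
	 * contiguous array.
	 */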
1282*4882a593Smuzhiyun 	*idx += 1;
1283*4882a593Smuzhiyun 	if ((!(*pinfo)) || (*idx != (*pinfo)->cnt))
1284*4882a593Smuzhiyun 		return ++pbl;
1285*4882a593Smuzhiyun 	*idx = 0;
1286*4882a593Smuzhiyun 	(*pinfo)++;
1287*4882a593Smuzhiyun 	return (u64 *)(*pinfo)->addr;
1288*4882a593Smuzhiyun }
1289*4882a593Smuzhiyun 
1290*4882a593Smuzhiyun /**
1291*4882a593Smuzhiyun  * i40iw_copy_user_pgaddrs - copy user page addresses to pbles
1292*4882a593Smuzhiyun  * @iwmr: iwmr for IB's user page addresses
1293*4882a593Smuzhiyun  * @pbl: pbl pointer to save level 1 or level 0 pble
1294*4882a593Smuzhiyun  * @level: indicates level 0, 1 or 2
1295*4882a593Smuzhiyun  */
i40iw_copy_user_pgaddrs(struct i40iw_mr * iwmr,u64 * pbl,enum i40iw_pble_level level)1296*4882a593Smuzhiyun static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
1297*4882a593Smuzhiyun 				    u64 *pbl,
1298*4882a593Smuzhiyun 				    enum i40iw_pble_level level)
1299*4882a593Smuzhiyun {
1300*4882a593Smuzhiyun 	struct ib_umem *region = iwmr->region;
1301*4882a593Smuzhiyun 	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1302*4882a593Smuzhiyun 	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1303*4882a593Smuzhiyun 	struct i40iw_pble_info *pinfo;
1304*4882a593Smuzhiyun 	struct ib_block_iter biter;
1305*4882a593Smuzhiyun 	u32 idx = 0;
1306*4882a593Smuzhiyun 
1307*4882a593Smuzhiyun 	pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
1308*4882a593Smuzhiyun 
1309*4882a593Smuzhiyun 	if (iwmr->type == IW_MEMREG_TYPE_QP)
1310*4882a593Smuzhiyun 		iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);
1311*4882a593Smuzhiyun 
1312*4882a593Smuzhiyun 	rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
1313*4882a593Smuzhiyun 		*pbl = rdma_block_iter_dma_address(&biter);
1314*4882a593Smuzhiyun 		pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
1315*4882a593Smuzhiyun 	}
1316*4882a593Smuzhiyun }
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun /**
1319*4882a593Smuzhiyun  * i40iw_check_mem_contiguous - check if pbls stored in arr are contiguous
1320*4882a593Smuzhiyun  * @arr: lvl1 pbl array
1321*4882a593Smuzhiyun  * @npages: page count
1322*4882a593Smuzhiyun  * @pg_size: page size
1323*4882a593Smuzhiyun  *
1324*4882a593Smuzhiyun  */
i40iw_check_mem_contiguous(u64 * arr,u32 npages,u32 pg_size)1325*4882a593Smuzhiyun static bool i40iw_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
1326*4882a593Smuzhiyun {
1327*4882a593Smuzhiyun 	u32 pg_idx;
1328*4882a593Smuzhiyun 
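	/* Contiguous means every entry equals the first address plus pg_idx * pg_size. */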
1329*4882a593Smuzhiyun 	for (pg_idx = 0; pg_idx < npages; pg_idx++) {
1330*4882a593Smuzhiyun 		if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
1331*4882a593Smuzhiyun 			return false;
1332*4882a593Smuzhiyun 	}
1333*4882a593Smuzhiyun 	return true;
1334*4882a593Smuzhiyun }
1335*4882a593Smuzhiyun 
1336*4882a593Smuzhiyun /**
1337*4882a593Smuzhiyun  * i40iw_check_mr_contiguous - check if MR is physically contiguous
1338*4882a593Smuzhiyun  * @palloc: pbl allocation struct
1339*4882a593Smuzhiyun  * @pg_size: page size
1340*4882a593Smuzhiyun  */
i40iw_check_mr_contiguous(struct i40iw_pble_alloc * palloc,u32 pg_size)1341*4882a593Smuzhiyun static bool i40iw_check_mr_contiguous(struct i40iw_pble_alloc *palloc, u32 pg_size)
1342*4882a593Smuzhiyun {
1343*4882a593Smuzhiyun 	struct i40iw_pble_level2 *lvl2 = &palloc->level2;
1344*4882a593Smuzhiyun 	struct i40iw_pble_info *leaf = lvl2->leaf;
1345*4882a593Smuzhiyun 	u64 *arr = NULL;
1346*4882a593Smuzhiyun 	u64 *start_addr = NULL;
1347*4882a593Smuzhiyun 	int i;
1348*4882a593Smuzhiyun 	bool ret;
1349*4882a593Smuzhiyun 
1350*4882a593Smuzhiyun 	if (palloc->level == I40IW_LEVEL_1) {
1351*4882a593Smuzhiyun 		arr = (u64 *)palloc->level1.addr;
1352*4882a593Smuzhiyun 		ret = i40iw_check_mem_contiguous(arr, palloc->total_cnt, pg_size);
1353*4882a593Smuzhiyun 		return ret;
1354*4882a593Smuzhiyun 	}
1355*4882a593Smuzhiyun 
1356*4882a593Smuzhiyun 	start_addr = (u64 *)leaf->addr;
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun 	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
1359*4882a593Smuzhiyun 		arr = (u64 *)leaf->addr;
1360*4882a593Smuzhiyun 		if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
1361*4882a593Smuzhiyun 			return false;
1362*4882a593Smuzhiyun 		ret = i40iw_check_mem_contiguous(arr, leaf->cnt, pg_size);
1363*4882a593Smuzhiyun 		if (!ret)
1364*4882a593Smuzhiyun 			return false;
1365*4882a593Smuzhiyun 	}
1366*4882a593Smuzhiyun 
1367*4882a593Smuzhiyun 	return true;
1368*4882a593Smuzhiyun }
1369*4882a593Smuzhiyun 
1370*4882a593Smuzhiyun /**
1371*4882a593Smuzhiyun  * i40iw_setup_pbles - copy user pg address to pble's
1372*4882a593Smuzhiyun  * @iwdev: iwarp device
1373*4882a593Smuzhiyun  * @iwmr: mr pointer for this memory registration
1374*4882a593Smuzhiyun  * @use_pbles: flag to use pbles
1375*4882a593Smuzhiyun  */
i40iw_setup_pbles(struct i40iw_device * iwdev,struct i40iw_mr * iwmr,bool use_pbles)1376*4882a593Smuzhiyun static int i40iw_setup_pbles(struct i40iw_device *iwdev,
1377*4882a593Smuzhiyun 			     struct i40iw_mr *iwmr,
1378*4882a593Smuzhiyun 			     bool use_pbles)
1379*4882a593Smuzhiyun {
1380*4882a593Smuzhiyun 	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1381*4882a593Smuzhiyun 	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1382*4882a593Smuzhiyun 	struct i40iw_pble_info *pinfo;
1383*4882a593Smuzhiyun 	u64 *pbl;
1384*4882a593Smuzhiyun 	enum i40iw_status_code status;
1385*4882a593Smuzhiyun 	enum i40iw_pble_level level = I40IW_LEVEL_1;
1386*4882a593Smuzhiyun 
1387*4882a593Smuzhiyun 	if (use_pbles) {
1388*4882a593Smuzhiyun 		mutex_lock(&iwdev->pbl_mutex);
1389*4882a593Smuzhiyun 		status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
1390*4882a593Smuzhiyun 		mutex_unlock(&iwdev->pbl_mutex);
1391*4882a593Smuzhiyun 		if (status)
1392*4882a593Smuzhiyun 			return -ENOMEM;
1393*4882a593Smuzhiyun 
1394*4882a593Smuzhiyun 		iwpbl->pbl_allocated = true;
1395*4882a593Smuzhiyun 		level = palloc->level;
1396*4882a593Smuzhiyun 		pinfo = (level == I40IW_LEVEL_1) ? &palloc->level1 : palloc->level2.leaf;
1397*4882a593Smuzhiyun 		pbl = (u64 *)pinfo->addr;
1398*4882a593Smuzhiyun 	} else {
1399*4882a593Smuzhiyun 		pbl = iwmr->pgaddrmem;
1400*4882a593Smuzhiyun 	}
1401*4882a593Smuzhiyun 
1402*4882a593Smuzhiyun 	i40iw_copy_user_pgaddrs(iwmr, pbl, level);
1403*4882a593Smuzhiyun 
1404*4882a593Smuzhiyun 	if (use_pbles)
1405*4882a593Smuzhiyun 		iwmr->pgaddrmem[0] = *pbl;
1406*4882a593Smuzhiyun 
1407*4882a593Smuzhiyun 	return 0;
1408*4882a593Smuzhiyun }
1409*4882a593Smuzhiyun 
1410*4882a593Smuzhiyun /**
1411*4882a593Smuzhiyun  * i40iw_handle_q_mem - handle memory for qp and cq
1412*4882a593Smuzhiyun  * @iwdev: iwarp device
1413*4882a593Smuzhiyun  * @req: information for q memory management
1414*4882a593Smuzhiyun  * @iwpbl: pble struct
1415*4882a593Smuzhiyun  * @use_pbles: flag to use pble
1416*4882a593Smuzhiyun  */
i40iw_handle_q_mem(struct i40iw_device * iwdev,struct i40iw_mem_reg_req * req,struct i40iw_pbl * iwpbl,bool use_pbles)1417*4882a593Smuzhiyun static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
1418*4882a593Smuzhiyun 			      struct i40iw_mem_reg_req *req,
1419*4882a593Smuzhiyun 			      struct i40iw_pbl *iwpbl,
1420*4882a593Smuzhiyun 			      bool use_pbles)
1421*4882a593Smuzhiyun {
1422*4882a593Smuzhiyun 	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1423*4882a593Smuzhiyun 	struct i40iw_mr *iwmr = iwpbl->iwmr;
1424*4882a593Smuzhiyun 	struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
1425*4882a593Smuzhiyun 	struct i40iw_cq_mr *cqmr = &iwpbl->cq_mr;
1426*4882a593Smuzhiyun 	struct i40iw_hmc_pble *hmc_p;
1427*4882a593Smuzhiyun 	u64 *arr = iwmr->pgaddrmem;
1428*4882a593Smuzhiyun 	u32 pg_size;
1429*4882a593Smuzhiyun 	int err;
1430*4882a593Smuzhiyun 	int total;
1431*4882a593Smuzhiyun 	bool ret = true;
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun 	total = req->sq_pages + req->rq_pages + req->cq_pages;
1434*4882a593Smuzhiyun 	pg_size = iwmr->page_size;
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun 	err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
1437*4882a593Smuzhiyun 	if (err)
1438*4882a593Smuzhiyun 		return err;
1439*4882a593Smuzhiyun 
1440*4882a593Smuzhiyun 	if (use_pbles && (palloc->level != I40IW_LEVEL_1)) {
1441*4882a593Smuzhiyun 		i40iw_free_pble(iwdev->pble_rsrc, palloc);
1442*4882a593Smuzhiyun 		iwpbl->pbl_allocated = false;
1443*4882a593Smuzhiyun 		return -ENOMEM;
1444*4882a593Smuzhiyun 	}
1445*4882a593Smuzhiyun 
1446*4882a593Smuzhiyun 	if (use_pbles)
1447*4882a593Smuzhiyun 		arr = (u64 *)palloc->level1.addr;
1448*4882a593Smuzhiyun 
1449*4882a593Smuzhiyun 	if (iwmr->type == IW_MEMREG_TYPE_QP) {
1450*4882a593Smuzhiyun 		hmc_p = &qpmr->sq_pbl;
1451*4882a593Smuzhiyun 		qpmr->shadow = (dma_addr_t)arr[total];
1452*4882a593Smuzhiyun 
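		/*
		 * If the SQ and RQ rings are physically contiguous (or no pbles
		 * were used), program their base physical addresses directly;
		 * otherwise hand the HW the level-1 pble indices.
		 */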
1453*4882a593Smuzhiyun 		if (use_pbles) {
1454*4882a593Smuzhiyun 			ret = i40iw_check_mem_contiguous(arr, req->sq_pages, pg_size);
1455*4882a593Smuzhiyun 			if (ret)
1456*4882a593Smuzhiyun 				ret = i40iw_check_mem_contiguous(&arr[req->sq_pages], req->rq_pages, pg_size);
1457*4882a593Smuzhiyun 		}
1458*4882a593Smuzhiyun 
1459*4882a593Smuzhiyun 		if (!ret) {
1460*4882a593Smuzhiyun 			hmc_p->idx = palloc->level1.idx;
1461*4882a593Smuzhiyun 			hmc_p = &qpmr->rq_pbl;
1462*4882a593Smuzhiyun 			hmc_p->idx = palloc->level1.idx + req->sq_pages;
1463*4882a593Smuzhiyun 		} else {
1464*4882a593Smuzhiyun 			hmc_p->addr = arr[0];
1465*4882a593Smuzhiyun 			hmc_p = &qpmr->rq_pbl;
1466*4882a593Smuzhiyun 			hmc_p->addr = arr[req->sq_pages];
1467*4882a593Smuzhiyun 		}
1468*4882a593Smuzhiyun 	} else {		/* CQ */
1469*4882a593Smuzhiyun 		hmc_p = &cqmr->cq_pbl;
1470*4882a593Smuzhiyun 		cqmr->shadow = (dma_addr_t)arr[total];
1471*4882a593Smuzhiyun 
1472*4882a593Smuzhiyun 		if (use_pbles)
1473*4882a593Smuzhiyun 			ret = i40iw_check_mem_contiguous(arr, req->cq_pages, pg_size);
1474*4882a593Smuzhiyun 
1475*4882a593Smuzhiyun 		if (!ret)
1476*4882a593Smuzhiyun 			hmc_p->idx = palloc->level1.idx;
1477*4882a593Smuzhiyun 		else
1478*4882a593Smuzhiyun 			hmc_p->addr = arr[0];
1479*4882a593Smuzhiyun 	}
1480*4882a593Smuzhiyun 
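	/* A contiguous mapping does not need the pble allocation; release it. */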
1481*4882a593Smuzhiyun 	if (use_pbles && ret) {
1482*4882a593Smuzhiyun 		i40iw_free_pble(iwdev->pble_rsrc, palloc);
1483*4882a593Smuzhiyun 		iwpbl->pbl_allocated = false;
1484*4882a593Smuzhiyun 	}
1485*4882a593Smuzhiyun 
1486*4882a593Smuzhiyun 	return err;
1487*4882a593Smuzhiyun }
1488*4882a593Smuzhiyun 
1489*4882a593Smuzhiyun /**
1490*4882a593Smuzhiyun  * i40iw_hw_alloc_stag - cqp command to allocate stag
1491*4882a593Smuzhiyun  * @iwdev: iwarp device
1492*4882a593Smuzhiyun  * @iwmr: iwarp mr pointer
1493*4882a593Smuzhiyun  */
i40iw_hw_alloc_stag(struct i40iw_device * iwdev,struct i40iw_mr * iwmr)1494*4882a593Smuzhiyun static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr)
1495*4882a593Smuzhiyun {
1496*4882a593Smuzhiyun 	struct i40iw_allocate_stag_info *info;
1497*4882a593Smuzhiyun 	struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
1498*4882a593Smuzhiyun 	enum i40iw_status_code status;
1499*4882a593Smuzhiyun 	int err = 0;
1500*4882a593Smuzhiyun 	struct i40iw_cqp_request *cqp_request;
1501*4882a593Smuzhiyun 	struct cqp_commands_info *cqp_info;
1502*4882a593Smuzhiyun 
1503*4882a593Smuzhiyun 	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1504*4882a593Smuzhiyun 	if (!cqp_request)
1505*4882a593Smuzhiyun 		return -ENOMEM;
1506*4882a593Smuzhiyun 
1507*4882a593Smuzhiyun 	cqp_info = &cqp_request->info;
1508*4882a593Smuzhiyun 	info = &cqp_info->in.u.alloc_stag.info;
1509*4882a593Smuzhiyun 	memset(info, 0, sizeof(*info));
1510*4882a593Smuzhiyun 	info->page_size = PAGE_SIZE;
1511*4882a593Smuzhiyun 	info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
1512*4882a593Smuzhiyun 	info->pd_id = iwpd->sc_pd.pd_id;
1513*4882a593Smuzhiyun 	info->total_len = iwmr->length;
1514*4882a593Smuzhiyun 	info->remote_access = true;
1515*4882a593Smuzhiyun 	cqp_info->cqp_cmd = OP_ALLOC_STAG;
1516*4882a593Smuzhiyun 	cqp_info->post_sq = 1;
1517*4882a593Smuzhiyun 	cqp_info->in.u.alloc_stag.dev = &iwdev->sc_dev;
1518*4882a593Smuzhiyun 	cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
1519*4882a593Smuzhiyun 
1520*4882a593Smuzhiyun 	status = i40iw_handle_cqp_op(iwdev, cqp_request);
1521*4882a593Smuzhiyun 	if (status) {
1522*4882a593Smuzhiyun 		err = -ENOMEM;
1523*4882a593Smuzhiyun 		i40iw_pr_err("CQP-OP Alloc STag fail");
1524*4882a593Smuzhiyun 	}
1525*4882a593Smuzhiyun 	return err;
1526*4882a593Smuzhiyun }
1527*4882a593Smuzhiyun 
1528*4882a593Smuzhiyun /**
1529*4882a593Smuzhiyun  * i40iw_alloc_mr - register stag for fast memory registration
1530*4882a593Smuzhiyun  * @pd: ibpd pointer
1531*4882a593Smuzhiyun  * @mr_type: memory type for stag registration
1532*4882a593Smuzhiyun  * @max_num_sg: max number of pages
1533*4882a593Smuzhiyun  */
i40iw_alloc_mr(struct ib_pd * pd,enum ib_mr_type mr_type,u32 max_num_sg)1534*4882a593Smuzhiyun static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
1535*4882a593Smuzhiyun 				    u32 max_num_sg)
1536*4882a593Smuzhiyun {
1537*4882a593Smuzhiyun 	struct i40iw_pd *iwpd = to_iwpd(pd);
1538*4882a593Smuzhiyun 	struct i40iw_device *iwdev = to_iwdev(pd->device);
1539*4882a593Smuzhiyun 	struct i40iw_pble_alloc *palloc;
1540*4882a593Smuzhiyun 	struct i40iw_pbl *iwpbl;
1541*4882a593Smuzhiyun 	struct i40iw_mr *iwmr;
1542*4882a593Smuzhiyun 	enum i40iw_status_code status;
1543*4882a593Smuzhiyun 	u32 stag;
1544*4882a593Smuzhiyun 	int err_code = -ENOMEM;
1545*4882a593Smuzhiyun 
1546*4882a593Smuzhiyun 	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
1547*4882a593Smuzhiyun 	if (!iwmr)
1548*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
1549*4882a593Smuzhiyun 
1550*4882a593Smuzhiyun 	stag = i40iw_create_stag(iwdev);
1551*4882a593Smuzhiyun 	if (!stag) {
1552*4882a593Smuzhiyun 		err_code = -EOVERFLOW;
1553*4882a593Smuzhiyun 		goto err;
1554*4882a593Smuzhiyun 	}
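	/*
	 * Clear the consumer-key byte of the stag; the actual key is supplied
	 * later by the fast-register work request.
	 */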
1555*4882a593Smuzhiyun 	stag &= ~I40IW_CQPSQ_STAG_KEY_MASK;
1556*4882a593Smuzhiyun 	iwmr->stag = stag;
1557*4882a593Smuzhiyun 	iwmr->ibmr.rkey = stag;
1558*4882a593Smuzhiyun 	iwmr->ibmr.lkey = stag;
1559*4882a593Smuzhiyun 	iwmr->ibmr.pd = pd;
1560*4882a593Smuzhiyun 	iwmr->ibmr.device = pd->device;
1561*4882a593Smuzhiyun 	iwpbl = &iwmr->iwpbl;
1562*4882a593Smuzhiyun 	iwpbl->iwmr = iwmr;
1563*4882a593Smuzhiyun 	iwmr->type = IW_MEMREG_TYPE_MEM;
1564*4882a593Smuzhiyun 	palloc = &iwpbl->pble_alloc;
1565*4882a593Smuzhiyun 	iwmr->page_cnt = max_num_sg;
1566*4882a593Smuzhiyun 	mutex_lock(&iwdev->pbl_mutex);
1567*4882a593Smuzhiyun 	status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
1568*4882a593Smuzhiyun 	mutex_unlock(&iwdev->pbl_mutex);
1569*4882a593Smuzhiyun 	if (status)
1570*4882a593Smuzhiyun 		goto err1;
1571*4882a593Smuzhiyun 
1572*4882a593Smuzhiyun 	if (palloc->level != I40IW_LEVEL_1)
1573*4882a593Smuzhiyun 		goto err2;
1574*4882a593Smuzhiyun 	err_code = i40iw_hw_alloc_stag(iwdev, iwmr);
1575*4882a593Smuzhiyun 	if (err_code)
1576*4882a593Smuzhiyun 		goto err2;
1577*4882a593Smuzhiyun 	iwpbl->pbl_allocated = true;
1578*4882a593Smuzhiyun 	i40iw_add_pdusecount(iwpd);
1579*4882a593Smuzhiyun 	return &iwmr->ibmr;
1580*4882a593Smuzhiyun err2:
1581*4882a593Smuzhiyun 	i40iw_free_pble(iwdev->pble_rsrc, palloc);
1582*4882a593Smuzhiyun err1:
1583*4882a593Smuzhiyun 	i40iw_free_stag(iwdev, stag);
1584*4882a593Smuzhiyun err:
1585*4882a593Smuzhiyun 	kfree(iwmr);
1586*4882a593Smuzhiyun 	return ERR_PTR(err_code);
1587*4882a593Smuzhiyun }
1588*4882a593Smuzhiyun 
1589*4882a593Smuzhiyun /**
1590*4882a593Smuzhiyun  * i40iw_set_page - populate pbl list for fmr
1591*4882a593Smuzhiyun  * @ibmr: ib mem to access iwarp mr pointer
1592*4882a593Smuzhiyun  * @addr: page dma address for pbl list
1593*4882a593Smuzhiyun  */
i40iw_set_page(struct ib_mr * ibmr,u64 addr)1594*4882a593Smuzhiyun static int i40iw_set_page(struct ib_mr *ibmr, u64 addr)
1595*4882a593Smuzhiyun {
1596*4882a593Smuzhiyun 	struct i40iw_mr *iwmr = to_iwmr(ibmr);
1597*4882a593Smuzhiyun 	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1598*4882a593Smuzhiyun 	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1599*4882a593Smuzhiyun 	u64 *pbl;
1600*4882a593Smuzhiyun 
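	/*
	 * Append the DMA address to the level-1 pble backing this fast-reg MR;
	 * fail once the preallocated page count has been reached.
	 */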
1601*4882a593Smuzhiyun 	if (unlikely(iwmr->npages == iwmr->page_cnt))
1602*4882a593Smuzhiyun 		return -ENOMEM;
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun 	pbl = (u64 *)palloc->level1.addr;
1605*4882a593Smuzhiyun 	pbl[iwmr->npages++] = cpu_to_le64(addr);
1606*4882a593Smuzhiyun 	return 0;
1607*4882a593Smuzhiyun }
1608*4882a593Smuzhiyun 
1609*4882a593Smuzhiyun /**
1610*4882a593Smuzhiyun  * i40iw_map_mr_sg - map of sg list for fmr
1611*4882a593Smuzhiyun  * @ibmr: ib mem to access iwarp mr pointer
1612*4882a593Smuzhiyun  * @sg: scatter gather list for fmr
1613*4882a593Smuzhiyun  * @sg_nents: number of sg entries
1614*4882a593Smuzhiyun  */
i40iw_map_mr_sg(struct ib_mr * ibmr,struct scatterlist * sg,int sg_nents,unsigned int * sg_offset)1615*4882a593Smuzhiyun static int i40iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
1616*4882a593Smuzhiyun 			   int sg_nents, unsigned int *sg_offset)
1617*4882a593Smuzhiyun {
1618*4882a593Smuzhiyun 	struct i40iw_mr *iwmr = to_iwmr(ibmr);
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun 	iwmr->npages = 0;
1621*4882a593Smuzhiyun 	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, i40iw_set_page);
1622*4882a593Smuzhiyun }
1623*4882a593Smuzhiyun 
1624*4882a593Smuzhiyun /**
1625*4882a593Smuzhiyun  * i40iw_drain_sq - drain the send queue
1626*4882a593Smuzhiyun  * @ibqp: ib qp pointer
1627*4882a593Smuzhiyun  */
i40iw_drain_sq(struct ib_qp * ibqp)1628*4882a593Smuzhiyun static void i40iw_drain_sq(struct ib_qp *ibqp)
1629*4882a593Smuzhiyun {
1630*4882a593Smuzhiyun 	struct i40iw_qp *iwqp = to_iwqp(ibqp);
1631*4882a593Smuzhiyun 	struct i40iw_sc_qp *qp = &iwqp->sc_qp;
1632*4882a593Smuzhiyun 
1633*4882a593Smuzhiyun 	if (I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
1634*4882a593Smuzhiyun 		wait_for_completion(&iwqp->sq_drained);
1635*4882a593Smuzhiyun }
1636*4882a593Smuzhiyun 
1637*4882a593Smuzhiyun /**
1638*4882a593Smuzhiyun  * i40iw_drain_rq - drain the receive queue
1639*4882a593Smuzhiyun  * @ibqp: ib qp pointer
1640*4882a593Smuzhiyun  */
i40iw_drain_rq(struct ib_qp * ibqp)1641*4882a593Smuzhiyun static void i40iw_drain_rq(struct ib_qp *ibqp)
1642*4882a593Smuzhiyun {
1643*4882a593Smuzhiyun 	struct i40iw_qp *iwqp = to_iwqp(ibqp);
1644*4882a593Smuzhiyun 	struct i40iw_sc_qp *qp = &iwqp->sc_qp;
1645*4882a593Smuzhiyun 
1646*4882a593Smuzhiyun 	if (I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
1647*4882a593Smuzhiyun 		wait_for_completion(&iwqp->rq_drained);
1648*4882a593Smuzhiyun }
1649*4882a593Smuzhiyun 
1650*4882a593Smuzhiyun /**
1651*4882a593Smuzhiyun  * i40iw_hwreg_mr - send cqp command for memory registration
1652*4882a593Smuzhiyun  * @iwdev: iwarp device
1653*4882a593Smuzhiyun  * @iwmr: iwarp mr pointer
1654*4882a593Smuzhiyun  * @access: access for MR
1655*4882a593Smuzhiyun  */
i40iw_hwreg_mr(struct i40iw_device * iwdev,struct i40iw_mr * iwmr,u16 access)1656*4882a593Smuzhiyun static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
1657*4882a593Smuzhiyun 			  struct i40iw_mr *iwmr,
1658*4882a593Smuzhiyun 			  u16 access)
1659*4882a593Smuzhiyun {
1660*4882a593Smuzhiyun 	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1661*4882a593Smuzhiyun 	struct i40iw_reg_ns_stag_info *stag_info;
1662*4882a593Smuzhiyun 	struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
1663*4882a593Smuzhiyun 	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1664*4882a593Smuzhiyun 	enum i40iw_status_code status;
1665*4882a593Smuzhiyun 	int err = 0;
1666*4882a593Smuzhiyun 	struct i40iw_cqp_request *cqp_request;
1667*4882a593Smuzhiyun 	struct cqp_commands_info *cqp_info;
1668*4882a593Smuzhiyun 
1669*4882a593Smuzhiyun 	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1670*4882a593Smuzhiyun 	if (!cqp_request)
1671*4882a593Smuzhiyun 		return -ENOMEM;
1672*4882a593Smuzhiyun 
1673*4882a593Smuzhiyun 	cqp_info = &cqp_request->info;
1674*4882a593Smuzhiyun 	stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
1675*4882a593Smuzhiyun 	memset(stag_info, 0, sizeof(*stag_info));
1676*4882a593Smuzhiyun 	stag_info->va = (void *)(unsigned long)iwpbl->user_base;
1677*4882a593Smuzhiyun 	stag_info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
1678*4882a593Smuzhiyun 	stag_info->stag_key = (u8)iwmr->stag;
1679*4882a593Smuzhiyun 	stag_info->total_len = iwmr->length;
1680*4882a593Smuzhiyun 	stag_info->access_rights = access;
1681*4882a593Smuzhiyun 	stag_info->pd_id = iwpd->sc_pd.pd_id;
1682*4882a593Smuzhiyun 	stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;
1683*4882a593Smuzhiyun 	stag_info->page_size = iwmr->page_size;
1684*4882a593Smuzhiyun 
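	/*
	 * Describe the pble layout to the HW: a level-1 allocation passes the
	 * leaf index, a level-2 allocation passes the root index, and a
	 * single-page (non-pble) registration passes the physical address.
	 */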
1685*4882a593Smuzhiyun 	if (iwpbl->pbl_allocated) {
1686*4882a593Smuzhiyun 		if (palloc->level == I40IW_LEVEL_1) {
1687*4882a593Smuzhiyun 			stag_info->first_pm_pbl_index = palloc->level1.idx;
1688*4882a593Smuzhiyun 			stag_info->chunk_size = 1;
1689*4882a593Smuzhiyun 		} else {
1690*4882a593Smuzhiyun 			stag_info->first_pm_pbl_index = palloc->level2.root.idx;
1691*4882a593Smuzhiyun 			stag_info->chunk_size = 3;
1692*4882a593Smuzhiyun 		}
1693*4882a593Smuzhiyun 	} else {
1694*4882a593Smuzhiyun 		stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
1695*4882a593Smuzhiyun 	}
1696*4882a593Smuzhiyun 
1697*4882a593Smuzhiyun 	cqp_info->cqp_cmd = OP_MR_REG_NON_SHARED;
1698*4882a593Smuzhiyun 	cqp_info->post_sq = 1;
1699*4882a593Smuzhiyun 	cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->sc_dev;
1700*4882a593Smuzhiyun 	cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
1701*4882a593Smuzhiyun 
1702*4882a593Smuzhiyun 	status = i40iw_handle_cqp_op(iwdev, cqp_request);
1703*4882a593Smuzhiyun 	if (status) {
1704*4882a593Smuzhiyun 		err = -ENOMEM;
1705*4882a593Smuzhiyun 		i40iw_pr_err("CQP-OP MR Reg fail");
1706*4882a593Smuzhiyun 	}
1707*4882a593Smuzhiyun 	return err;
1708*4882a593Smuzhiyun }
1709*4882a593Smuzhiyun 
1710*4882a593Smuzhiyun /**
1711*4882a593Smuzhiyun  * i40iw_reg_user_mr - Register a user memory region
1712*4882a593Smuzhiyun  * @pd: ptr of pd
1713*4882a593Smuzhiyun  * @start: virtual start address
1714*4882a593Smuzhiyun  * @length: length of mr
1715*4882a593Smuzhiyun  * @virt: virtual address
1716*4882a593Smuzhiyun  * @acc: access of mr
1717*4882a593Smuzhiyun  * @udata: user data
1718*4882a593Smuzhiyun  */
i40iw_reg_user_mr(struct ib_pd * pd,u64 start,u64 length,u64 virt,int acc,struct ib_udata * udata)1719*4882a593Smuzhiyun static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
1720*4882a593Smuzhiyun 				       u64 start,
1721*4882a593Smuzhiyun 				       u64 length,
1722*4882a593Smuzhiyun 				       u64 virt,
1723*4882a593Smuzhiyun 				       int acc,
1724*4882a593Smuzhiyun 				       struct ib_udata *udata)
1725*4882a593Smuzhiyun {
1726*4882a593Smuzhiyun 	struct i40iw_pd *iwpd = to_iwpd(pd);
1727*4882a593Smuzhiyun 	struct i40iw_device *iwdev = to_iwdev(pd->device);
1728*4882a593Smuzhiyun 	struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
1729*4882a593Smuzhiyun 		udata, struct i40iw_ucontext, ibucontext);
1730*4882a593Smuzhiyun 	struct i40iw_pble_alloc *palloc;
1731*4882a593Smuzhiyun 	struct i40iw_pbl *iwpbl;
1732*4882a593Smuzhiyun 	struct i40iw_mr *iwmr;
1733*4882a593Smuzhiyun 	struct ib_umem *region;
1734*4882a593Smuzhiyun 	struct i40iw_mem_reg_req req;
1735*4882a593Smuzhiyun 	u32 stag = 0;
1736*4882a593Smuzhiyun 	u16 access;
1737*4882a593Smuzhiyun 	bool use_pbles = false;
1738*4882a593Smuzhiyun 	unsigned long flags;
1739*4882a593Smuzhiyun 	int err = -ENOSYS;
1740*4882a593Smuzhiyun 	int ret;
1741*4882a593Smuzhiyun 
1742*4882a593Smuzhiyun 	if (!udata)
1743*4882a593Smuzhiyun 		return ERR_PTR(-EOPNOTSUPP);
1744*4882a593Smuzhiyun 
1745*4882a593Smuzhiyun 	if (iwdev->closing)
1746*4882a593Smuzhiyun 		return ERR_PTR(-ENODEV);
1747*4882a593Smuzhiyun 
1748*4882a593Smuzhiyun 	if (length > I40IW_MAX_MR_SIZE)
1749*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
1750*4882a593Smuzhiyun 	region = ib_umem_get(pd->device, start, length, acc);
1751*4882a593Smuzhiyun 	if (IS_ERR(region))
1752*4882a593Smuzhiyun 		return (struct ib_mr *)region;
1753*4882a593Smuzhiyun 
1754*4882a593Smuzhiyun 	if (ib_copy_from_udata(&req, udata, sizeof(req))) {
1755*4882a593Smuzhiyun 		ib_umem_release(region);
1756*4882a593Smuzhiyun 		return ERR_PTR(-EFAULT);
1757*4882a593Smuzhiyun 	}
1758*4882a593Smuzhiyun 
1759*4882a593Smuzhiyun 	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
1760*4882a593Smuzhiyun 	if (!iwmr) {
1761*4882a593Smuzhiyun 		ib_umem_release(region);
1762*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
1763*4882a593Smuzhiyun 	}
1764*4882a593Smuzhiyun 
1765*4882a593Smuzhiyun 	iwpbl = &iwmr->iwpbl;
1766*4882a593Smuzhiyun 	iwpbl->iwmr = iwmr;
1767*4882a593Smuzhiyun 	iwmr->region = region;
1768*4882a593Smuzhiyun 	iwmr->ibmr.pd = pd;
1769*4882a593Smuzhiyun 	iwmr->ibmr.device = pd->device;
1770*4882a593Smuzhiyun 
1771*4882a593Smuzhiyun 	iwmr->page_size = PAGE_SIZE;
1772*4882a593Smuzhiyun 	if (req.reg_type == IW_MEMREG_TYPE_MEM)
1773*4882a593Smuzhiyun 		iwmr->page_size = ib_umem_find_best_pgsz(region, SZ_4K | SZ_2M,
1774*4882a593Smuzhiyun 							 virt);
1775*4882a593Smuzhiyun 	iwmr->length = region->length;
1776*4882a593Smuzhiyun 
1777*4882a593Smuzhiyun 	iwpbl->user_base = virt;
1778*4882a593Smuzhiyun 	palloc = &iwpbl->pble_alloc;
1779*4882a593Smuzhiyun 
1780*4882a593Smuzhiyun 	iwmr->type = req.reg_type;
1781*4882a593Smuzhiyun 	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
1782*4882a593Smuzhiyun 
1783*4882a593Smuzhiyun 	switch (req.reg_type) {
1784*4882a593Smuzhiyun 	case IW_MEMREG_TYPE_QP:
1785*4882a593Smuzhiyun 		use_pbles = ((req.sq_pages + req.rq_pages) > 2);
1786*4882a593Smuzhiyun 		err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
1787*4882a593Smuzhiyun 		if (err)
1788*4882a593Smuzhiyun 			goto error;
1789*4882a593Smuzhiyun 		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
1790*4882a593Smuzhiyun 		list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
1791*4882a593Smuzhiyun 		iwpbl->on_list = true;
1792*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
1793*4882a593Smuzhiyun 		break;
1794*4882a593Smuzhiyun 	case IW_MEMREG_TYPE_CQ:
1795*4882a593Smuzhiyun 		use_pbles = (req.cq_pages > 1);
1796*4882a593Smuzhiyun 		err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
1797*4882a593Smuzhiyun 		if (err)
1798*4882a593Smuzhiyun 			goto error;
1799*4882a593Smuzhiyun 
1800*4882a593Smuzhiyun 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1801*4882a593Smuzhiyun 		list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
1802*4882a593Smuzhiyun 		iwpbl->on_list = true;
1803*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1804*4882a593Smuzhiyun 		break;
1805*4882a593Smuzhiyun 	case IW_MEMREG_TYPE_MEM:
1806*4882a593Smuzhiyun 		use_pbles = (iwmr->page_cnt != 1);
1807*4882a593Smuzhiyun 		access = I40IW_ACCESS_FLAGS_LOCALREAD;
1808*4882a593Smuzhiyun 
1809*4882a593Smuzhiyun 		err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
1810*4882a593Smuzhiyun 		if (err)
1811*4882a593Smuzhiyun 			goto error;
1812*4882a593Smuzhiyun 
1813*4882a593Smuzhiyun 		if (use_pbles) {
1814*4882a593Smuzhiyun 			ret = i40iw_check_mr_contiguous(palloc, iwmr->page_size);
1815*4882a593Smuzhiyun 			if (ret) {
1816*4882a593Smuzhiyun 				i40iw_free_pble(iwdev->pble_rsrc, palloc);
1817*4882a593Smuzhiyun 				iwpbl->pbl_allocated = false;
1818*4882a593Smuzhiyun 			}
1819*4882a593Smuzhiyun 		}
1820*4882a593Smuzhiyun 
1821*4882a593Smuzhiyun 		access |= i40iw_get_user_access(acc);
1822*4882a593Smuzhiyun 		stag = i40iw_create_stag(iwdev);
1823*4882a593Smuzhiyun 		if (!stag) {
1824*4882a593Smuzhiyun 			err = -ENOMEM;
1825*4882a593Smuzhiyun 			goto error;
1826*4882a593Smuzhiyun 		}
1827*4882a593Smuzhiyun 
1828*4882a593Smuzhiyun 		iwmr->stag = stag;
1829*4882a593Smuzhiyun 		iwmr->ibmr.rkey = stag;
1830*4882a593Smuzhiyun 		iwmr->ibmr.lkey = stag;
1831*4882a593Smuzhiyun 
1832*4882a593Smuzhiyun 		err = i40iw_hwreg_mr(iwdev, iwmr, access);
1833*4882a593Smuzhiyun 		if (err) {
1834*4882a593Smuzhiyun 			i40iw_free_stag(iwdev, stag);
1835*4882a593Smuzhiyun 			goto error;
1836*4882a593Smuzhiyun 		}
1837*4882a593Smuzhiyun 
1838*4882a593Smuzhiyun 		break;
1839*4882a593Smuzhiyun 	default:
1840*4882a593Smuzhiyun 		goto error;
1841*4882a593Smuzhiyun 	}
1842*4882a593Smuzhiyun 
1843*4882a593Smuzhiyun 	iwmr->type = req.reg_type;
1844*4882a593Smuzhiyun 	if (req.reg_type == IW_MEMREG_TYPE_MEM)
1845*4882a593Smuzhiyun 		i40iw_add_pdusecount(iwpd);
1846*4882a593Smuzhiyun 	return &iwmr->ibmr;
1847*4882a593Smuzhiyun 
1848*4882a593Smuzhiyun error:
1849*4882a593Smuzhiyun 	if (palloc->level != I40IW_LEVEL_0 && iwpbl->pbl_allocated)
1850*4882a593Smuzhiyun 		i40iw_free_pble(iwdev->pble_rsrc, palloc);
1851*4882a593Smuzhiyun 	ib_umem_release(region);
1852*4882a593Smuzhiyun 	kfree(iwmr);
1853*4882a593Smuzhiyun 	return ERR_PTR(err);
1854*4882a593Smuzhiyun }
1855*4882a593Smuzhiyun 
1856*4882a593Smuzhiyun /**
1857*4882a593Smuzhiyun  * i40iw_reg_phys_mr - register kernel physical memory
1858*4882a593Smuzhiyun  * @pd: ibpd pointer
1859*4882a593Smuzhiyun  * @addr: physical address of memory to register
1860*4882a593Smuzhiyun  * @size: size of memory to register
1861*4882a593Smuzhiyun  * @acc: Access rights
1862*4882a593Smuzhiyun  * @iova_start: start of virtual address for physical buffers
1863*4882a593Smuzhiyun  */
i40iw_reg_phys_mr(struct ib_pd * pd,u64 addr,u64 size,int acc,u64 * iova_start)1864*4882a593Smuzhiyun struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *pd,
1865*4882a593Smuzhiyun 				u64 addr,
1866*4882a593Smuzhiyun 				u64 size,
1867*4882a593Smuzhiyun 				int acc,
1868*4882a593Smuzhiyun 				u64 *iova_start)
1869*4882a593Smuzhiyun {
1870*4882a593Smuzhiyun 	struct i40iw_pd *iwpd = to_iwpd(pd);
1871*4882a593Smuzhiyun 	struct i40iw_device *iwdev = to_iwdev(pd->device);
1872*4882a593Smuzhiyun 	struct i40iw_pbl *iwpbl;
1873*4882a593Smuzhiyun 	struct i40iw_mr *iwmr;
1874*4882a593Smuzhiyun 	enum i40iw_status_code status;
1875*4882a593Smuzhiyun 	u32 stag;
1876*4882a593Smuzhiyun 	u16 access = I40IW_ACCESS_FLAGS_LOCALREAD;
1877*4882a593Smuzhiyun 	int ret;
1878*4882a593Smuzhiyun 
1879*4882a593Smuzhiyun 	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
1880*4882a593Smuzhiyun 	if (!iwmr)
1881*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
1882*4882a593Smuzhiyun 	iwmr->ibmr.pd = pd;
1883*4882a593Smuzhiyun 	iwmr->ibmr.device = pd->device;
1884*4882a593Smuzhiyun 	iwpbl = &iwmr->iwpbl;
1885*4882a593Smuzhiyun 	iwpbl->iwmr = iwmr;
1886*4882a593Smuzhiyun 	iwmr->type = IW_MEMREG_TYPE_MEM;
1887*4882a593Smuzhiyun 	iwpbl->user_base = *iova_start;
1888*4882a593Smuzhiyun 	stag = i40iw_create_stag(iwdev);
1889*4882a593Smuzhiyun 	if (!stag) {
1890*4882a593Smuzhiyun 		ret = -EOVERFLOW;
1891*4882a593Smuzhiyun 		goto err;
1892*4882a593Smuzhiyun 	}
1893*4882a593Smuzhiyun 	access |= i40iw_get_user_access(acc);
1894*4882a593Smuzhiyun 	iwmr->stag = stag;
1895*4882a593Smuzhiyun 	iwmr->ibmr.rkey = stag;
1896*4882a593Smuzhiyun 	iwmr->ibmr.lkey = stag;
1897*4882a593Smuzhiyun 	iwmr->page_cnt = 1;
1898*4882a593Smuzhiyun 	iwmr->pgaddrmem[0]  = addr;
1899*4882a593Smuzhiyun 	iwmr->length = size;
1900*4882a593Smuzhiyun 	status = i40iw_hwreg_mr(iwdev, iwmr, access);
1901*4882a593Smuzhiyun 	if (status) {
1902*4882a593Smuzhiyun 		i40iw_free_stag(iwdev, stag);
1903*4882a593Smuzhiyun 		ret = -ENOMEM;
1904*4882a593Smuzhiyun 		goto err;
1905*4882a593Smuzhiyun 	}
1906*4882a593Smuzhiyun 
1907*4882a593Smuzhiyun 	i40iw_add_pdusecount(iwpd);
1908*4882a593Smuzhiyun 	return &iwmr->ibmr;
1909*4882a593Smuzhiyun  err:
1910*4882a593Smuzhiyun 	kfree(iwmr);
1911*4882a593Smuzhiyun 	return ERR_PTR(ret);
1912*4882a593Smuzhiyun }
1913*4882a593Smuzhiyun 
1914*4882a593Smuzhiyun /**
1915*4882a593Smuzhiyun  * i40iw_get_dma_mr - register physical mem
1916*4882a593Smuzhiyun  * @pd: ptr of pd
1917*4882a593Smuzhiyun  * @acc: access for memory
1918*4882a593Smuzhiyun  */
i40iw_get_dma_mr(struct ib_pd * pd,int acc)1919*4882a593Smuzhiyun static struct ib_mr *i40iw_get_dma_mr(struct ib_pd *pd, int acc)
1920*4882a593Smuzhiyun {
1921*4882a593Smuzhiyun 	u64 kva = 0;
1922*4882a593Smuzhiyun 
1923*4882a593Smuzhiyun 	return i40iw_reg_phys_mr(pd, 0, 0, acc, &kva);
1924*4882a593Smuzhiyun }
1925*4882a593Smuzhiyun 
1926*4882a593Smuzhiyun /**
1927*4882a593Smuzhiyun  * i40iw_del_memlist - delete pbl list entries for CQ/QP
1928*4882a593Smuzhiyun  * @iwmr: iwmr for IB's user page addresses
1929*4882a593Smuzhiyun  * @ucontext: ptr to user context
1930*4882a593Smuzhiyun  */
i40iw_del_memlist(struct i40iw_mr * iwmr,struct i40iw_ucontext * ucontext)1931*4882a593Smuzhiyun static void i40iw_del_memlist(struct i40iw_mr *iwmr,
1932*4882a593Smuzhiyun 			      struct i40iw_ucontext *ucontext)
1933*4882a593Smuzhiyun {
1934*4882a593Smuzhiyun 	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1935*4882a593Smuzhiyun 	unsigned long flags;
1936*4882a593Smuzhiyun 
1937*4882a593Smuzhiyun 	switch (iwmr->type) {
1938*4882a593Smuzhiyun 	case IW_MEMREG_TYPE_CQ:
1939*4882a593Smuzhiyun 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1940*4882a593Smuzhiyun 		if (iwpbl->on_list) {
1941*4882a593Smuzhiyun 			iwpbl->on_list = false;
1942*4882a593Smuzhiyun 			list_del(&iwpbl->list);
1943*4882a593Smuzhiyun 		}
1944*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1945*4882a593Smuzhiyun 		break;
1946*4882a593Smuzhiyun 	case IW_MEMREG_TYPE_QP:
1947*4882a593Smuzhiyun 		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
1948*4882a593Smuzhiyun 		if (iwpbl->on_list) {
1949*4882a593Smuzhiyun 			iwpbl->on_list = false;
1950*4882a593Smuzhiyun 			list_del(&iwpbl->list);
1951*4882a593Smuzhiyun 		}
1952*4882a593Smuzhiyun 		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
1953*4882a593Smuzhiyun 		break;
1954*4882a593Smuzhiyun 	default:
1955*4882a593Smuzhiyun 		break;
1956*4882a593Smuzhiyun 	}
1957*4882a593Smuzhiyun }
1958*4882a593Smuzhiyun 
1959*4882a593Smuzhiyun /**
1960*4882a593Smuzhiyun  * i40iw_dereg_mr - deregister mr
1961*4882a593Smuzhiyun  * @ib_mr: mr ptr for dereg
1962*4882a593Smuzhiyun  */
i40iw_dereg_mr(struct ib_mr * ib_mr,struct ib_udata * udata)1963*4882a593Smuzhiyun static int i40iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
1964*4882a593Smuzhiyun {
1965*4882a593Smuzhiyun 	struct ib_pd *ibpd = ib_mr->pd;
1966*4882a593Smuzhiyun 	struct i40iw_pd *iwpd = to_iwpd(ibpd);
1967*4882a593Smuzhiyun 	struct i40iw_mr *iwmr = to_iwmr(ib_mr);
1968*4882a593Smuzhiyun 	struct i40iw_device *iwdev = to_iwdev(ib_mr->device);
1969*4882a593Smuzhiyun 	enum i40iw_status_code status;
1970*4882a593Smuzhiyun 	struct i40iw_dealloc_stag_info *info;
1971*4882a593Smuzhiyun 	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1972*4882a593Smuzhiyun 	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1973*4882a593Smuzhiyun 	struct i40iw_cqp_request *cqp_request;
1974*4882a593Smuzhiyun 	struct cqp_commands_info *cqp_info;
1975*4882a593Smuzhiyun 	u32 stag_idx;
1976*4882a593Smuzhiyun 
1977*4882a593Smuzhiyun 	ib_umem_release(iwmr->region);
1978*4882a593Smuzhiyun 
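	/*
	 * QP and CQ registrations never allocated a HW stag: remove them from
	 * the per-context list and, except for QP memory, release any pbles.
	 */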
1979*4882a593Smuzhiyun 	if (iwmr->type != IW_MEMREG_TYPE_MEM) {
1980*4882a593Smuzhiyun 		/* region was released above; a non-NULL pointer only means this was a user MR */
1981*4882a593Smuzhiyun 		if (iwmr->region) {
1982*4882a593Smuzhiyun 			struct i40iw_ucontext *ucontext =
1983*4882a593Smuzhiyun 				rdma_udata_to_drv_context(
1984*4882a593Smuzhiyun 					udata,
1985*4882a593Smuzhiyun 					struct i40iw_ucontext,
1986*4882a593Smuzhiyun 					ibucontext);
1987*4882a593Smuzhiyun 
1988*4882a593Smuzhiyun 			i40iw_del_memlist(iwmr, ucontext);
1989*4882a593Smuzhiyun 		}
1990*4882a593Smuzhiyun 		if (iwpbl->pbl_allocated && iwmr->type != IW_MEMREG_TYPE_QP)
1991*4882a593Smuzhiyun 			i40iw_free_pble(iwdev->pble_rsrc, palloc);
1992*4882a593Smuzhiyun 		kfree(iwmr);
1993*4882a593Smuzhiyun 		return 0;
1994*4882a593Smuzhiyun 	}
1995*4882a593Smuzhiyun 
1996*4882a593Smuzhiyun 	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1997*4882a593Smuzhiyun 	if (!cqp_request)
1998*4882a593Smuzhiyun 		return -ENOMEM;
1999*4882a593Smuzhiyun 
2000*4882a593Smuzhiyun 	cqp_info = &cqp_request->info;
2001*4882a593Smuzhiyun 	info = &cqp_info->in.u.dealloc_stag.info;
2002*4882a593Smuzhiyun 	memset(info, 0, sizeof(*info));
2003*4882a593Smuzhiyun 
2004*4882a593Smuzhiyun 	info->pd_id = cpu_to_le32(iwpd->sc_pd.pd_id & 0x00007fff);
2005*4882a593Smuzhiyun 	info->stag_idx = RS_64_1(ib_mr->rkey, I40IW_CQPSQ_STAG_IDX_SHIFT);
2006*4882a593Smuzhiyun 	stag_idx = info->stag_idx;
2007*4882a593Smuzhiyun 	info->mr = true;
2008*4882a593Smuzhiyun 	if (iwpbl->pbl_allocated)
2009*4882a593Smuzhiyun 		info->dealloc_pbl = true;
2010*4882a593Smuzhiyun 
2011*4882a593Smuzhiyun 	cqp_info->cqp_cmd = OP_DEALLOC_STAG;
2012*4882a593Smuzhiyun 	cqp_info->post_sq = 1;
2013*4882a593Smuzhiyun 	cqp_info->in.u.dealloc_stag.dev = &iwdev->sc_dev;
2014*4882a593Smuzhiyun 	cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
2015*4882a593Smuzhiyun 	status = i40iw_handle_cqp_op(iwdev, cqp_request);
2016*4882a593Smuzhiyun 	if (status)
2017*4882a593Smuzhiyun 		i40iw_pr_err("CQP-OP dealloc failed for stag_idx = 0x%x\n", stag_idx);
2018*4882a593Smuzhiyun 	i40iw_rem_pdusecount(iwpd, iwdev);
2019*4882a593Smuzhiyun 	i40iw_free_stag(iwdev, iwmr->stag);
2020*4882a593Smuzhiyun 	if (iwpbl->pbl_allocated)
2021*4882a593Smuzhiyun 		i40iw_free_pble(iwdev->pble_rsrc, palloc);
2022*4882a593Smuzhiyun 	kfree(iwmr);
2023*4882a593Smuzhiyun 	return 0;
2024*4882a593Smuzhiyun }
2025*4882a593Smuzhiyun 
2026*4882a593Smuzhiyun /**
2027*4882a593Smuzhiyun  * hw_rev_show
2028*4882a593Smuzhiyun  */
hw_rev_show(struct device * dev,struct device_attribute * attr,char * buf)2029*4882a593Smuzhiyun static ssize_t hw_rev_show(struct device *dev,
2030*4882a593Smuzhiyun 			   struct device_attribute *attr, char *buf)
2031*4882a593Smuzhiyun {
2032*4882a593Smuzhiyun 	struct i40iw_ib_device *iwibdev =
2033*4882a593Smuzhiyun 		rdma_device_to_drv_device(dev, struct i40iw_ib_device, ibdev);
2034*4882a593Smuzhiyun 	u32 hw_rev = iwibdev->iwdev->sc_dev.hw_rev;
2035*4882a593Smuzhiyun 
2036*4882a593Smuzhiyun 	return sprintf(buf, "%x\n", hw_rev);
2037*4882a593Smuzhiyun }
2038*4882a593Smuzhiyun static DEVICE_ATTR_RO(hw_rev);
2039*4882a593Smuzhiyun 
2040*4882a593Smuzhiyun /**
2041*4882a593Smuzhiyun  * hca_type_show
2042*4882a593Smuzhiyun  */
hca_type_show(struct device * dev,struct device_attribute * attr,char * buf)2043*4882a593Smuzhiyun static ssize_t hca_type_show(struct device *dev,
2044*4882a593Smuzhiyun 			     struct device_attribute *attr, char *buf)
2045*4882a593Smuzhiyun {
2046*4882a593Smuzhiyun 	return sprintf(buf, "I40IW\n");
2047*4882a593Smuzhiyun }
2048*4882a593Smuzhiyun static DEVICE_ATTR_RO(hca_type);
2049*4882a593Smuzhiyun 
2050*4882a593Smuzhiyun /**
2051*4882a593Smuzhiyun  * board_id_show
2052*4882a593Smuzhiyun  */
board_id_show(struct device * dev,struct device_attribute * attr,char * buf)2053*4882a593Smuzhiyun static ssize_t board_id_show(struct device *dev,
2054*4882a593Smuzhiyun 			     struct device_attribute *attr, char *buf)
2055*4882a593Smuzhiyun {
2056*4882a593Smuzhiyun 	return sprintf(buf, "%.*s\n", 32, "I40IW Board ID");
2057*4882a593Smuzhiyun }
2058*4882a593Smuzhiyun static DEVICE_ATTR_RO(board_id);
2059*4882a593Smuzhiyun 
2060*4882a593Smuzhiyun static struct attribute *i40iw_dev_attributes[] = {
2061*4882a593Smuzhiyun 	&dev_attr_hw_rev.attr,
2062*4882a593Smuzhiyun 	&dev_attr_hca_type.attr,
2063*4882a593Smuzhiyun 	&dev_attr_board_id.attr,
2064*4882a593Smuzhiyun 	NULL
2065*4882a593Smuzhiyun };
2066*4882a593Smuzhiyun 
2067*4882a593Smuzhiyun static const struct attribute_group i40iw_attr_group = {
2068*4882a593Smuzhiyun 	.attrs = i40iw_dev_attributes,
2069*4882a593Smuzhiyun };
2070*4882a593Smuzhiyun 
2071*4882a593Smuzhiyun /**
2072*4882a593Smuzhiyun  * i40iw_copy_sg_list - copy sg list for qp
2073*4882a593Smuzhiyun  * @sg_list: copied into sg_list
2074*4882a593Smuzhiyun  * @sgl: copy from sgl
2075*4882a593Smuzhiyun  * @num_sges: count of sg entries
2076*4882a593Smuzhiyun  */
i40iw_copy_sg_list(struct i40iw_sge * sg_list,struct ib_sge * sgl,int num_sges)2077*4882a593Smuzhiyun static void i40iw_copy_sg_list(struct i40iw_sge *sg_list, struct ib_sge *sgl, int num_sges)
2078*4882a593Smuzhiyun {
2079*4882a593Smuzhiyun 	unsigned int i;
2080*4882a593Smuzhiyun 
2081*4882a593Smuzhiyun 	for (i = 0; (i < num_sges) && (i < I40IW_MAX_WQ_FRAGMENT_COUNT); i++) {
2082*4882a593Smuzhiyun 		sg_list[i].tag_off = sgl[i].addr;
2083*4882a593Smuzhiyun 		sg_list[i].len = sgl[i].length;
2084*4882a593Smuzhiyun 		sg_list[i].stag = sgl[i].lkey;
2085*4882a593Smuzhiyun 	}
2086*4882a593Smuzhiyun }
2087*4882a593Smuzhiyun 
2088*4882a593Smuzhiyun /**
2089*4882a593Smuzhiyun  * i40iw_post_send -  kernel application wr
2090*4882a593Smuzhiyun  * @ibqp: qp ptr for wr
2091*4882a593Smuzhiyun  * @ib_wr: work request ptr
2092*4882a593Smuzhiyun  * @bad_wr: return of bad wr if err
2093*4882a593Smuzhiyun  */
i40iw_post_send(struct ib_qp * ibqp,const struct ib_send_wr * ib_wr,const struct ib_send_wr ** bad_wr)2094*4882a593Smuzhiyun static int i40iw_post_send(struct ib_qp *ibqp,
2095*4882a593Smuzhiyun 			   const struct ib_send_wr *ib_wr,
2096*4882a593Smuzhiyun 			   const struct ib_send_wr **bad_wr)
2097*4882a593Smuzhiyun {
2098*4882a593Smuzhiyun 	struct i40iw_qp *iwqp;
2099*4882a593Smuzhiyun 	struct i40iw_qp_uk *ukqp;
2100*4882a593Smuzhiyun 	struct i40iw_post_sq_info info;
2101*4882a593Smuzhiyun 	enum i40iw_status_code ret;
2102*4882a593Smuzhiyun 	int err = 0;
2103*4882a593Smuzhiyun 	unsigned long flags;
2104*4882a593Smuzhiyun 	bool inv_stag;
2105*4882a593Smuzhiyun 
2106*4882a593Smuzhiyun 	iwqp = (struct i40iw_qp *)ibqp;
2107*4882a593Smuzhiyun 	ukqp = &iwqp->sc_qp.qp_uk;
2108*4882a593Smuzhiyun 
2109*4882a593Smuzhiyun 	spin_lock_irqsave(&iwqp->lock, flags);
2110*4882a593Smuzhiyun 
2111*4882a593Smuzhiyun 	if (iwqp->flush_issued) {
2112*4882a593Smuzhiyun 		err = -EINVAL;
2113*4882a593Smuzhiyun 		goto out;
2114*4882a593Smuzhiyun 	}
2115*4882a593Smuzhiyun 
2116*4882a593Smuzhiyun 	while (ib_wr) {
2117*4882a593Smuzhiyun 		inv_stag = false;
2118*4882a593Smuzhiyun 		memset(&info, 0, sizeof(info));
2119*4882a593Smuzhiyun 		info.wr_id = (u64)(ib_wr->wr_id);
2120*4882a593Smuzhiyun 		if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
2121*4882a593Smuzhiyun 			info.signaled = true;
2122*4882a593Smuzhiyun 		if (ib_wr->send_flags & IB_SEND_FENCE)
2123*4882a593Smuzhiyun 			info.read_fence = true;
2124*4882a593Smuzhiyun 
2125*4882a593Smuzhiyun 		switch (ib_wr->opcode) {
2126*4882a593Smuzhiyun 		case IB_WR_SEND:
2127*4882a593Smuzhiyun 		case IB_WR_SEND_WITH_INV:
2128*4882a593Smuzhiyun 			if (ib_wr->opcode == IB_WR_SEND) {
2129*4882a593Smuzhiyun 				if (ib_wr->send_flags & IB_SEND_SOLICITED)
2130*4882a593Smuzhiyun 					info.op_type = I40IW_OP_TYPE_SEND_SOL;
2131*4882a593Smuzhiyun 				else
2132*4882a593Smuzhiyun 					info.op_type = I40IW_OP_TYPE_SEND;
2133*4882a593Smuzhiyun 			} else {
2134*4882a593Smuzhiyun 				if (ib_wr->send_flags & IB_SEND_SOLICITED)
2135*4882a593Smuzhiyun 					info.op_type = I40IW_OP_TYPE_SEND_SOL_INV;
2136*4882a593Smuzhiyun 				else
2137*4882a593Smuzhiyun 					info.op_type = I40IW_OP_TYPE_SEND_INV;
2138*4882a593Smuzhiyun 			}
2139*4882a593Smuzhiyun 
2140*4882a593Smuzhiyun 			if (ib_wr->send_flags & IB_SEND_INLINE) {
2141*4882a593Smuzhiyun 				info.op.inline_send.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
2142*4882a593Smuzhiyun 				info.op.inline_send.len = ib_wr->sg_list[0].length;
2143*4882a593Smuzhiyun 				ret = ukqp->ops.iw_inline_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
2144*4882a593Smuzhiyun 			} else {
2145*4882a593Smuzhiyun 				info.op.send.num_sges = ib_wr->num_sge;
2146*4882a593Smuzhiyun 				info.op.send.sg_list = (struct i40iw_sge *)ib_wr->sg_list;
2147*4882a593Smuzhiyun 				ret = ukqp->ops.iw_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
2148*4882a593Smuzhiyun 			}
2149*4882a593Smuzhiyun 
2150*4882a593Smuzhiyun 			if (ret) {
2151*4882a593Smuzhiyun 				if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
2152*4882a593Smuzhiyun 					err = -ENOMEM;
2153*4882a593Smuzhiyun 				else
2154*4882a593Smuzhiyun 					err = -EINVAL;
2155*4882a593Smuzhiyun 			}
2156*4882a593Smuzhiyun 			break;
2157*4882a593Smuzhiyun 		case IB_WR_RDMA_WRITE:
2158*4882a593Smuzhiyun 			info.op_type = I40IW_OP_TYPE_RDMA_WRITE;
2159*4882a593Smuzhiyun 
2160*4882a593Smuzhiyun 			if (ib_wr->send_flags & IB_SEND_INLINE) {
2161*4882a593Smuzhiyun 				info.op.inline_rdma_write.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
2162*4882a593Smuzhiyun 				info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
2163*4882a593Smuzhiyun 				info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2164*4882a593Smuzhiyun 				info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2165*4882a593Smuzhiyun 				ret = ukqp->ops.iw_inline_rdma_write(ukqp, &info, false);
2166*4882a593Smuzhiyun 			} else {
2167*4882a593Smuzhiyun 				info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
2168*4882a593Smuzhiyun 				info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
2169*4882a593Smuzhiyun 				info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2170*4882a593Smuzhiyun 				info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2171*4882a593Smuzhiyun 				ret = ukqp->ops.iw_rdma_write(ukqp, &info, false);
2172*4882a593Smuzhiyun 			}
2173*4882a593Smuzhiyun 
2174*4882a593Smuzhiyun 			if (ret) {
2175*4882a593Smuzhiyun 				if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
2176*4882a593Smuzhiyun 					err = -ENOMEM;
2177*4882a593Smuzhiyun 				else
2178*4882a593Smuzhiyun 					err = -EINVAL;
2179*4882a593Smuzhiyun 			}
2180*4882a593Smuzhiyun 			break;
2181*4882a593Smuzhiyun 		case IB_WR_RDMA_READ_WITH_INV:
2182*4882a593Smuzhiyun 			inv_stag = true;
2183*4882a593Smuzhiyun 			fallthrough;
2184*4882a593Smuzhiyun 		case IB_WR_RDMA_READ:
2185*4882a593Smuzhiyun 			if (ib_wr->num_sge > I40IW_MAX_SGE_RD) {
2186*4882a593Smuzhiyun 				err = -EINVAL;
2187*4882a593Smuzhiyun 				break;
2188*4882a593Smuzhiyun 			}
2189*4882a593Smuzhiyun 			info.op_type = I40IW_OP_TYPE_RDMA_READ;
2190*4882a593Smuzhiyun 			info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
2191*4882a593Smuzhiyun 			info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
2192*4882a593Smuzhiyun 			info.op.rdma_read.lo_addr.tag_off = ib_wr->sg_list->addr;
2193*4882a593Smuzhiyun 			info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
2194*4882a593Smuzhiyun 			info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
2195*4882a593Smuzhiyun 			ret = ukqp->ops.iw_rdma_read(ukqp, &info, inv_stag, false);
2196*4882a593Smuzhiyun 			if (ret) {
2197*4882a593Smuzhiyun 				if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
2198*4882a593Smuzhiyun 					err = -ENOMEM;
2199*4882a593Smuzhiyun 				else
2200*4882a593Smuzhiyun 					err = -EINVAL;
2201*4882a593Smuzhiyun 			}
2202*4882a593Smuzhiyun 			break;
2203*4882a593Smuzhiyun 		case IB_WR_LOCAL_INV:
2204*4882a593Smuzhiyun 			info.op_type = I40IW_OP_TYPE_INV_STAG;
2205*4882a593Smuzhiyun 			info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
2206*4882a593Smuzhiyun 			ret = ukqp->ops.iw_stag_local_invalidate(ukqp, &info, true);
2207*4882a593Smuzhiyun 			if (ret)
2208*4882a593Smuzhiyun 				err = -ENOMEM;
2209*4882a593Smuzhiyun 			break;
2210*4882a593Smuzhiyun 		case IB_WR_REG_MR:
2211*4882a593Smuzhiyun 		{
2212*4882a593Smuzhiyun 			struct i40iw_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
2213*4882a593Smuzhiyun 			int flags = reg_wr(ib_wr)->access;
2214*4882a593Smuzhiyun 			struct i40iw_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
2215*4882a593Smuzhiyun 			struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
2216*4882a593Smuzhiyun 			struct i40iw_fast_reg_stag_info info;
2217*4882a593Smuzhiyun 
2218*4882a593Smuzhiyun 			memset(&info, 0, sizeof(info));
2219*4882a593Smuzhiyun 			info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD;
2220*4882a593Smuzhiyun 			info.access_rights |= i40iw_get_user_access(flags);
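			/* split the 32-bit MR key: low 8 bits are the consumer key, the rest indexes the STag */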
2221*4882a593Smuzhiyun 			info.stag_key = reg_wr(ib_wr)->key & 0xff;
2222*4882a593Smuzhiyun 			info.stag_idx = reg_wr(ib_wr)->key >> 8;
2223*4882a593Smuzhiyun 			info.page_size = reg_wr(ib_wr)->mr->page_size;
2224*4882a593Smuzhiyun 			info.wr_id = ib_wr->wr_id;
2225*4882a593Smuzhiyun 
2226*4882a593Smuzhiyun 			info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
2227*4882a593Smuzhiyun 			info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
2228*4882a593Smuzhiyun 			info.total_len = iwmr->ibmr.length;
2229*4882a593Smuzhiyun 			info.reg_addr_pa = *(u64 *)palloc->level1.addr;
2230*4882a593Smuzhiyun 			info.first_pm_pbl_index = palloc->level1.idx;
2231*4882a593Smuzhiyun 			info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
2232*4882a593Smuzhiyun 			info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED;
2233*4882a593Smuzhiyun 
2234*4882a593Smuzhiyun 			if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
2235*4882a593Smuzhiyun 				info.chunk_size = 1;
2236*4882a593Smuzhiyun 
2237*4882a593Smuzhiyun 			ret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true);
2238*4882a593Smuzhiyun 			if (ret)
2239*4882a593Smuzhiyun 				err = -ENOMEM;
2240*4882a593Smuzhiyun 			break;
2241*4882a593Smuzhiyun 		}
2242*4882a593Smuzhiyun 		default:
2243*4882a593Smuzhiyun 			err = -EINVAL;
2244*4882a593Smuzhiyun 			i40iw_pr_err(" post_send bad opcode = 0x%x\n",
2245*4882a593Smuzhiyun 				     ib_wr->opcode);
2246*4882a593Smuzhiyun 			break;
2247*4882a593Smuzhiyun 		}
2248*4882a593Smuzhiyun 
2249*4882a593Smuzhiyun 		if (err)
2250*4882a593Smuzhiyun 			break;
2251*4882a593Smuzhiyun 		ib_wr = ib_wr->next;
2252*4882a593Smuzhiyun 	}
2253*4882a593Smuzhiyun 
2254*4882a593Smuzhiyun out:
2255*4882a593Smuzhiyun 	if (err)
2256*4882a593Smuzhiyun 		*bad_wr = ib_wr;
2257*4882a593Smuzhiyun 	else
2258*4882a593Smuzhiyun 		ukqp->ops.iw_qp_post_wr(ukqp);
2259*4882a593Smuzhiyun 	spin_unlock_irqrestore(&iwqp->lock, flags);
2260*4882a593Smuzhiyun 
2261*4882a593Smuzhiyun 	return err;
2262*4882a593Smuzhiyun }
2263*4882a593Smuzhiyun 
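/*
 * Illustrative sketch only, not part of the driver: how a kernel ULP might
 * reach i40iw_post_send() above through the core verbs API for a single
 * signaled RDMA write.  qp, dma_addr, len, lkey, remote_va and remote_rkey
 * are hypothetical placeholders supplied by the caller.
 *
 *	struct ib_rdma_wr wr = {};
 *	struct ib_sge sge = { .addr = dma_addr, .length = len, .lkey = lkey };
 *	const struct ib_send_wr *bad_wr;
 *
 *	wr.wr.wr_id = 1;
 *	wr.wr.opcode = IB_WR_RDMA_WRITE;
 *	wr.wr.sg_list = &sge;
 *	wr.wr.num_sge = 1;
 *	wr.wr.send_flags = IB_SEND_SIGNALED;
 *	wr.remote_addr = remote_va;
 *	wr.rkey = remote_rkey;
 *	if (ib_post_send(qp, &wr.wr, &bad_wr))
 *		pr_err("example: ib_post_send failed\n");
 */
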
2264*4882a593Smuzhiyun /**
2265*4882a593Smuzhiyun  * i40iw_post_recv - post receive wr for kernel application
2266*4882a593Smuzhiyun  * @ibqp: ib qp pointer
2267*4882a593Smuzhiyun  * @ib_wr: work request for receive
2268*4882a593Smuzhiyun  * @bad_wr: first bad wr if an error occurs
2269*4882a593Smuzhiyun  */
2270*4882a593Smuzhiyun static int i40iw_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *ib_wr,
2271*4882a593Smuzhiyun 			   const struct ib_recv_wr **bad_wr)
2272*4882a593Smuzhiyun {
2273*4882a593Smuzhiyun 	struct i40iw_qp *iwqp;
2274*4882a593Smuzhiyun 	struct i40iw_qp_uk *ukqp;
2275*4882a593Smuzhiyun 	struct i40iw_post_rq_info post_recv;
2276*4882a593Smuzhiyun 	struct i40iw_sge sg_list[I40IW_MAX_WQ_FRAGMENT_COUNT];
2277*4882a593Smuzhiyun 	enum i40iw_status_code ret = 0;
2278*4882a593Smuzhiyun 	unsigned long flags;
2279*4882a593Smuzhiyun 	int err = 0;
2280*4882a593Smuzhiyun 
2281*4882a593Smuzhiyun 	iwqp = (struct i40iw_qp *)ibqp;
2282*4882a593Smuzhiyun 	ukqp = &iwqp->sc_qp.qp_uk;
2283*4882a593Smuzhiyun 
2284*4882a593Smuzhiyun 	memset(&post_recv, 0, sizeof(post_recv));
2285*4882a593Smuzhiyun 	spin_lock_irqsave(&iwqp->lock, flags);
2286*4882a593Smuzhiyun 
2287*4882a593Smuzhiyun 	if (iwqp->flush_issued) {
2288*4882a593Smuzhiyun 		err = -EINVAL;
2289*4882a593Smuzhiyun 		goto out;
2290*4882a593Smuzhiyun 	}
2291*4882a593Smuzhiyun 
2292*4882a593Smuzhiyun 	while (ib_wr) {
2293*4882a593Smuzhiyun 		post_recv.num_sges = ib_wr->num_sge;
2294*4882a593Smuzhiyun 		post_recv.wr_id = ib_wr->wr_id;
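		/* repack the caller's ib_sge array into the driver's i40iw_sge format */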
2295*4882a593Smuzhiyun 		i40iw_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
2296*4882a593Smuzhiyun 		post_recv.sg_list = sg_list;
2297*4882a593Smuzhiyun 		ret = ukqp->ops.iw_post_receive(ukqp, &post_recv);
2298*4882a593Smuzhiyun 		if (ret) {
2299*4882a593Smuzhiyun 			i40iw_pr_err(" post_recv err %d\n", ret);
2300*4882a593Smuzhiyun 			if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
2301*4882a593Smuzhiyun 				err = -ENOMEM;
2302*4882a593Smuzhiyun 			else
2303*4882a593Smuzhiyun 				err = -EINVAL;
2304*4882a593Smuzhiyun 			*bad_wr = ib_wr;
2305*4882a593Smuzhiyun 			goto out;
2306*4882a593Smuzhiyun 		}
2307*4882a593Smuzhiyun 		ib_wr = ib_wr->next;
2308*4882a593Smuzhiyun 	}
2309*4882a593Smuzhiyun  out:
2310*4882a593Smuzhiyun 	spin_unlock_irqrestore(&iwqp->lock, flags);
2311*4882a593Smuzhiyun 	return err;
2312*4882a593Smuzhiyun }
2313*4882a593Smuzhiyun 
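/*
 * Illustrative sketch only: posting a single receive buffer through the core
 * verbs API lands in i40iw_post_recv() above.  qp, dma_addr, len and lkey
 * are hypothetical placeholders.
 *
 *	struct ib_recv_wr wr = {};
 *	struct ib_sge sge = { .addr = dma_addr, .length = len, .lkey = lkey };
 *	const struct ib_recv_wr *bad_wr;
 *
 *	wr.wr_id = 2;
 *	wr.sg_list = &sge;
 *	wr.num_sge = 1;
 *	if (ib_post_recv(qp, &wr, &bad_wr))
 *		pr_err("example: ib_post_recv failed\n");
 */
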
2314*4882a593Smuzhiyun /**
2315*4882a593Smuzhiyun  * i40iw_poll_cq - poll cq for completion (kernel apps)
2316*4882a593Smuzhiyun  * @ibcq: cq to poll
2317*4882a593Smuzhiyun  * @num_entries: number of entries to poll
2318*4882a593Smuzhiyun  * @entry: array of work completions to fill
2319*4882a593Smuzhiyun  */
2320*4882a593Smuzhiyun static int i40iw_poll_cq(struct ib_cq *ibcq,
2321*4882a593Smuzhiyun 			 int num_entries,
2322*4882a593Smuzhiyun 			 struct ib_wc *entry)
2323*4882a593Smuzhiyun {
2324*4882a593Smuzhiyun 	struct i40iw_cq *iwcq;
2325*4882a593Smuzhiyun 	int cqe_count = 0;
2326*4882a593Smuzhiyun 	struct i40iw_cq_poll_info cq_poll_info;
2327*4882a593Smuzhiyun 	enum i40iw_status_code ret;
2328*4882a593Smuzhiyun 	struct i40iw_cq_uk *ukcq;
2329*4882a593Smuzhiyun 	struct i40iw_sc_qp *qp;
2330*4882a593Smuzhiyun 	struct i40iw_qp *iwqp;
2331*4882a593Smuzhiyun 	unsigned long flags;
2332*4882a593Smuzhiyun 
2333*4882a593Smuzhiyun 	iwcq = (struct i40iw_cq *)ibcq;
2334*4882a593Smuzhiyun 	ukcq = &iwcq->sc_cq.cq_uk;
2335*4882a593Smuzhiyun 
2336*4882a593Smuzhiyun 	spin_lock_irqsave(&iwcq->lock, flags);
2337*4882a593Smuzhiyun 	while (cqe_count < num_entries) {
2338*4882a593Smuzhiyun 		ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info);
2339*4882a593Smuzhiyun 		if (ret == I40IW_ERR_QUEUE_EMPTY) {
2340*4882a593Smuzhiyun 			break;
2341*4882a593Smuzhiyun 		} else if (ret == I40IW_ERR_QUEUE_DESTROYED) {
2342*4882a593Smuzhiyun 			continue;
2343*4882a593Smuzhiyun 		} else if (ret) {
2344*4882a593Smuzhiyun 			if (!cqe_count)
2345*4882a593Smuzhiyun 				cqe_count = -1;
2346*4882a593Smuzhiyun 			break;
2347*4882a593Smuzhiyun 		}
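		/* translate the raw HW completion into the caller's ib_wc */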
2348*4882a593Smuzhiyun 		entry->wc_flags = 0;
2349*4882a593Smuzhiyun 		entry->wr_id = cq_poll_info.wr_id;
2350*4882a593Smuzhiyun 		if (cq_poll_info.error) {
2351*4882a593Smuzhiyun 			entry->status = IB_WC_WR_FLUSH_ERR;
2352*4882a593Smuzhiyun 			entry->vendor_err = cq_poll_info.major_err << 16 | cq_poll_info.minor_err;
2353*4882a593Smuzhiyun 		} else {
2354*4882a593Smuzhiyun 			entry->status = IB_WC_SUCCESS;
2355*4882a593Smuzhiyun 		}
2356*4882a593Smuzhiyun 
2357*4882a593Smuzhiyun 		switch (cq_poll_info.op_type) {
2358*4882a593Smuzhiyun 		case I40IW_OP_TYPE_RDMA_WRITE:
2359*4882a593Smuzhiyun 			entry->opcode = IB_WC_RDMA_WRITE;
2360*4882a593Smuzhiyun 			break;
2361*4882a593Smuzhiyun 		case I40IW_OP_TYPE_RDMA_READ_INV_STAG:
2362*4882a593Smuzhiyun 		case I40IW_OP_TYPE_RDMA_READ:
2363*4882a593Smuzhiyun 			entry->opcode = IB_WC_RDMA_READ;
2364*4882a593Smuzhiyun 			break;
2365*4882a593Smuzhiyun 		case I40IW_OP_TYPE_SEND_SOL:
2366*4882a593Smuzhiyun 		case I40IW_OP_TYPE_SEND_SOL_INV:
2367*4882a593Smuzhiyun 		case I40IW_OP_TYPE_SEND_INV:
2368*4882a593Smuzhiyun 		case I40IW_OP_TYPE_SEND:
2369*4882a593Smuzhiyun 			entry->opcode = IB_WC_SEND;
2370*4882a593Smuzhiyun 			break;
2371*4882a593Smuzhiyun 		case I40IW_OP_TYPE_REC:
2372*4882a593Smuzhiyun 			entry->opcode = IB_WC_RECV;
2373*4882a593Smuzhiyun 			break;
2374*4882a593Smuzhiyun 		default:
2375*4882a593Smuzhiyun 			entry->opcode = IB_WC_RECV;
2376*4882a593Smuzhiyun 			break;
2377*4882a593Smuzhiyun 		}
2378*4882a593Smuzhiyun 
2379*4882a593Smuzhiyun 		entry->ex.imm_data = 0;
2380*4882a593Smuzhiyun 		qp = (struct i40iw_sc_qp *)cq_poll_info.qp_handle;
2381*4882a593Smuzhiyun 		entry->qp = (struct ib_qp *)qp->back_qp;
2382*4882a593Smuzhiyun 		entry->src_qp = cq_poll_info.qp_id;
2383*4882a593Smuzhiyun 		iwqp = (struct i40iw_qp *)qp->back_qp;
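		/* a QP past RTS is draining; wake drain_sq/drain_rq waiters once the rings empty */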
2384*4882a593Smuzhiyun 		if (iwqp->iwarp_state > I40IW_QP_STATE_RTS) {
2385*4882a593Smuzhiyun 			if (!I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
2386*4882a593Smuzhiyun 				complete(&iwqp->sq_drained);
2387*4882a593Smuzhiyun 			if (!I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
2388*4882a593Smuzhiyun 				complete(&iwqp->rq_drained);
2389*4882a593Smuzhiyun 		}
2390*4882a593Smuzhiyun 		entry->byte_len = cq_poll_info.bytes_xfered;
2391*4882a593Smuzhiyun 		entry++;
2392*4882a593Smuzhiyun 		cqe_count++;
2393*4882a593Smuzhiyun 	}
2394*4882a593Smuzhiyun 	spin_unlock_irqrestore(&iwcq->lock, flags);
2395*4882a593Smuzhiyun 	return cqe_count;
2396*4882a593Smuzhiyun }
2397*4882a593Smuzhiyun 
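/*
 * Illustrative sketch only: draining completions from a kernel CQ with
 * ib_poll_cq(), which calls i40iw_poll_cq() above.  cq is a hypothetical
 * CQ created by the consumer.
 *
 *	struct ib_wc wc[8];
 *	int n, i;
 *
 *	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
 *		for (i = 0; i < n; i++)
 *			pr_info("wr_id %llu status %d\n", wc[i].wr_id, wc[i].status);
 */
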
2398*4882a593Smuzhiyun /**
2399*4882a593Smuzhiyun  * i40iw_req_notify_cq - arm cq for kernel application
2400*4882a593Smuzhiyun  * @ibcq: cq to arm
2401*4882a593Smuzhiyun  * @notify_flags: notification flags
2402*4882a593Smuzhiyun  */
2403*4882a593Smuzhiyun static int i40iw_req_notify_cq(struct ib_cq *ibcq,
2404*4882a593Smuzhiyun 			       enum ib_cq_notify_flags notify_flags)
2405*4882a593Smuzhiyun {
2406*4882a593Smuzhiyun 	struct i40iw_cq *iwcq;
2407*4882a593Smuzhiyun 	struct i40iw_cq_uk *ukcq;
2408*4882a593Smuzhiyun 	unsigned long flags;
2409*4882a593Smuzhiyun 	enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT;
2410*4882a593Smuzhiyun 
2411*4882a593Smuzhiyun 	iwcq = (struct i40iw_cq *)ibcq;
2412*4882a593Smuzhiyun 	ukcq = &iwcq->sc_cq.cq_uk;
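	/*
	 * IB_CQ_SOLICITED arms the CQ for solicited completions only; any
	 * other request arms it for the next completion event.
	 */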
2413*4882a593Smuzhiyun 	if (notify_flags == IB_CQ_SOLICITED)
2414*4882a593Smuzhiyun 		cq_notify = IW_CQ_COMPL_SOLICITED;
2415*4882a593Smuzhiyun 	spin_lock_irqsave(&iwcq->lock, flags);
2416*4882a593Smuzhiyun 	ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
2417*4882a593Smuzhiyun 	spin_unlock_irqrestore(&iwcq->lock, flags);
2418*4882a593Smuzhiyun 	return 0;
2419*4882a593Smuzhiyun }
2420*4882a593Smuzhiyun 
2421*4882a593Smuzhiyun /**
2422*4882a593Smuzhiyun  * i40iw_port_immutable - return port's immutable data
2423*4882a593Smuzhiyun  * @ibdev: ib dev struct
2424*4882a593Smuzhiyun  * @port_num: port number
2425*4882a593Smuzhiyun  * @immutable: immutable port data to return
2426*4882a593Smuzhiyun  */
2427*4882a593Smuzhiyun static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num,
2428*4882a593Smuzhiyun 				struct ib_port_immutable *immutable)
2429*4882a593Smuzhiyun {
2430*4882a593Smuzhiyun 	struct ib_port_attr attr;
2431*4882a593Smuzhiyun 	int err;
2432*4882a593Smuzhiyun 
2433*4882a593Smuzhiyun 	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
2434*4882a593Smuzhiyun 
2435*4882a593Smuzhiyun 	err = ib_query_port(ibdev, port_num, &attr);
2436*4882a593Smuzhiyun 
2437*4882a593Smuzhiyun 	if (err)
2438*4882a593Smuzhiyun 		return err;
2439*4882a593Smuzhiyun 
2440*4882a593Smuzhiyun 	immutable->gid_tbl_len = attr.gid_tbl_len;
2441*4882a593Smuzhiyun 
2442*4882a593Smuzhiyun 	return 0;
2443*4882a593Smuzhiyun }
2444*4882a593Smuzhiyun 
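/*
 * Counter name table handed to rdma_alloc_hw_stats_struct(): the 32-bit HW
 * counters occupy indices [0, I40IW_HW_STAT_INDEX_MAX_32) and the 64-bit
 * counters follow, offset by I40IW_HW_STAT_INDEX_MAX_32.
 */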
2445*4882a593Smuzhiyun static const char * const i40iw_hw_stat_names[] = {
2446*4882a593Smuzhiyun 	/* 32-bit counter names */
2447*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
2448*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
2449*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
2450*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
2451*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
2452*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
2453*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
2454*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors",
2455*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors",
2456*4882a593Smuzhiyun 	/* 64-bit counter names */
2457*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_IP4RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
2458*4882a593Smuzhiyun 		"ip4InOctets",
2459*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_IP4RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2460*4882a593Smuzhiyun 		"ip4InPkts",
2461*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_IP4RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
2462*4882a593Smuzhiyun 		"ip4InReasmRqd",
2463*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_IP4RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2464*4882a593Smuzhiyun 		"ip4InMcastPkts",
2465*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_IP4TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
2466*4882a593Smuzhiyun 		"ip4OutOctets",
2467*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_IP4TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2468*4882a593Smuzhiyun 		"ip4OutPkts",
2469*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_IP4TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
2470*4882a593Smuzhiyun 		"ip4OutSegRqd",
2471*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_IP4TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2472*4882a593Smuzhiyun 		"ip4OutMcastPkts",
2473*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_IP6RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
2474*4882a593Smuzhiyun 		"ip6InOctets",
2475*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_IP6RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2476*4882a593Smuzhiyun 		"ip6InPkts",
2477*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_IP6RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
2478*4882a593Smuzhiyun 		"ip6InReasmRqd",
2479*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_IP6RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2480*4882a593Smuzhiyun 		"ip6InMcastPkts",
2481*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_IP6TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
2482*4882a593Smuzhiyun 		"ip6OutOctets",
2483*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_IP6TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2484*4882a593Smuzhiyun 		"ip6OutPkts",
2485*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_IP6TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
2486*4882a593Smuzhiyun 		"ip6OutSegRqd",
2487*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_IP6TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
2488*4882a593Smuzhiyun 		"ip6OutMcastPkts",
2489*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_TCPRXSEGS + I40IW_HW_STAT_INDEX_MAX_32] =
2490*4882a593Smuzhiyun 		"tcpInSegs",
2491*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_TCPTXSEG + I40IW_HW_STAT_INDEX_MAX_32] =
2492*4882a593Smuzhiyun 		"tcpOutSegs",
2493*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_RDMARXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
2494*4882a593Smuzhiyun 		"iwInRdmaReads",
2495*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_RDMARXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
2496*4882a593Smuzhiyun 		"iwInRdmaSends",
2497*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_RDMARXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
2498*4882a593Smuzhiyun 		"iwInRdmaWrites",
2499*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_RDMATXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
2500*4882a593Smuzhiyun 		"iwOutRdmaReads",
2501*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_RDMATXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
2502*4882a593Smuzhiyun 		"iwOutRdmaSends",
2503*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_RDMATXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
2504*4882a593Smuzhiyun 		"iwOutRdmaWrites",
2505*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_RDMAVBND + I40IW_HW_STAT_INDEX_MAX_32] =
2506*4882a593Smuzhiyun 		"iwRdmaBnd",
2507*4882a593Smuzhiyun 	[I40IW_HW_STAT_INDEX_RDMAVINV + I40IW_HW_STAT_INDEX_MAX_32] =
2508*4882a593Smuzhiyun 		"iwRdmaInv"
2509*4882a593Smuzhiyun };
2510*4882a593Smuzhiyun 
2511*4882a593Smuzhiyun static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str)
2512*4882a593Smuzhiyun {
2513*4882a593Smuzhiyun 	struct i40iw_device *iwdev = to_iwdev(dev);
2514*4882a593Smuzhiyun 
2515*4882a593Smuzhiyun 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%llu.%llu",
2516*4882a593Smuzhiyun 		 i40iw_fw_major_ver(&iwdev->sc_dev),
2517*4882a593Smuzhiyun 		 i40iw_fw_minor_ver(&iwdev->sc_dev));
2518*4882a593Smuzhiyun }
2519*4882a593Smuzhiyun 
2520*4882a593Smuzhiyun /**
2521*4882a593Smuzhiyun  * i40iw_alloc_hw_stats - Allocate a hw stats structure
2522*4882a593Smuzhiyun  * @ibdev: device pointer from stack
2523*4882a593Smuzhiyun  * @port_num: port number
2524*4882a593Smuzhiyun  */
2525*4882a593Smuzhiyun static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev,
2526*4882a593Smuzhiyun 						  u8 port_num)
2527*4882a593Smuzhiyun {
2528*4882a593Smuzhiyun 	struct i40iw_device *iwdev = to_iwdev(ibdev);
2529*4882a593Smuzhiyun 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
2530*4882a593Smuzhiyun 	int num_counters = I40IW_HW_STAT_INDEX_MAX_32 +
2531*4882a593Smuzhiyun 		I40IW_HW_STAT_INDEX_MAX_64;
2532*4882a593Smuzhiyun 	unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
2533*4882a593Smuzhiyun 
2534*4882a593Smuzhiyun 	BUILD_BUG_ON(ARRAY_SIZE(i40iw_hw_stat_names) !=
2535*4882a593Smuzhiyun 		     (I40IW_HW_STAT_INDEX_MAX_32 +
2536*4882a593Smuzhiyun 		      I40IW_HW_STAT_INDEX_MAX_64));
2537*4882a593Smuzhiyun 
2538*4882a593Smuzhiyun 	/*
2539*4882a593Smuzhiyun 	 * PFs get the default update lifespan, but VFs only update once
2540*4882a593Smuzhiyun 	 * per second
2541*4882a593Smuzhiyun 	 */
2542*4882a593Smuzhiyun 	if (!dev->is_pf)
2543*4882a593Smuzhiyun 		lifespan = 1000;
2544*4882a593Smuzhiyun 	return rdma_alloc_hw_stats_struct(i40iw_hw_stat_names, num_counters,
2545*4882a593Smuzhiyun 					  lifespan);
2546*4882a593Smuzhiyun }
2547*4882a593Smuzhiyun 
2548*4882a593Smuzhiyun /**
2549*4882a593Smuzhiyun  * i40iw_get_hw_stats - Populates the rdma_hw_stats structure
2550*4882a593Smuzhiyun  * @ibdev: device pointer from stack
2551*4882a593Smuzhiyun  * @stats: stats pointer from stack
2552*4882a593Smuzhiyun  * @port_num: port number
2553*4882a593Smuzhiyun  * @index: which hw counter the stack is requesting we update
2554*4882a593Smuzhiyun  */
2555*4882a593Smuzhiyun static int i40iw_get_hw_stats(struct ib_device *ibdev,
2556*4882a593Smuzhiyun 			      struct rdma_hw_stats *stats,
2557*4882a593Smuzhiyun 			      u8 port_num, int index)
2558*4882a593Smuzhiyun {
2559*4882a593Smuzhiyun 	struct i40iw_device *iwdev = to_iwdev(ibdev);
2560*4882a593Smuzhiyun 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
2561*4882a593Smuzhiyun 	struct i40iw_vsi_pestat *devstat = iwdev->vsi.pestat;
2562*4882a593Smuzhiyun 	struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
2563*4882a593Smuzhiyun 
2564*4882a593Smuzhiyun 	if (dev->is_pf) {
2565*4882a593Smuzhiyun 		i40iw_hw_stats_read_all(devstat, &devstat->hw_stats);
2566*4882a593Smuzhiyun 	} else {
2567*4882a593Smuzhiyun 		if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats))
2568*4882a593Smuzhiyun 			return -ENOSYS;
2569*4882a593Smuzhiyun 	}
2570*4882a593Smuzhiyun 
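	/*
	 * The counters in struct i40iw_dev_hw_stats are laid out in the same
	 * order as i40iw_hw_stat_names, so one flat copy fills stats->value[].
	 */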
2571*4882a593Smuzhiyun 	memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));
2572*4882a593Smuzhiyun 
2573*4882a593Smuzhiyun 	return stats->num_counters;
2574*4882a593Smuzhiyun }
2575*4882a593Smuzhiyun 
2576*4882a593Smuzhiyun /**
2577*4882a593Smuzhiyun  * i40iw_query_gid - Query port GID
2578*4882a593Smuzhiyun  * @ibdev: device pointer from stack
2579*4882a593Smuzhiyun  * @port: port number
2580*4882a593Smuzhiyun  * @index: Entry index
2581*4882a593Smuzhiyun  * @gid: Global ID
2582*4882a593Smuzhiyun  */
2583*4882a593Smuzhiyun static int i40iw_query_gid(struct ib_device *ibdev,
2584*4882a593Smuzhiyun 			   u8 port,
2585*4882a593Smuzhiyun 			   int index,
2586*4882a593Smuzhiyun 			   union ib_gid *gid)
2587*4882a593Smuzhiyun {
2588*4882a593Smuzhiyun 	struct i40iw_device *iwdev = to_iwdev(ibdev);
2589*4882a593Smuzhiyun 
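	/* iWARP has no real GID table; report the netdev MAC address, zero padded */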
2590*4882a593Smuzhiyun 	memset(gid->raw, 0, sizeof(gid->raw));
2591*4882a593Smuzhiyun 	ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
2592*4882a593Smuzhiyun 	return 0;
2593*4882a593Smuzhiyun }
2594*4882a593Smuzhiyun 
2595*4882a593Smuzhiyun static const struct ib_device_ops i40iw_dev_ops = {
2596*4882a593Smuzhiyun 	.owner = THIS_MODULE,
2597*4882a593Smuzhiyun 	.driver_id = RDMA_DRIVER_I40IW,
2598*4882a593Smuzhiyun 	/* NOTE: Older kernels wrongly use 0 for the uverbs_abi_ver */
2599*4882a593Smuzhiyun 	.uverbs_abi_ver = I40IW_ABI_VER,
2600*4882a593Smuzhiyun 
2601*4882a593Smuzhiyun 	.alloc_hw_stats = i40iw_alloc_hw_stats,
2602*4882a593Smuzhiyun 	.alloc_mr = i40iw_alloc_mr,
2603*4882a593Smuzhiyun 	.alloc_pd = i40iw_alloc_pd,
2604*4882a593Smuzhiyun 	.alloc_ucontext = i40iw_alloc_ucontext,
2605*4882a593Smuzhiyun 	.create_cq = i40iw_create_cq,
2606*4882a593Smuzhiyun 	.create_qp = i40iw_create_qp,
2607*4882a593Smuzhiyun 	.dealloc_pd = i40iw_dealloc_pd,
2608*4882a593Smuzhiyun 	.dealloc_ucontext = i40iw_dealloc_ucontext,
2609*4882a593Smuzhiyun 	.dereg_mr = i40iw_dereg_mr,
2610*4882a593Smuzhiyun 	.destroy_cq = i40iw_destroy_cq,
2611*4882a593Smuzhiyun 	.destroy_qp = i40iw_destroy_qp,
2612*4882a593Smuzhiyun 	.drain_rq = i40iw_drain_rq,
2613*4882a593Smuzhiyun 	.drain_sq = i40iw_drain_sq,
2614*4882a593Smuzhiyun 	.get_dev_fw_str = i40iw_get_dev_fw_str,
2615*4882a593Smuzhiyun 	.get_dma_mr = i40iw_get_dma_mr,
2616*4882a593Smuzhiyun 	.get_hw_stats = i40iw_get_hw_stats,
2617*4882a593Smuzhiyun 	.get_port_immutable = i40iw_port_immutable,
2618*4882a593Smuzhiyun 	.iw_accept = i40iw_accept,
2619*4882a593Smuzhiyun 	.iw_add_ref = i40iw_qp_add_ref,
2620*4882a593Smuzhiyun 	.iw_connect = i40iw_connect,
2621*4882a593Smuzhiyun 	.iw_create_listen = i40iw_create_listen,
2622*4882a593Smuzhiyun 	.iw_destroy_listen = i40iw_destroy_listen,
2623*4882a593Smuzhiyun 	.iw_get_qp = i40iw_get_qp,
2624*4882a593Smuzhiyun 	.iw_reject = i40iw_reject,
2625*4882a593Smuzhiyun 	.iw_rem_ref = i40iw_qp_rem_ref,
2626*4882a593Smuzhiyun 	.map_mr_sg = i40iw_map_mr_sg,
2627*4882a593Smuzhiyun 	.mmap = i40iw_mmap,
2628*4882a593Smuzhiyun 	.modify_qp = i40iw_modify_qp,
2629*4882a593Smuzhiyun 	.poll_cq = i40iw_poll_cq,
2630*4882a593Smuzhiyun 	.post_recv = i40iw_post_recv,
2631*4882a593Smuzhiyun 	.post_send = i40iw_post_send,
2632*4882a593Smuzhiyun 	.query_device = i40iw_query_device,
2633*4882a593Smuzhiyun 	.query_gid = i40iw_query_gid,
2634*4882a593Smuzhiyun 	.query_port = i40iw_query_port,
2635*4882a593Smuzhiyun 	.query_qp = i40iw_query_qp,
2636*4882a593Smuzhiyun 	.reg_user_mr = i40iw_reg_user_mr,
2637*4882a593Smuzhiyun 	.req_notify_cq = i40iw_req_notify_cq,
2638*4882a593Smuzhiyun 	INIT_RDMA_OBJ_SIZE(ib_pd, i40iw_pd, ibpd),
2639*4882a593Smuzhiyun 	INIT_RDMA_OBJ_SIZE(ib_cq, i40iw_cq, ibcq),
2640*4882a593Smuzhiyun 	INIT_RDMA_OBJ_SIZE(ib_ucontext, i40iw_ucontext, ibucontext),
2641*4882a593Smuzhiyun };
2642*4882a593Smuzhiyun 
2643*4882a593Smuzhiyun /**
2644*4882a593Smuzhiyun  * i40iw_init_rdma_device - initialization of iwarp device
2645*4882a593Smuzhiyun  * @iwdev: iwarp device
2646*4882a593Smuzhiyun  */
2647*4882a593Smuzhiyun static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev)
2648*4882a593Smuzhiyun {
2649*4882a593Smuzhiyun 	struct i40iw_ib_device *iwibdev;
2650*4882a593Smuzhiyun 	struct net_device *netdev = iwdev->netdev;
2651*4882a593Smuzhiyun 	struct pci_dev *pcidev = iwdev->hw.pcidev;
2652*4882a593Smuzhiyun 
2653*4882a593Smuzhiyun 	iwibdev = ib_alloc_device(i40iw_ib_device, ibdev);
2654*4882a593Smuzhiyun 	if (!iwibdev) {
2655*4882a593Smuzhiyun 		i40iw_pr_err("ib_alloc_device failed\n");
2656*4882a593Smuzhiyun 		return NULL;
2657*4882a593Smuzhiyun 	}
2658*4882a593Smuzhiyun 	iwdev->iwibdev = iwibdev;
2659*4882a593Smuzhiyun 	iwibdev->iwdev = iwdev;
2660*4882a593Smuzhiyun 
2661*4882a593Smuzhiyun 	iwibdev->ibdev.node_type = RDMA_NODE_RNIC;
2662*4882a593Smuzhiyun 	ether_addr_copy((u8 *)&iwibdev->ibdev.node_guid, netdev->dev_addr);
2663*4882a593Smuzhiyun 
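	/* advertise the uverbs commands this driver implements to user space */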
2664*4882a593Smuzhiyun 	iwibdev->ibdev.uverbs_cmd_mask =
2665*4882a593Smuzhiyun 	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
2666*4882a593Smuzhiyun 	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
2667*4882a593Smuzhiyun 	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
2668*4882a593Smuzhiyun 	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
2669*4882a593Smuzhiyun 	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
2670*4882a593Smuzhiyun 	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
2671*4882a593Smuzhiyun 	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
2672*4882a593Smuzhiyun 	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2673*4882a593Smuzhiyun 	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
2674*4882a593Smuzhiyun 	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
2675*4882a593Smuzhiyun 	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
2676*4882a593Smuzhiyun 	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
2677*4882a593Smuzhiyun 	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
2678*4882a593Smuzhiyun 	    (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
2679*4882a593Smuzhiyun 	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
2680*4882a593Smuzhiyun 	    (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
2681*4882a593Smuzhiyun 	    (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
2682*4882a593Smuzhiyun 	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
2683*4882a593Smuzhiyun 	    (1ull << IB_USER_VERBS_CMD_POST_RECV) |
2684*4882a593Smuzhiyun 	    (1ull << IB_USER_VERBS_CMD_POST_SEND);
2685*4882a593Smuzhiyun 	iwibdev->ibdev.phys_port_cnt = 1;
2686*4882a593Smuzhiyun 	iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count;
2687*4882a593Smuzhiyun 	iwibdev->ibdev.dev.parent = &pcidev->dev;
2688*4882a593Smuzhiyun 	memcpy(iwibdev->ibdev.iw_ifname, netdev->name,
2689*4882a593Smuzhiyun 	       sizeof(iwibdev->ibdev.iw_ifname));
2690*4882a593Smuzhiyun 	ib_set_device_ops(&iwibdev->ibdev, &i40iw_dev_ops);
2691*4882a593Smuzhiyun 
2692*4882a593Smuzhiyun 	return iwibdev;
2693*4882a593Smuzhiyun }
2694*4882a593Smuzhiyun 
2695*4882a593Smuzhiyun /**
2696*4882a593Smuzhiyun  * i40iw_port_ibevent - indicate port event
2697*4882a593Smuzhiyun  * @iwdev: iwarp device
2698*4882a593Smuzhiyun  */
2699*4882a593Smuzhiyun void i40iw_port_ibevent(struct i40iw_device *iwdev)
2700*4882a593Smuzhiyun {
2701*4882a593Smuzhiyun 	struct i40iw_ib_device *iwibdev = iwdev->iwibdev;
2702*4882a593Smuzhiyun 	struct ib_event event;
2703*4882a593Smuzhiyun 
2704*4882a593Smuzhiyun 	event.device = &iwibdev->ibdev;
2705*4882a593Smuzhiyun 	event.element.port_num = 1;
2706*4882a593Smuzhiyun 	event.event = iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
2707*4882a593Smuzhiyun 	ib_dispatch_event(&event);
2708*4882a593Smuzhiyun }
2709*4882a593Smuzhiyun 
2710*4882a593Smuzhiyun /**
2711*4882a593Smuzhiyun  * i40iw_destroy_rdma_device - destroy rdma device and free resources
2712*4882a593Smuzhiyun  * @iwibdev: IB device ptr
2713*4882a593Smuzhiyun  */
2714*4882a593Smuzhiyun void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
2715*4882a593Smuzhiyun {
2716*4882a593Smuzhiyun 	ib_unregister_device(&iwibdev->ibdev);
2717*4882a593Smuzhiyun 	wait_event_timeout(iwibdev->iwdev->close_wq,
2718*4882a593Smuzhiyun 			   !atomic64_read(&iwibdev->iwdev->use_count),
2719*4882a593Smuzhiyun 			   I40IW_EVENT_TIMEOUT);
2720*4882a593Smuzhiyun 	ib_dealloc_device(&iwibdev->ibdev);
2721*4882a593Smuzhiyun }
2722*4882a593Smuzhiyun 
2723*4882a593Smuzhiyun /**
2724*4882a593Smuzhiyun  * i40iw_register_rdma_device - register iwarp device to IB
2725*4882a593Smuzhiyun  * @iwdev: iwarp device
2726*4882a593Smuzhiyun  */
2727*4882a593Smuzhiyun int i40iw_register_rdma_device(struct i40iw_device *iwdev)
2728*4882a593Smuzhiyun {
2729*4882a593Smuzhiyun 	int ret;
2730*4882a593Smuzhiyun 	struct i40iw_ib_device *iwibdev;
2731*4882a593Smuzhiyun 
2732*4882a593Smuzhiyun 	iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
2733*4882a593Smuzhiyun 	if (!iwdev->iwibdev)
2734*4882a593Smuzhiyun 		return -ENOMEM;
2735*4882a593Smuzhiyun 	iwibdev = iwdev->iwibdev;
2736*4882a593Smuzhiyun 	rdma_set_device_sysfs_group(&iwibdev->ibdev, &i40iw_attr_group);
2737*4882a593Smuzhiyun 	ret = ib_device_set_netdev(&iwibdev->ibdev, iwdev->netdev, 1);
2738*4882a593Smuzhiyun 	if (ret)
2739*4882a593Smuzhiyun 		goto error;
2740*4882a593Smuzhiyun 
2741*4882a593Smuzhiyun 	dma_set_max_seg_size(&iwdev->hw.pcidev->dev, UINT_MAX);
2742*4882a593Smuzhiyun 	ret = ib_register_device(&iwibdev->ibdev, "i40iw%d", &iwdev->hw.pcidev->dev);
2743*4882a593Smuzhiyun 	if (ret)
2744*4882a593Smuzhiyun 		goto error;
2745*4882a593Smuzhiyun 
2746*4882a593Smuzhiyun 	return 0;
2747*4882a593Smuzhiyun error:
2748*4882a593Smuzhiyun 	ib_dealloc_device(&iwdev->iwibdev->ibdev);
2749*4882a593Smuzhiyun 	return ret;
2750*4882a593Smuzhiyun }
2751