/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>

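/*
 * Deferred CQE-flush handler: move the QP to the error state once the
 * flush flag is set, then drop the reference taken by init_flush_work()
 * so that a concurrent destroy can proceed.
 */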
static void flush_work_handle(struct work_struct *work)
{
	struct hns_roce_work *flush_work = container_of(work,
					struct hns_roce_work, work);
	struct hns_roce_qp *hr_qp = container_of(flush_work,
					struct hns_roce_qp, flush_work);
	struct device *dev = flush_work->hr_dev->dev;
	struct ib_qp_attr attr;
	int attr_mask;
	int ret;

	attr_mask = IB_QP_STATE;
	attr.qp_state = IB_QPS_ERR;

	if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {
		ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
		if (ret)
			dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n",
				ret);
	}

	/*
	 * Make sure we signal the QP destroy leg that the flush has
	 * completed, so that it can now safely proceed and destroy the QP.
	 */
	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
}

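/* Queue the CQE-flush work and hold a QP reference for it to drop. */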
void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_work *flush_work = &hr_qp->flush_work;

	flush_work->hr_dev = hr_dev;
	INIT_WORK(&flush_work->work, flush_work_handle);
	atomic_inc(&hr_qp->refcount);
	queue_work(hr_dev->irq_workq, &flush_work->work);
}

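/*
 * Dispatch an asynchronous event to the QP it belongs to. On engines
 * newer than v1, fatal WQ errors also push the QP into the error state
 * and schedule a CQE flush.
 */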
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_qp *qp;

	xa_lock(&hr_dev->qp_table_xa);
	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);
	xa_unlock(&hr_dev->qp_table_xa);

	if (!qp) {
		dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
	    (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
	     event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
	     event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR)) {
		qp->state = IB_QPS_ERR;
		if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
			init_flush_work(hr_dev, qp);
	}

	qp->event(qp, (enum hns_roce_event)event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}

static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
				 enum hns_roce_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
				type, hr_qp->qpn);
			return;
		}
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

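/*
 * Allocate a QP number: GSI QPs get a fixed number (per-port on hw v1),
 * all other QPs take one from the QP bitmap.
 */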
static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	unsigned long num = 0;
	int ret;

	if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
		/* In hw v1, the GSI QP number is fixed per physical port */
		if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
			num = HNS_ROCE_MAX_PORTS +
			      hr_dev->iboe.phy_port[hr_qp->port];
		else
			num = 1;

		hr_qp->doorbell_qpn = 1;
	} else {
		ret = hns_roce_bitmap_alloc_range(&hr_dev->qp_table.bitmap,
						  1, 1, &num);
		if (ret) {
			ibdev_err(&hr_dev->ib_dev, "Failed to alloc bitmap\n");
			return -ENOMEM;
		}

		hr_qp->doorbell_qpn = (u32)num;
	}

	hr_qp->qpn = num;

	return 0;
}

enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return HNS_ROCE_QP_STATE_RST;
	case IB_QPS_INIT:
		return HNS_ROCE_QP_STATE_INIT;
	case IB_QPS_RTR:
		return HNS_ROCE_QP_STATE_RTR;
	case IB_QPS_RTS:
		return HNS_ROCE_QP_STATE_RTS;
	case IB_QPS_SQD:
		return HNS_ROCE_QP_STATE_SQD;
	case IB_QPS_ERR:
		return HNS_ROCE_QP_STATE_ERR;
	default:
		return HNS_ROCE_QP_NUM_STATE;
	}
}

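/*
 * Link the QP into the device-wide QP list and its CQs' SQ/RQ lists,
 * holding both the device lock and the CQ locks.
 */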
static void add_qp_to_list(struct hns_roce_dev *hr_dev,
			   struct hns_roce_qp *hr_qp,
			   struct ib_cq *send_cq, struct ib_cq *recv_cq)
{
	struct hns_roce_cq *hr_send_cq, *hr_recv_cq;
	unsigned long flags;

	hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;
	hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;

	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
	hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);

	list_add_tail(&hr_qp->node, &hr_dev->qp_list);
	if (hr_send_cq)
		list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list);
	if (hr_recv_cq)
		list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list);

	hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}

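/* Publish the QP in the QPN-indexed xarray and on the QP lists. */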
static int hns_roce_qp_store(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp,
			     struct ib_qp_init_attr *init_attr)
{
	struct xarray *xa = &hr_dev->qp_table_xa;
	int ret;

	if (!hr_qp->qpn)
		return -EINVAL;

	ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL));
	if (ret)
		dev_err(hr_dev->dev, "Failed to xa store for QPC\n");
	else
		/* add QP to device's QP list for softwc (software completion) */
		add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,
			       init_attr->recv_cq);

	return ret;
}

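/*
 * Reserve the per-QP hardware context entries (QPC, IRRL, and the
 * optional TRRL and SCC context) in their HEM tables.
 */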
static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	int ret;

	if (!hr_qp->qpn)
		return -EINVAL;

	/* In the v1 engine, the GSI QP context is saved in hardware registers */
	if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1)
		return 0;

	/* Alloc memory for QPC */
	ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "Failed to get QPC table\n");
		goto err_out;
	}

	/* Alloc memory for IRRL */
	ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "Failed to get IRRL table\n");
		goto err_put_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		/* Alloc memory for TRRL */
		ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "Failed to get TRRL table\n");
			goto err_put_irrl;
		}
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		/* Alloc memory for SCC CTX */
		ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "Failed to get SCC CTX table\n");
			goto err_put_trrl;
		}
	}

	return 0;

err_put_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
	return ret;
}

void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct xarray *xa = &hr_dev->qp_table_xa;
	unsigned long flags;

	list_del(&hr_qp->node);
	list_del(&hr_qp->sq_node);
	list_del(&hr_qp->rq_node);

	xa_lock_irqsave(xa, flags);
	__xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1));
	xa_unlock_irqrestore(xa, flags);
}

static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	/* In the v1 engine, the GSI QP context is saved in hardware registers */
	if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1)
		return;

	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
}

static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
		return;

	if (hr_qp->qpn < hr_dev->caps.reserved_qps)
		return;

	hns_roce_bitmap_free_range(&qp_table->bitmap, hr_qp->qpn, 1, BITMAP_RR);
}

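/*
 * Validate the requested RQ depth and SGE count, round them up to the
 * hardware granularity and write the actual values back into @cap.
 */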
static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
		       struct hns_roce_qp *hr_qp, int has_rq)
{
	u32 cnt;

	/* If an SRQ exists, set the RQ-related fields to zero */
	if (!has_rq) {
		hr_qp->rq.wqe_cnt = 0;
		hr_qp->rq.max_gs = 0;
		hr_qp->rq_inl_buf.wqe_cnt = 0;
		cap->max_recv_wr = 0;
		cap->max_recv_sge = 0;

		return 0;
	}

	/* Check the validity of the requested QP capacity */
	if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes ||
	    cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
		ibdev_err(&hr_dev->ib_dev, "RQ config error, depth=%u, sge=%d\n",
			  cap->max_recv_wr, cap->max_recv_sge);
		return -EINVAL;
	}

	cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes));
	if (cnt > hr_dev->caps.max_wqes) {
		ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n",
			  cap->max_recv_wr);
		return -EINVAL;
	}

	hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge));

	hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
				    hr_qp->rq.max_gs);

	hr_qp->rq.wqe_cnt = cnt;
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		hr_qp->rq_inl_buf.wqe_cnt = cnt;
	else
		hr_qp->rq_inl_buf.wqe_cnt = 0;

	cap->max_recv_wr = cnt;
	cap->max_recv_sge = hr_qp->rq.max_gs;

	return 0;
}

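/*
 * Work out the extended SGE space the SQ needs: none on hw v1, all SGEs
 * for UD/GSI QPs, and only the overflow beyond HNS_ROCE_SGE_IN_WQE for
 * the other QP types.
 */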
static int set_extend_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
				struct hns_roce_qp *hr_qp,
				struct ib_qp_cap *cap)
{
	u32 cnt;

	cnt = max(1U, cap->max_send_sge);
	if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
		hr_qp->sq.max_gs = roundup_pow_of_two(cnt);
		hr_qp->sge.sge_cnt = 0;

		return 0;
	}

	hr_qp->sq.max_gs = cnt;

	/* The SGEs of UD SQ WQEs live in the extended SGE space */
	if (hr_qp->ibqp.qp_type == IB_QPT_GSI ||
	    hr_qp->ibqp.qp_type == IB_QPT_UD) {
		cnt = roundup_pow_of_two(sq_wqe_cnt * hr_qp->sq.max_gs);
	} else if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE) {
		cnt = roundup_pow_of_two(sq_wqe_cnt *
				     (hr_qp->sq.max_gs - HNS_ROCE_SGE_IN_WQE));
	} else {
		cnt = 0;
	}

	hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;

	/* If the number of extended SGEs is not zero, they MUST occupy at
	 * least one HNS_HW_PAGE_SIZE of space.
	 */
	hr_qp->sge.sge_cnt = cnt ?
			max(cnt, (u32)HNS_HW_PAGE_SIZE / HNS_ROCE_SGE_SIZE) : 0;

	return 0;
}

static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
					struct ib_qp_cap *cap,
					struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);

	/* Sanity check SQ size before proceeding */
	if (ucmd->log_sq_stride > max_sq_stride ||
	    ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
		ibdev_err(&hr_dev->ib_dev, "failed to check SQ stride size.\n");
		return -EINVAL;
	}

	if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		ibdev_err(&hr_dev->ib_dev, "failed to check SQ SGE size %u.\n",
			  cap->max_send_sge);
		return -EINVAL;
	}

	return 0;
}

static int set_user_sq_size(struct hns_roce_dev *hr_dev,
			    struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp,
			    struct hns_roce_ib_create_qp *ucmd)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 cnt = 0;
	int ret;

	if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
	    cnt > hr_dev->caps.max_wqes)
		return -EINVAL;

	ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
	if (ret) {
		ibdev_err(ibdev, "failed to check user SQ size, ret = %d.\n",
			  ret);
		return ret;
	}

	ret = set_extend_sge_param(hr_dev, cnt, hr_qp, cap);
	if (ret)
		return ret;

	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
	hr_qp->sq.wqe_cnt = cnt;

	return 0;
}

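/*
 * Describe the WQE buffer layout (SQ, extended SGE and RQ regions) so
 * that the MTR allocator can map it.
 */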
static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp,
			    struct hns_roce_buf_attr *buf_attr)
{
	int buf_size;
	int idx = 0;

	hr_qp->buff_size = 0;

	/* SQ WQE */
	hr_qp->sq.offset = 0;
	buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt,
					  hr_qp->sq.wqe_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	/* extended SGE WQE in SQ */
	hr_qp->sge.offset = hr_qp->buff_size;
	buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt,
					  hr_qp->sge.sge_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	/* RQ WQE */
	hr_qp->rq.offset = hr_qp->buff_size;
	buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt,
					  hr_qp->rq.wqe_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	if (hr_qp->buff_size < 1)
		return -EINVAL;

	buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
	buf_attr->fixed_page = true;
	buf_attr->region_count = idx;

	return 0;
}

static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
			      struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 cnt;
	int ret;

	if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		ibdev_err(ibdev,
			  "failed to check SQ WR or SGE num, ret = %d.\n",
			  -EINVAL);
		return -EINVAL;
	}

	cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes));
	if (cnt > hr_dev->caps.max_wqes) {
		ibdev_err(ibdev, "failed to check WQE num, WQE num = %u.\n",
			  cnt);
		return -EINVAL;
	}

	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
	hr_qp->sq.wqe_cnt = cnt;

	ret = set_extend_sge_param(hr_dev, cnt, hr_qp, cap);
	if (ret)
		return ret;

	/* sync the parameters of the kernel QP to the user's configuration */
	cap->max_send_wr = cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	return 0;
}

static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
		return 0;

	return 1;
}

static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}

static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp,
			       struct ib_qp_init_attr *init_attr)
{
	u32 max_recv_sge = init_attr->cap.max_recv_sge;
	u32 wqe_cnt = hr_qp->rq_inl_buf.wqe_cnt;
	struct hns_roce_rinl_wqe *wqe_list;
	int i;

	/* allocate recv inline buf */
	wqe_list = kcalloc(wqe_cnt, sizeof(struct hns_roce_rinl_wqe),
			   GFP_KERNEL);

	if (!wqe_list)
		goto err;

	/* Allocate one contiguous buffer for all the inline SGEs we need */
	wqe_list[0].sg_list = kcalloc(wqe_cnt, (max_recv_sge *
				      sizeof(struct hns_roce_rinl_sge)),
				      GFP_KERNEL);
	if (!wqe_list[0].sg_list)
		goto err_wqe_list;

	/* Assign slices of the sg_list buffer to each inline WQE */
	for (i = 1; i < wqe_cnt; i++)
		wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge];

	hr_qp->rq_inl_buf.wqe_list = wqe_list;

	return 0;

err_wqe_list:
	kfree(wqe_list);

err:
	return -ENOMEM;
}

static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
{
	if (hr_qp->rq_inl_buf.wqe_list)
		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
	kfree(hr_qp->rq_inl_buf.wqe_list);
}

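/*
 * Allocate the WQE buffer: an optional RQ inline buffer for kernel QPs
 * plus the MTR-managed buffer laid out by set_wqe_buf_attr().
 */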
static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			struct ib_qp_init_attr *init_attr,
			struct ib_udata *udata, unsigned long addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int ret;

	if (!udata && hr_qp->rq_inl_buf.wqe_cnt) {
		ret = alloc_rq_inline_buf(hr_qp, init_attr);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to alloc inline buf, ret = %d.\n",
				  ret);
			return ret;
		}
	} else {
		hr_qp->rq_inl_buf.wqe_list = NULL;
	}

	ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr);
	if (ret) {
		ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret);
		goto err_inline;
	}
	ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr,
				  HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
				  udata, addr);
	if (ret) {
		ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret);
		goto err_inline;
	}

	return 0;
err_inline:
	free_rq_inline_buf(hr_qp);

	return ret;
}

static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr);
	free_rq_inline_buf(hr_qp);
}

static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
				   struct ib_qp_init_attr *init_attr,
				   struct ib_udata *udata,
				   struct hns_roce_ib_create_qp_resp *resp,
				   struct hns_roce_ib_create_qp *ucmd)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
		udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
		hns_roce_qp_has_sq(init_attr) &&
		udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr));
}

static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
				   struct ib_qp_init_attr *init_attr,
				   struct ib_udata *udata,
				   struct hns_roce_ib_create_qp_resp *resp)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
		hns_roce_qp_has_rq(init_attr));
}

static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
				     struct ib_qp_init_attr *init_attr)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		hns_roce_qp_has_rq(init_attr));
}

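/*
 * Set up the doorbells: map user-space record doorbells when the ucmd
 * and resp layouts allow it, otherwise point the kernel QP at the
 * hardware doorbell registers and, if supported, allocate a kernel RQ
 * record doorbell.
 */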
static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		       struct ib_qp_init_attr *init_attr,
		       struct ib_udata *udata,
		       struct hns_roce_ib_create_qp *ucmd,
		       struct hns_roce_ib_create_qp_resp *resp)
{
	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct hns_roce_ucontext, ibucontext);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (udata) {
		if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
			ret = hns_roce_db_map_user(uctx, udata, ucmd->sdb_addr,
						   &hr_qp->sdb);
			if (ret) {
				ibdev_err(ibdev,
					  "failed to map user SQ doorbell, ret = %d.\n",
					  ret);
				goto err_out;
			}
			hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
		}

		if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
			ret = hns_roce_db_map_user(uctx, udata, ucmd->db_addr,
						   &hr_qp->rdb);
			if (ret) {
				ibdev_err(ibdev,
					  "failed to map user RQ doorbell, ret = %d.\n",
					  ret);
				goto err_sdb;
			}
			hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
		}
	} else {
		/* QP doorbell register address */
		hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;
		hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;

		if (kernel_qp_has_rdb(hr_dev, init_attr)) {
			ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
			if (ret) {
				ibdev_err(ibdev,
					  "failed to alloc kernel RQ doorbell, ret = %d.\n",
					  ret);
				goto err_out;
			}
			*hr_qp->rdb.db_record = 0;
			hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
		}
	}

	return 0;
err_sdb:
	if (udata && hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
		hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
err_out:
	return ret;
}

static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		       struct ib_udata *udata)
{
	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct hns_roce_ucontext, ibucontext);

	if (udata) {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
			hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
			hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
	} else {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
			hns_roce_free_db(hr_dev, &hr_qp->rdb);
	}
}

static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u64 *sq_wrid = NULL;
	u64 *rq_wrid = NULL;
	int ret;

	sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(sq_wrid)) {
		ibdev_err(ibdev, "failed to alloc SQ wrid.\n");
		return -ENOMEM;
	}

	if (hr_qp->rq.wqe_cnt) {
		rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
		if (ZERO_OR_NULL_PTR(rq_wrid)) {
			ibdev_err(ibdev, "failed to alloc RQ wrid.\n");
			ret = -ENOMEM;
			goto err_sq;
		}
	}

	hr_qp->sq.wrid = sq_wrid;
	hr_qp->rq.wrid = rq_wrid;
	return 0;
err_sq:
	kfree(sq_wrid);

	return ret;
}

static void free_kernel_wrid(struct hns_roce_qp *hr_qp)
{
	kfree(hr_qp->rq.wrid);
	kfree(hr_qp->sq.wrid);
}

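/*
 * Derive the QP parameters from the init attributes: inline data limit,
 * signalling type, RQ size, and the user or kernel SQ size.
 */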
static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			struct ib_qp_init_attr *init_attr,
			struct ib_udata *udata,
			struct hns_roce_ib_create_qp *ucmd)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	hr_qp->ibqp.qp_type = init_attr->qp_type;

	if (init_attr->cap.max_inline_data > hr_dev->caps.max_sq_inline)
		init_attr->cap.max_inline_data = hr_dev->caps.max_sq_inline;

	hr_qp->max_inline_data = init_attr->cap.max_inline_data;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
	else
		hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;

	ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp,
			  hns_roce_qp_has_rq(init_attr));
	if (ret) {
		ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n",
			  ret);
		return ret;
	}

	if (udata) {
		ret = ib_copy_from_udata(ucmd, udata,
					 min(udata->inlen, sizeof(*ucmd)));
		if (ret) {
			ibdev_err(ibdev,
				  "failed to copy QP ucmd, ret = %d\n", ret);
			return ret;
		}

		ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
		if (ret)
			ibdev_err(ibdev,
				  "failed to set user SQ size, ret = %d.\n",
				  ret);
	} else {
		if (init_attr->create_flags &
		    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
			ibdev_err(ibdev, "multicast loopback is not supported\n");
			return -EINVAL;
		}

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
			ibdev_err(ibdev, "IPoIB UD LSO is not supported\n");
			return -EINVAL;
		}

		ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
		if (ret)
			ibdev_err(ibdev,
				  "failed to set kernel SQ size, ret = %d.\n",
				  ret);
	}

	return ret;
}

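/*
 * Common creation path for all QP types: set the parameters, then
 * allocate wrid arrays (kernel QPs only), doorbells, the WQE buffer,
 * a QPN and the hardware context, and finally publish the QP and report
 * its capability flags back to user space.
 */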
hns_roce_create_qp_common(struct hns_roce_dev * hr_dev,struct ib_pd * ib_pd,struct ib_qp_init_attr * init_attr,struct ib_udata * udata,struct hns_roce_qp * hr_qp)901*4882a593Smuzhiyun static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
902*4882a593Smuzhiyun 				     struct ib_pd *ib_pd,
903*4882a593Smuzhiyun 				     struct ib_qp_init_attr *init_attr,
904*4882a593Smuzhiyun 				     struct ib_udata *udata,
905*4882a593Smuzhiyun 				     struct hns_roce_qp *hr_qp)
906*4882a593Smuzhiyun {
907*4882a593Smuzhiyun 	struct hns_roce_ib_create_qp_resp resp = {};
908*4882a593Smuzhiyun 	struct ib_device *ibdev = &hr_dev->ib_dev;
909*4882a593Smuzhiyun 	struct hns_roce_ib_create_qp ucmd;
910*4882a593Smuzhiyun 	int ret;
911*4882a593Smuzhiyun 
912*4882a593Smuzhiyun 	mutex_init(&hr_qp->mutex);
913*4882a593Smuzhiyun 	spin_lock_init(&hr_qp->sq.lock);
914*4882a593Smuzhiyun 	spin_lock_init(&hr_qp->rq.lock);
915*4882a593Smuzhiyun 
916*4882a593Smuzhiyun 	hr_qp->state = IB_QPS_RESET;
917*4882a593Smuzhiyun 	hr_qp->flush_flag = 0;
918*4882a593Smuzhiyun 
919*4882a593Smuzhiyun 	ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
920*4882a593Smuzhiyun 	if (ret) {
921*4882a593Smuzhiyun 		ibdev_err(ibdev, "failed to set QP param, ret = %d.\n", ret);
922*4882a593Smuzhiyun 		return ret;
923*4882a593Smuzhiyun 	}
924*4882a593Smuzhiyun 
925*4882a593Smuzhiyun 	if (!udata) {
926*4882a593Smuzhiyun 		ret = alloc_kernel_wrid(hr_dev, hr_qp);
927*4882a593Smuzhiyun 		if (ret) {
928*4882a593Smuzhiyun 			ibdev_err(ibdev, "failed to alloc wrid, ret = %d.\n",
929*4882a593Smuzhiyun 				  ret);
930*4882a593Smuzhiyun 			return ret;
931*4882a593Smuzhiyun 		}
932*4882a593Smuzhiyun 	}
933*4882a593Smuzhiyun 
934*4882a593Smuzhiyun 	ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
935*4882a593Smuzhiyun 	if (ret) {
936*4882a593Smuzhiyun 		ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n",
937*4882a593Smuzhiyun 			  ret);
938*4882a593Smuzhiyun 		goto err_wrid;
939*4882a593Smuzhiyun 	}
940*4882a593Smuzhiyun 
941*4882a593Smuzhiyun 	ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
942*4882a593Smuzhiyun 	if (ret) {
943*4882a593Smuzhiyun 		ibdev_err(ibdev, "failed to alloc QP buffer, ret = %d.\n", ret);
944*4882a593Smuzhiyun 		goto err_db;
945*4882a593Smuzhiyun 	}
946*4882a593Smuzhiyun 
947*4882a593Smuzhiyun 	ret = alloc_qpn(hr_dev, hr_qp);
948*4882a593Smuzhiyun 	if (ret) {
949*4882a593Smuzhiyun 		ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
950*4882a593Smuzhiyun 		goto err_buf;
951*4882a593Smuzhiyun 	}
952*4882a593Smuzhiyun 
953*4882a593Smuzhiyun 	ret = alloc_qpc(hr_dev, hr_qp);
954*4882a593Smuzhiyun 	if (ret) {
955*4882a593Smuzhiyun 		ibdev_err(ibdev, "failed to alloc QP context, ret = %d.\n",
956*4882a593Smuzhiyun 			  ret);
957*4882a593Smuzhiyun 		goto err_qpn;
958*4882a593Smuzhiyun 	}
959*4882a593Smuzhiyun 
960*4882a593Smuzhiyun 	ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
961*4882a593Smuzhiyun 	if (ret) {
962*4882a593Smuzhiyun 		ibdev_err(ibdev, "failed to store QP, ret = %d.\n", ret);
963*4882a593Smuzhiyun 		goto err_qpc;
964*4882a593Smuzhiyun 	}
965*4882a593Smuzhiyun 
966*4882a593Smuzhiyun 	if (udata) {
967*4882a593Smuzhiyun 		resp.cap_flags = hr_qp->en_flags;
968*4882a593Smuzhiyun 		ret = ib_copy_to_udata(udata, &resp,
969*4882a593Smuzhiyun 				       min(udata->outlen, sizeof(resp)));
970*4882a593Smuzhiyun 		if (ret) {
971*4882a593Smuzhiyun 			ibdev_err(ibdev, "copy qp resp failed!\n");
972*4882a593Smuzhiyun 			goto err_store;
973*4882a593Smuzhiyun 		}
974*4882a593Smuzhiyun 	}
975*4882a593Smuzhiyun 
976*4882a593Smuzhiyun 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
977*4882a593Smuzhiyun 		ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
978*4882a593Smuzhiyun 		if (ret)
979*4882a593Smuzhiyun 			goto err_store;
980*4882a593Smuzhiyun 	}
981*4882a593Smuzhiyun 
982*4882a593Smuzhiyun 	hr_qp->ibqp.qp_num = hr_qp->qpn;
983*4882a593Smuzhiyun 	hr_qp->event = hns_roce_ib_qp_event;
984*4882a593Smuzhiyun 	atomic_set(&hr_qp->refcount, 1);
985*4882a593Smuzhiyun 	init_completion(&hr_qp->free);
986*4882a593Smuzhiyun 
987*4882a593Smuzhiyun 	return 0;
988*4882a593Smuzhiyun 
989*4882a593Smuzhiyun err_store:
990*4882a593Smuzhiyun 	hns_roce_qp_remove(hr_dev, hr_qp);
991*4882a593Smuzhiyun err_qpc:
992*4882a593Smuzhiyun 	free_qpc(hr_dev, hr_qp);
993*4882a593Smuzhiyun err_qpn:
994*4882a593Smuzhiyun 	free_qpn(hr_dev, hr_qp);
995*4882a593Smuzhiyun err_buf:
996*4882a593Smuzhiyun 	free_qp_buf(hr_dev, hr_qp);
997*4882a593Smuzhiyun err_db:
998*4882a593Smuzhiyun 	free_qp_db(hr_dev, hr_qp, udata);
999*4882a593Smuzhiyun err_wrid:
1000*4882a593Smuzhiyun 	free_kernel_wrid(hr_qp);
1001*4882a593Smuzhiyun 	return ret;
1002*4882a593Smuzhiyun }
1003*4882a593Smuzhiyun 
hns_roce_qp_destroy(struct hns_roce_dev * hr_dev,struct hns_roce_qp * hr_qp,struct ib_udata * udata)1004*4882a593Smuzhiyun void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
1005*4882a593Smuzhiyun 			 struct ib_udata *udata)
1006*4882a593Smuzhiyun {
1007*4882a593Smuzhiyun 	if (atomic_dec_and_test(&hr_qp->refcount))
1008*4882a593Smuzhiyun 		complete(&hr_qp->free);
1009*4882a593Smuzhiyun 	wait_for_completion(&hr_qp->free);
1010*4882a593Smuzhiyun 
1011*4882a593Smuzhiyun 	free_qpc(hr_dev, hr_qp);
1012*4882a593Smuzhiyun 	free_qpn(hr_dev, hr_qp);
1013*4882a593Smuzhiyun 	free_qp_buf(hr_dev, hr_qp);
1014*4882a593Smuzhiyun 	free_kernel_wrid(hr_qp);
1015*4882a593Smuzhiyun 	free_qp_db(hr_dev, hr_qp, udata);
1016*4882a593Smuzhiyun 
1017*4882a593Smuzhiyun 	kfree(hr_qp);
1018*4882a593Smuzhiyun }
1019*4882a593Smuzhiyun 
hns_roce_create_qp(struct ib_pd * pd,struct ib_qp_init_attr * init_attr,struct ib_udata * udata)1020*4882a593Smuzhiyun struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
1021*4882a593Smuzhiyun 				 struct ib_qp_init_attr *init_attr,
1022*4882a593Smuzhiyun 				 struct ib_udata *udata)
1023*4882a593Smuzhiyun {
1024*4882a593Smuzhiyun 	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
1025*4882a593Smuzhiyun 	struct ib_device *ibdev = &hr_dev->ib_dev;
1026*4882a593Smuzhiyun 	struct hns_roce_qp *hr_qp;
1027*4882a593Smuzhiyun 	int ret;
1028*4882a593Smuzhiyun 
1029*4882a593Smuzhiyun 	switch (init_attr->qp_type) {
1030*4882a593Smuzhiyun 	case IB_QPT_RC:
1031*4882a593Smuzhiyun 	case IB_QPT_GSI:
1032*4882a593Smuzhiyun 		break;
1033*4882a593Smuzhiyun 	default:
1034*4882a593Smuzhiyun 		ibdev_err(ibdev, "not support QP type %d\n",
1035*4882a593Smuzhiyun 			  init_attr->qp_type);
1036*4882a593Smuzhiyun 		return ERR_PTR(-EOPNOTSUPP);
1037*4882a593Smuzhiyun 	}
1038*4882a593Smuzhiyun 
1039*4882a593Smuzhiyun 	hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
1040*4882a593Smuzhiyun 	if (!hr_qp)
1041*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
1042*4882a593Smuzhiyun 
1043*4882a593Smuzhiyun 	if (init_attr->qp_type == IB_QPT_GSI) {
1044*4882a593Smuzhiyun 		hr_qp->port = init_attr->port_num - 1;
1045*4882a593Smuzhiyun 		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
1046*4882a593Smuzhiyun 	}
1047*4882a593Smuzhiyun 
1048*4882a593Smuzhiyun 	ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp);
1049*4882a593Smuzhiyun 	if (ret) {
1050*4882a593Smuzhiyun 		ibdev_err(ibdev, "Create QP type 0x%x failed(%d)\n",
1051*4882a593Smuzhiyun 			  init_attr->qp_type, ret);
1052*4882a593Smuzhiyun 		ibdev_err(ibdev, "Create GSI QP failed!\n");
1053*4882a593Smuzhiyun 		kfree(hr_qp);
1054*4882a593Smuzhiyun 		return ERR_PTR(ret);
1055*4882a593Smuzhiyun 	}
1056*4882a593Smuzhiyun 	return &hr_qp->ibqp;
1057*4882a593Smuzhiyun }
1058*4882a593Smuzhiyun 
to_hr_qp_type(int qp_type)1059*4882a593Smuzhiyun int to_hr_qp_type(int qp_type)
1060*4882a593Smuzhiyun {
1061*4882a593Smuzhiyun 	int transport_type;
1062*4882a593Smuzhiyun 
1063*4882a593Smuzhiyun 	if (qp_type == IB_QPT_RC)
1064*4882a593Smuzhiyun 		transport_type = SERV_TYPE_RC;
1065*4882a593Smuzhiyun 	else if (qp_type == IB_QPT_UC)
1066*4882a593Smuzhiyun 		transport_type = SERV_TYPE_UC;
1067*4882a593Smuzhiyun 	else if (qp_type == IB_QPT_UD)
1068*4882a593Smuzhiyun 		transport_type = SERV_TYPE_UD;
1069*4882a593Smuzhiyun 	else if (qp_type == IB_QPT_GSI)
1070*4882a593Smuzhiyun 		transport_type = SERV_TYPE_UD;
1071*4882a593Smuzhiyun 	else
1072*4882a593Smuzhiyun 		transport_type = -1;
1073*4882a593Smuzhiyun 
1074*4882a593Smuzhiyun 	return transport_type;
1075*4882a593Smuzhiyun }
1076*4882a593Smuzhiyun 
check_mtu_validate(struct hns_roce_dev * hr_dev,struct hns_roce_qp * hr_qp,struct ib_qp_attr * attr,int attr_mask)1077*4882a593Smuzhiyun static int check_mtu_validate(struct hns_roce_dev *hr_dev,
1078*4882a593Smuzhiyun 			      struct hns_roce_qp *hr_qp,
1079*4882a593Smuzhiyun 			      struct ib_qp_attr *attr, int attr_mask)
1080*4882a593Smuzhiyun {
1081*4882a593Smuzhiyun 	enum ib_mtu active_mtu;
1082*4882a593Smuzhiyun 	int p;
1083*4882a593Smuzhiyun 
1084*4882a593Smuzhiyun 	p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
1085*4882a593Smuzhiyun 	active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
1086*4882a593Smuzhiyun 
1087*4882a593Smuzhiyun 	if ((hr_dev->caps.max_mtu >= IB_MTU_2048 &&
1088*4882a593Smuzhiyun 	    attr->path_mtu > hr_dev->caps.max_mtu) ||
1089*4882a593Smuzhiyun 	    attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) {
1090*4882a593Smuzhiyun 		ibdev_err(&hr_dev->ib_dev,
1091*4882a593Smuzhiyun 			"attr path_mtu(%d)invalid while modify qp",
1092*4882a593Smuzhiyun 			attr->path_mtu);
1093*4882a593Smuzhiyun 		return -EINVAL;
1094*4882a593Smuzhiyun 	}
1095*4882a593Smuzhiyun 
1096*4882a593Smuzhiyun 	return 0;
1097*4882a593Smuzhiyun }
1098*4882a593Smuzhiyun 
hns_roce_check_qp_attr(struct ib_qp * ibqp,struct ib_qp_attr * attr,int attr_mask)1099*4882a593Smuzhiyun static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1100*4882a593Smuzhiyun 				  int attr_mask)
1101*4882a593Smuzhiyun {
1102*4882a593Smuzhiyun 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
1103*4882a593Smuzhiyun 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
1104*4882a593Smuzhiyun 	int p;
1105*4882a593Smuzhiyun 
1106*4882a593Smuzhiyun 	if ((attr_mask & IB_QP_PORT) &&
1107*4882a593Smuzhiyun 	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
1108*4882a593Smuzhiyun 		ibdev_err(&hr_dev->ib_dev, "invalid attr, port_num = %u.\n",
1109*4882a593Smuzhiyun 			  attr->port_num);
1110*4882a593Smuzhiyun 		return -EINVAL;
1111*4882a593Smuzhiyun 	}
1112*4882a593Smuzhiyun 
1113*4882a593Smuzhiyun 	if (attr_mask & IB_QP_PKEY_INDEX) {
1114*4882a593Smuzhiyun 		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
1115*4882a593Smuzhiyun 		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
1116*4882a593Smuzhiyun 			ibdev_err(&hr_dev->ib_dev,
1117*4882a593Smuzhiyun 				  "invalid attr, pkey_index = %u.\n",
1118*4882a593Smuzhiyun 				  attr->pkey_index);
1119*4882a593Smuzhiyun 			return -EINVAL;
1120*4882a593Smuzhiyun 		}
1121*4882a593Smuzhiyun 	}
1122*4882a593Smuzhiyun 
1123*4882a593Smuzhiyun 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
1124*4882a593Smuzhiyun 	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
1125*4882a593Smuzhiyun 		ibdev_err(&hr_dev->ib_dev,
1126*4882a593Smuzhiyun 			  "invalid attr, max_rd_atomic = %u.\n",
1127*4882a593Smuzhiyun 			  attr->max_rd_atomic);
1128*4882a593Smuzhiyun 		return -EINVAL;
1129*4882a593Smuzhiyun 	}
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
1132*4882a593Smuzhiyun 	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
1133*4882a593Smuzhiyun 		ibdev_err(&hr_dev->ib_dev,
1134*4882a593Smuzhiyun 			  "invalid attr, max_dest_rd_atomic = %u.\n",
1135*4882a593Smuzhiyun 			  attr->max_dest_rd_atomic);
1136*4882a593Smuzhiyun 		return -EINVAL;
1137*4882a593Smuzhiyun 	}
1138*4882a593Smuzhiyun 
1139*4882a593Smuzhiyun 	if (attr_mask & IB_QP_PATH_MTU)
1140*4882a593Smuzhiyun 		return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask);
1141*4882a593Smuzhiyun 
1142*4882a593Smuzhiyun 	return 0;
1143*4882a593Smuzhiyun }
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1146*4882a593Smuzhiyun 		       int attr_mask, struct ib_udata *udata)
1147*4882a593Smuzhiyun {
1148*4882a593Smuzhiyun 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
1149*4882a593Smuzhiyun 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
1150*4882a593Smuzhiyun 	enum ib_qp_state cur_state, new_state;
1151*4882a593Smuzhiyun 	int ret = -EINVAL;
1152*4882a593Smuzhiyun 
1153*4882a593Smuzhiyun 	mutex_lock(&hr_qp->mutex);
1154*4882a593Smuzhiyun 
1155*4882a593Smuzhiyun 	if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state)
1156*4882a593Smuzhiyun 		goto out;
1157*4882a593Smuzhiyun 
1158*4882a593Smuzhiyun 	cur_state = hr_qp->state;
1159*4882a593Smuzhiyun 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1160*4882a593Smuzhiyun 
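	/*
	 * For a user QP moving to the error state, the current producer
	 * indices live in the record doorbell pages shared with user space;
	 * snapshot them here so the pending work requests can be flushed
	 * with CQEs.
	 */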
1161*4882a593Smuzhiyun 	if (ibqp->uobject &&
1162*4882a593Smuzhiyun 	    (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
1163*4882a593Smuzhiyun 		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) {
1164*4882a593Smuzhiyun 			hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
1165*4882a593Smuzhiyun 
1166*4882a593Smuzhiyun 			if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
1167*4882a593Smuzhiyun 				hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
1168*4882a593Smuzhiyun 		} else {
1169*4882a593Smuzhiyun 			ibdev_warn(&hr_dev->ib_dev,
1170*4882a593Smuzhiyun 				  "flush cqe is not supported in userspace!\n");
1171*4882a593Smuzhiyun 			goto out;
1172*4882a593Smuzhiyun 		}
1173*4882a593Smuzhiyun 	}
1174*4882a593Smuzhiyun 
1175*4882a593Smuzhiyun 	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
1176*4882a593Smuzhiyun 				attr_mask)) {
1177*4882a593Smuzhiyun 		ibdev_err(&hr_dev->ib_dev, "invalid QP state transition %d -> %d\n", cur_state, new_state);
1178*4882a593Smuzhiyun 		goto out;
1179*4882a593Smuzhiyun 	}
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun 	ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask);
1182*4882a593Smuzhiyun 	if (ret)
1183*4882a593Smuzhiyun 		goto out;
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
1186*4882a593Smuzhiyun 		if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
1187*4882a593Smuzhiyun 			ret = -EPERM;
1188*4882a593Smuzhiyun 			ibdev_err(&hr_dev->ib_dev,
1189*4882a593Smuzhiyun 				  "RST2RST state is not supported\n");
1190*4882a593Smuzhiyun 		} else {
1191*4882a593Smuzhiyun 			ret = 0;
1192*4882a593Smuzhiyun 		}
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 		goto out;
1195*4882a593Smuzhiyun 	}
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun 	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
1198*4882a593Smuzhiyun 				    new_state);
1199*4882a593Smuzhiyun 
1200*4882a593Smuzhiyun out:
1201*4882a593Smuzhiyun 	mutex_unlock(&hr_qp->mutex);
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun 	return ret;
1204*4882a593Smuzhiyun }
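
/*
 * Illustrative sketch (not part of the driver): ULPs drive state changes
 * through ib_modify_qp(), which lands in hns_roce_modify_qp() above. A
 * hypothetical RESET -> INIT transition for an RC QP; the helper name and
 * chosen access flags are assumptions, the verbs call and masks are real.
 */
static __maybe_unused int example_modify_to_init(struct ib_qp *qp, u8 port)
{
	struct ib_qp_attr attr = {
		.qp_state	 = IB_QPS_INIT,
		.pkey_index	 = 0,
		.port_num	 = port,
		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
	};

	return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
				       IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}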
1205*4882a593Smuzhiyun 
1206*4882a593Smuzhiyun void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
1207*4882a593Smuzhiyun 		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
1208*4882a593Smuzhiyun {
1209*4882a593Smuzhiyun 	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
1210*4882a593Smuzhiyun 		__acquire(&send_cq->lock);
1211*4882a593Smuzhiyun 		__acquire(&recv_cq->lock);
1212*4882a593Smuzhiyun 	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
1213*4882a593Smuzhiyun 		spin_lock_irq(&send_cq->lock);
1214*4882a593Smuzhiyun 		__acquire(&recv_cq->lock);
1215*4882a593Smuzhiyun 	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
1216*4882a593Smuzhiyun 		spin_lock_irq(&recv_cq->lock);
1217*4882a593Smuzhiyun 		__acquire(&send_cq->lock);
1218*4882a593Smuzhiyun 	} else if (send_cq == recv_cq) {
1219*4882a593Smuzhiyun 		spin_lock_irq(&send_cq->lock);
1220*4882a593Smuzhiyun 		__acquire(&recv_cq->lock);
1221*4882a593Smuzhiyun 	} else if (send_cq->cqn < recv_cq->cqn) {
1222*4882a593Smuzhiyun 		spin_lock_irq(&send_cq->lock);
1223*4882a593Smuzhiyun 		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
1224*4882a593Smuzhiyun 	} else {
1225*4882a593Smuzhiyun 		spin_lock_irq(&recv_cq->lock);
1226*4882a593Smuzhiyun 		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
1227*4882a593Smuzhiyun 	}
1228*4882a593Smuzhiyun }
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
1231*4882a593Smuzhiyun 			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
1232*4882a593Smuzhiyun 			 __releases(&recv_cq->lock)
1233*4882a593Smuzhiyun {
1234*4882a593Smuzhiyun 	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
1235*4882a593Smuzhiyun 		__release(&recv_cq->lock);
1236*4882a593Smuzhiyun 		__release(&send_cq->lock);
1237*4882a593Smuzhiyun 	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
1238*4882a593Smuzhiyun 		__release(&recv_cq->lock);
1239*4882a593Smuzhiyun 		spin_unlock(&send_cq->lock);
1240*4882a593Smuzhiyun 	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
1241*4882a593Smuzhiyun 		__release(&send_cq->lock);
1242*4882a593Smuzhiyun 		spin_unlock(&recv_cq->lock);
1243*4882a593Smuzhiyun 	} else if (send_cq == recv_cq) {
1244*4882a593Smuzhiyun 		__release(&recv_cq->lock);
1245*4882a593Smuzhiyun 		spin_unlock_irq(&send_cq->lock);
1246*4882a593Smuzhiyun 	} else if (send_cq->cqn < recv_cq->cqn) {
1247*4882a593Smuzhiyun 		spin_unlock(&recv_cq->lock);
1248*4882a593Smuzhiyun 		spin_unlock_irq(&send_cq->lock);
1249*4882a593Smuzhiyun 	} else {
1250*4882a593Smuzhiyun 		spin_unlock(&send_cq->lock);
1251*4882a593Smuzhiyun 		spin_unlock_irq(&recv_cq->lock);
1252*4882a593Smuzhiyun 	}
1253*4882a593Smuzhiyun }
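
/*
 * Illustrative sketch (not part of the driver): the pair of helpers above
 * avoids ABBA deadlock by always taking the two CQ locks in ascending CQN
 * order. The same idea in a generic, hypothetical form (ordering by
 * address instead of CQN):
 */
static __maybe_unused void example_lock_two(spinlock_t *a, spinlock_t *b)
{
	if (a == b) {
		spin_lock_irq(a);
	} else if (a < b) {
		spin_lock_irq(a);
		spin_lock_nested(b, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(b);
		spin_lock_nested(a, SINGLE_DEPTH_NESTING);
	}
}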
1254*4882a593Smuzhiyun 
1255*4882a593Smuzhiyun static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
1256*4882a593Smuzhiyun {
1257*4882a593Smuzhiyun 	return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
1258*4882a593Smuzhiyun }
1259*4882a593Smuzhiyun 
1260*4882a593Smuzhiyun void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
1261*4882a593Smuzhiyun {
1262*4882a593Smuzhiyun 	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
1263*4882a593Smuzhiyun }
1264*4882a593Smuzhiyun 
1265*4882a593Smuzhiyun void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, int n)
1266*4882a593Smuzhiyun {
1267*4882a593Smuzhiyun 	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
1268*4882a593Smuzhiyun }
1269*4882a593Smuzhiyun 
1270*4882a593Smuzhiyun void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, int n)
1271*4882a593Smuzhiyun {
1272*4882a593Smuzhiyun 	return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift));
1273*4882a593Smuzhiyun }
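
/*
 * Illustrative note (not part of the driver): the three getters above all
 * compute "queue base offset + index * entry size", with the entry size
 * kept as a power-of-two shift. A hypothetical byte-offset helper:
 */
static __maybe_unused u32 example_wqe_offset(u32 queue_offset, u32 index,
					     u32 entry_shift)
{
	return queue_offset + (index << entry_shift); /* index * (1 << shift) */
}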
1274*4882a593Smuzhiyun 
1275*4882a593Smuzhiyun bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
1276*4882a593Smuzhiyun 			  struct ib_cq *ib_cq)
1277*4882a593Smuzhiyun {
1278*4882a593Smuzhiyun 	struct hns_roce_cq *hr_cq;
1279*4882a593Smuzhiyun 	u32 cur;
1280*4882a593Smuzhiyun 
1281*4882a593Smuzhiyun 	cur = hr_wq->head - hr_wq->tail;
1282*4882a593Smuzhiyun 	if (likely(cur + nreq < hr_wq->wqe_cnt))
1283*4882a593Smuzhiyun 		return false;
1284*4882a593Smuzhiyun 
1285*4882a593Smuzhiyun 	hr_cq = to_hr_cq(ib_cq);
1286*4882a593Smuzhiyun 	spin_lock(&hr_cq->lock);
1287*4882a593Smuzhiyun 	cur = hr_wq->head - hr_wq->tail;
1288*4882a593Smuzhiyun 	spin_unlock(&hr_cq->lock);
1289*4882a593Smuzhiyun 
1290*4882a593Smuzhiyun 	return cur + nreq >= hr_wq->wqe_cnt;
1291*4882a593Smuzhiyun }
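
/*
 * Illustrative note (not part of the driver): head and tail are
 * free-running u32 counters, so their unsigned difference is the queue
 * occupancy even after wrap-around; the re-read above is done under the
 * CQ lock because polling the CQ is what advances the tail. A hypothetical
 * standalone form of the same test:
 */
static __maybe_unused bool example_wq_would_overflow(u32 head, u32 tail,
						     u32 nreq, u32 wqe_cnt)
{
	return (head - tail) + nreq >= wqe_cnt; /* well-defined modulo 2^32 */
}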
1292*4882a593Smuzhiyun 
1293*4882a593Smuzhiyun int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
1294*4882a593Smuzhiyun {
1295*4882a593Smuzhiyun 	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
1296*4882a593Smuzhiyun 	int reserved_from_top = 0;
1297*4882a593Smuzhiyun 	int reserved_from_bot;
1298*4882a593Smuzhiyun 	int ret;
1299*4882a593Smuzhiyun 
1300*4882a593Smuzhiyun 	mutex_init(&qp_table->scc_mutex);
1301*4882a593Smuzhiyun 	xa_init(&hr_dev->qp_table_xa);
1302*4882a593Smuzhiyun 
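	/*
	 * The lowest caps.reserved_qps QPNs are set aside for special QPs
	 * and must never be handed out, so exclude them from the bottom of
	 * the allocatable range.
	 */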
1303*4882a593Smuzhiyun 	reserved_from_bot = hr_dev->caps.reserved_qps;
1304*4882a593Smuzhiyun 
1305*4882a593Smuzhiyun 	ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
1306*4882a593Smuzhiyun 				   hr_dev->caps.num_qps - 1, reserved_from_bot,
1307*4882a593Smuzhiyun 				   reserved_from_top);
1308*4882a593Smuzhiyun 	if (ret) {
1309*4882a593Smuzhiyun 		dev_err(hr_dev->dev, "QP bitmap init failed, error = %d.\n",
1310*4882a593Smuzhiyun 			ret);
1311*4882a593Smuzhiyun 		return ret;
1312*4882a593Smuzhiyun 	}
1313*4882a593Smuzhiyun 
1314*4882a593Smuzhiyun 	return 0;
1315*4882a593Smuzhiyun }
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
1318*4882a593Smuzhiyun {
1319*4882a593Smuzhiyun 	hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
1320*4882a593Smuzhiyun }