/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>
#include <rdma/uverbs_ioctl.h>

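/* Return a pointer to the WQE at index n in the SRQ's WQE buffer. */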
static void *get_wqe(struct mlx4_ib_srq *srq, int n)
{
	return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
}

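/*
 * Async event handler registered with the mlx4 core: translate low-level
 * SRQ events (limit reached, catastrophic error) into IB events and pass
 * them on to the consumer's event handler, if one is set.
 */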
static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

	if (ibsrq->event_handler) {
		event.device      = ibsrq->device;
		event.element.srq = ibsrq;
		switch (type) {
		case MLX4_EVENT_TYPE_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			pr_warn("Unexpected event type %d on SRQ %06x\n",
				type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}

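/*
 * Create an SRQ.  The WQE buffer and doorbell record either come from
 * userspace (pinned with ib_umem_get()) or are allocated in the kernel,
 * after which the hardware SRQ object is allocated and armed with
 * mlx4_ib_srq_event() as its event handler.
 */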
int mlx4_ib_create_srq(struct ib_srq *ib_srq,
		       struct ib_srq_init_attr *init_attr,
		       struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ib_srq->device);
	struct mlx4_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx4_ib_ucontext, ibucontext);
	struct mlx4_ib_srq *srq = to_msrq(ib_srq);
	struct mlx4_wqe_srq_next_seg *next;
	struct mlx4_wqe_data_seg *scatter;
	u32 cqn;
	u16 xrcdn;
	int desc_size;
	int buf_size;
	int err;
	int i;

	/* Sanity check SRQ size before proceeding */
	if (init_attr->attr.max_wr  >= dev->dev->caps.max_srq_wqes ||
	    init_attr->attr.max_sge >  dev->dev->caps.max_srq_sge)
		return -EINVAL;

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
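	/*
	 * Allocate one extra WQE so that a full queue can be told apart
	 * from an empty one, and round up to the power-of-two size the
	 * hardware requires.
	 */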
	srq->msrq.max    = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->msrq.max_gs = init_attr->attr.max_sge;

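	/*
	 * Each descriptor is a next-segment header followed by max_gs
	 * scatter entries, rounded up to a power-of-two size of at
	 * least 32 bytes.
	 */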
	desc_size = max(32UL,
			roundup_pow_of_two(sizeof(struct mlx4_wqe_srq_next_seg) +
					   srq->msrq.max_gs *
					   sizeof(struct mlx4_wqe_data_seg)));
	srq->msrq.wqe_shift = ilog2(desc_size);

	buf_size = srq->msrq.max * desc_size;

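	/*
	 * For a userspace SRQ, pin the user-supplied buffer and doorbell
	 * record; for a kernel SRQ, allocate both here.
	 */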
	if (udata) {
		struct mlx4_ib_create_srq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
			return -EFAULT;

		srq->umem =
			ib_umem_get(ib_srq->device, ucmd.buf_addr, buf_size, 0);
		if (IS_ERR(srq->umem))
			return PTR_ERR(srq->umem);

		err = mlx4_mtt_init(
			dev->dev, ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE),
			PAGE_SHIFT, &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);
		if (err)
			goto err_mtt;

		err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &srq->db);
		if (err)
			goto err_mtt;
	} else {
		err = mlx4_db_alloc(dev->dev, &srq->db, 0);
		if (err)
			return err;

		*srq->db.db = 0;

		if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2,
				   &srq->buf)) {
			err = -ENOMEM;
			goto err_db;
		}

		srq->head    = 0;
		srq->tail    = srq->msrq.max - 1;
		srq->wqe_ctr = 0;

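		/*
		 * Link every WQE into a circular free list and mark each
		 * scatter entry with the invalid lkey so the hardware
		 * ignores unused entries.
		 */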
		for (i = 0; i < srq->msrq.max; ++i) {
			next = get_wqe(srq, i);
			next->next_wqe_index =
				cpu_to_be16((i + 1) & (srq->msrq.max - 1));

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + desc_size;
			     ++scatter)
				scatter->lkey = cpu_to_be32(MLX4_INVALID_LKEY);
		}

		err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift,
				    &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
		if (err)
			goto err_mtt;

		srq->wrid = kvmalloc_array(srq->msrq.max,
					   sizeof(u64), GFP_KERNEL);
		if (!srq->wrid) {
			err = -ENOMEM;
			goto err_mtt;
		}
	}

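	/*
	 * A basic SRQ has no CQ or XRC domain of its own: use cqn 0 and
	 * the device's reserved XRC domain in that case, and the ones
	 * supplied by the caller for an XRC SRQ.
	 */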
	cqn = ib_srq_has_cq(init_attr->srq_type) ?
		to_mcq(init_attr->ext.cq)->mcq.cqn : 0;
	xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ?
		to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn :
		(u16) dev->dev->caps.reserved_xrcds;
	err = mlx4_srq_alloc(dev->dev, to_mpd(ib_srq->pd)->pdn, cqn, xrcdn,
			     &srq->mtt, srq->db.dma, &srq->msrq);
	if (err)
		goto err_wrid;

	srq->msrq.event = mlx4_ib_srq_event;
	srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

	if (udata)
		if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
			err = -EFAULT;
			goto err_wrid;
		}

	init_attr->attr.max_wr = srq->msrq.max - 1;

	return 0;

err_wrid:
	if (udata)
		mlx4_ib_db_unmap_user(ucontext, &srq->db);
	else
		kvfree(srq->wrid);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &srq->mtt);

err_buf:
	if (!srq->umem)
		mlx4_buf_free(dev->dev, buf_size, &srq->buf);
	ib_umem_release(srq->umem);

err_db:
	if (!udata)
		mlx4_db_free(dev->dev, &srq->db);

	return err;
}

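/*
 * Modify an SRQ.  Only arming the SRQ limit (IB_SRQ_LIMIT) is
 * supported; resizing (IB_SRQ_MAX_WR) is not.
 */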
int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	int ret;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit >= srq->msrq.max)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mlx4_srq_arm(dev->dev, &srq->msrq, attr->srq_limit);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
	}

	return 0;
}

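/* Query the SRQ's current limit watermark and capacities from the HCA. */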
int mlx4_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	int ret;
	int limit_watermark;

	ret = mlx4_srq_query(dev->dev, &srq->msrq, &limit_watermark);
	if (ret)
		return ret;

	srq_attr->srq_limit = limit_watermark;
	srq_attr->max_wr    = srq->msrq.max - 1;
	srq_attr->max_sge   = srq->msrq.max_gs;

	return 0;
}

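/*
 * Destroy an SRQ: free the hardware object and MTT, then release the
 * doorbell record and WQE buffer (unmapping user memory for userspace
 * SRQs).
 */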
int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(srq->device);
	struct mlx4_ib_srq *msrq = to_msrq(srq);

	mlx4_srq_free(dev->dev, &msrq->msrq);
	mlx4_mtt_cleanup(dev->dev, &msrq->mtt);

	if (udata) {
		mlx4_ib_db_unmap_user(
			rdma_udata_to_drv_context(
				udata,
				struct mlx4_ib_ucontext,
				ibucontext),
			&msrq->db);
	} else {
		kvfree(msrq->wrid);
		mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
			      &msrq->buf);
		mlx4_db_free(dev->dev, &msrq->db);
	}
	ib_umem_release(msrq->umem);
	return 0;
}

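/*
 * Return a WQE to the SRQ's free list by linking it after the current
 * tail, once its completion has been processed.
 */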
void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index)
{
	struct mlx4_wqe_srq_next_seg *next;

	/* always called with interrupts disabled. */
	spin_lock(&srq->lock);

	next = get_wqe(srq, srq->tail);
	next->next_wqe_index = cpu_to_be16(wqe_index);
	srq->tail = wqe_index;

	spin_unlock(&srq->lock);
}

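/*
 * Post a chain of receive work requests to the SRQ.  WQEs are taken
 * from the free list at head; on failure, *bad_wr points at the first
 * request that could not be posted.
 */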
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr)
{
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	struct mlx4_wqe_srq_next_seg *next;
	struct mlx4_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	struct mlx4_ib_dev *mdev = to_mdev(ibsrq->device);

	spin_lock_irqsave(&srq->lock, flags);
	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

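	/*
	 * Consume free WQEs from the head of the free list; head == tail
	 * means only the last free entry remains, i.e. the queue is full.
	 * The first scatter slot beyond the request's num_sge is
	 * terminated with the invalid lkey.
	 */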
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely(srq->head == srq->tail)) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

		next      = get_wqe(srq, srq->head);
		srq->head = be16_to_cpu(next->next_wqe_index);
		scat      = (struct mlx4_wqe_data_seg *) (next + 1);

		for (i = 0; i < wr->num_sge; ++i) {
			scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
			scat[i].lkey       = cpu_to_be32(wr->sg_list[i].lkey);
			scat[i].addr       = cpu_to_be64(wr->sg_list[i].addr);
		}

		if (i < srq->msrq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey       = cpu_to_be32(MLX4_INVALID_LKEY);
			scat[i].addr       = 0;
		}
	}

	if (likely(nreq)) {
		srq->wqe_ctr += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
	}

out:
	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}