/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <asm/io.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>
#include <rdma/uverbs_ioctl.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
	MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
	MTHCA_ACK_REQ_FREQ       = 10,
	MTHCA_FLIGHT_LIMIT       = 9,
	MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
	MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
	MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
};

enum {
	MTHCA_QP_STATE_RST  = 0,
	MTHCA_QP_STATE_INIT = 1,
	MTHCA_QP_STATE_RTR  = 2,
	MTHCA_QP_STATE_RTS  = 3,
	MTHCA_QP_STATE_SQE  = 4,
	MTHCA_QP_STATE_SQD  = 5,
	MTHCA_QP_STATE_ERR  = 6,
	MTHCA_QP_STATE_DRAINING = 7
};

enum {
	MTHCA_QP_ST_RC  = 0x0,
	MTHCA_QP_ST_UC  = 0x1,
	MTHCA_QP_ST_RD  = 0x2,
	MTHCA_QP_ST_UD  = 0x3,
	MTHCA_QP_ST_MLX = 0x7
};

enum {
	MTHCA_QP_PM_MIGRATED = 0x3,
	MTHCA_QP_PM_ARMED    = 0x0,
	MTHCA_QP_PM_REARM    = 0x1
};

enum {
	/* qp_context flags */
	MTHCA_QP_BIT_DE  = 1 <<  8,
	/* params1 */
	MTHCA_QP_BIT_SRE = 1 << 15,
	MTHCA_QP_BIT_SWE = 1 << 14,
	MTHCA_QP_BIT_SAE = 1 << 13,
	MTHCA_QP_BIT_SIC = 1 <<  4,
	MTHCA_QP_BIT_SSC = 1 <<  3,
	/* params2 */
	MTHCA_QP_BIT_RRE = 1 << 15,
	MTHCA_QP_BIT_RWE = 1 << 14,
	MTHCA_QP_BIT_RAE = 1 << 13,
	MTHCA_QP_BIT_RIC = 1 <<  4,
	MTHCA_QP_BIT_RSC = 1 <<  3
};

enum {
	MTHCA_SEND_DOORBELL_FENCE = 1 << 5
};

struct mthca_qp_path {
	__be32 port_pkey;
	u8     rnr_retry;
	u8     g_mylmc;
	__be16 rlid;
	u8     ackto;
	u8     mgid_index;
	u8     static_rate;
	u8     hop_limit;
	__be32 sl_tclass_flowlabel;
	u8     rgid[16];
} __packed;

struct mthca_qp_context {
	__be32 flags;
	__be32 tavor_sched_queue; /* Reserved on Arbel */
	u8     mtu_msgmax;
	u8     rq_size_stride;	/* Reserved on Tavor */
	u8     sq_size_stride;	/* Reserved on Tavor */
	u8     rlkey_arbel_sched_queue;	/* Reserved on Tavor */
	__be32 usr_page;
	__be32 local_qpn;
	__be32 remote_qpn;
	u32    reserved1[2];
	struct mthca_qp_path pri_path;
	struct mthca_qp_path alt_path;
	__be32 rdd;
	__be32 pd;
	__be32 wqe_base;
	__be32 wqe_lkey;
	__be32 params1;
	__be32 reserved2;
	__be32 next_send_psn;
	__be32 cqn_snd;
	__be32 snd_wqe_base_l;	/* Next send WQE on Tavor */
	__be32 snd_db_index;	/* (debugging only entries) */
	__be32 last_acked_psn;
	__be32 ssn;
	__be32 params2;
	__be32 rnr_nextrecvpsn;
	__be32 ra_buff_indx;
	__be32 cqn_rcv;
	__be32 rcv_wqe_base_l;	/* Next recv WQE on Tavor */
	__be32 rcv_db_index;	/* (debugging only entries) */
	__be32 qkey;
	__be32 srqn;
	__be32 rmsn;
	__be16 rq_wqe_counter;	/* reserved on Tavor */
	__be16 sq_wqe_counter;	/* reserved on Tavor */
	u32    reserved3[18];
} __packed;

struct mthca_qp_param {
	__be32 opt_param_mask;
	u32    reserved1;
	struct mthca_qp_context context;
	u32    reserved2[62];
} __packed;

enum {
	MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
	MTHCA_QP_OPTPAR_RRE               = 1 << 1,
	MTHCA_QP_OPTPAR_RAE               = 1 << 2,
	MTHCA_QP_OPTPAR_RWE               = 1 << 3,
	MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
	MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
	MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
	MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
	MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
	MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
	MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
	MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
	MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
	MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
	MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
	MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
	MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};

static const u8 mthca_opcode[] = {
	[IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};

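/*
 * The four QPNs starting at sqp_start are the special QPs: QP0 and
 * QP1 for each of the device's two ports.
 */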
static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 3;
}

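/* The first two QPNs of the special QP range are QP0 (one per port). */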
static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 1;
}

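/*
 * Return a pointer to receive WQE n in the QP's work queue buffer,
 * which is either one contiguous ("direct") allocation or a list of
 * pages.  get_send_wqe() below does the same lookup for the send
 * queue, which lives at send_wqe_offset within the same buffer.
 */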
static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
	else
		return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
			((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}

static void *get_send_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + qp->send_wqe_offset +
			(n << qp->sq.wqe_shift);
	else
		return qp->queue.page_list[(qp->send_wqe_offset +
					    (n << qp->sq.wqe_shift)) >>
					   PAGE_SHIFT].buf +
			((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
			 (PAGE_SIZE - 1));
}

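/* Reset a work queue's indices to their just-created state. */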
static void mthca_wq_reset(struct mthca_wq *wq)
{
	wq->next_ind  = 0;
	wq->last_comp = wq->max - 1;
	wq->head      = 0;
	wq->tail      = 0;
}

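/*
 * Dispatch a firmware asynchronous event to the QP's event handler.
 * The refcount taken under qp_table.lock keeps the QP from being
 * destroyed while the handler runs.
 */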
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
		    enum ib_event_type event_type)
{
	struct mthca_qp *qp;
	struct ib_event event;

	spin_lock(&dev->qp_table.lock);
	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
	if (qp)
		++qp->refcount;
	spin_unlock(&dev->qp_table.lock);

	if (!qp) {
		mthca_warn(dev, "Async event %d for bogus QP %08x\n",
			   event_type, qpn);
		return;
	}

	if (event_type == IB_EVENT_PATH_MIG)
		qp->port = qp->alt_port;

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.qp  = &qp->ibqp;
	if (qp->ibqp.event_handler)
		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

	spin_lock(&dev->qp_table.lock);
	if (!--qp->refcount)
		wake_up(&qp->wait);
	spin_unlock(&dev->qp_table.lock);
}

static int to_mthca_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
	case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
	case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
	case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
	case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
	case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
	case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
	default:           return -1;
	}
}

enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };

static int to_mthca_st(int transport)
{
	switch (transport) {
	case RC:  return MTHCA_QP_ST_RC;
	case UC:  return MTHCA_QP_ST_UC;
	case UD:  return MTHCA_QP_ST_UD;
	case RD:  return MTHCA_QP_ST_RD;
	case MLX: return MTHCA_QP_ST_MLX;
	default:  return -1;
	}
}

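/*
 * Remember the attributes that the special QP (MLX) send path needs
 * later when building headers.
 */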
static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr,
			int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}

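/*
 * Bring the IB link on a port up with the device's maximum
 * capabilities; called when QP0 transitions to RTR.
 */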
static void init_port(struct mthca_dev *dev, int port)
{
	int err;
	struct mthca_init_ib_param param;

	memset(&param, 0, sizeof param);

	param.port_width = dev->limits.port_width_cap;
	param.vl_cap     = dev->limits.vl_cap;
	param.mtu_cap    = dev->limits.mtu_cap;
	param.gid_cap    = dev->limits.gid_table_len;
	param.pkey_cap   = dev->limits.pkey_table_len;

	err = mthca_INIT_IB(dev, &param, port);
	if (err)
		mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
}

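/*
 * Compute the params2 RRE/RAE/RWE bits from the requested access
 * flags, keeping only remote writes when the responder depth is
 * zero (no responder resources for reads/atomics).
 */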
static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr,
				  int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MTHCA_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MTHCA_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MTHCA_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}

static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
{
	switch (mthca_state) {
	case MTHCA_QP_STATE_RST:      return IB_QPS_RESET;
	case MTHCA_QP_STATE_INIT:     return IB_QPS_INIT;
	case MTHCA_QP_STATE_RTR:      return IB_QPS_RTR;
	case MTHCA_QP_STATE_RTS:      return IB_QPS_RTS;
	case MTHCA_QP_STATE_DRAINING:
	case MTHCA_QP_STATE_SQD:      return IB_QPS_SQD;
	case MTHCA_QP_STATE_SQE:      return IB_QPS_SQE;
	case MTHCA_QP_STATE_ERR:      return IB_QPS_ERR;
	default:                      return -1;
	}
}

static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
{
	switch (mthca_mig_state) {
	case 0:  return IB_MIG_ARMED;
	case 1:  return IB_MIG_REARM;
	case 3:  return IB_MIG_MIGRATED;
	default: return -1;
	}
}

static int to_ib_qp_access_flags(int mthca_flags)
{
	int ib_flags = 0;

	if (mthca_flags & MTHCA_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mthca_flags & MTHCA_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mthca_flags & MTHCA_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}

static void to_rdma_ah_attr(struct mthca_dev *dev,
			    struct rdma_ah_attr *ah_attr,
			    struct mthca_qp_path *path)
{
	u8 port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;

	memset(ah_attr, 0, sizeof(*ah_attr));

	if (port_num == 0 || port_num > dev->limits.num_ports)
		return;
	ah_attr->type = rdma_ah_find_type(&dev->ib_dev, port_num);
	rdma_ah_set_port_num(ah_attr, port_num);

	rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid));
	rdma_ah_set_sl(ah_attr, be32_to_cpu(path->sl_tclass_flowlabel) >> 28);
	rdma_ah_set_path_bits(ah_attr, path->g_mylmc & 0x7f);
	rdma_ah_set_static_rate(ah_attr,
				mthca_rate_to_ib(dev,
						 path->static_rate & 0xf,
						 port_num));
	if (path->g_mylmc & (1 << 7)) {
		u32 tc_fl = be32_to_cpu(path->sl_tclass_flowlabel);

		rdma_ah_set_grh(ah_attr, NULL,
				tc_fl & 0xfffff,
				path->mgid_index &
				(dev->limits.gid_table_len - 1),
				path->hop_limit,
				(tc_fl >> 20) & 0xff);
		rdma_ah_set_dgid_raw(ah_attr, path->rgid);
	}
}

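/*
 * Query a QP's current attributes by reading its context back from
 * the HCA with the QUERY_QP firmware command.
 */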
int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	int err = 0;
	struct mthca_mailbox *mailbox = NULL;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *context;
	int mthca_state;

	mutex_lock(&qp->mutex);

	if (qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto out;
	}

	err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox);
	if (err) {
		mthca_warn(dev, "QUERY_QP failed (%d)\n", err);
		goto out_mailbox;
	}

	qp_param    = mailbox->buf;
	context     = &qp_param->context;
	mthca_state = be32_to_cpu(context->flags) >> 28;

	qp->state		= to_ib_qp_state(mthca_state);
	qp_attr->qp_state	= qp->state;
	qp_attr->path_mtu	= context->mtu_msgmax >> 5;
	qp_attr->path_mig_state	=
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey		= be32_to_cpu(context->qkey);
	qp_attr->rq_psn		= be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn		= be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num	= be32_to_cpu(context->remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));

	if (qp->transport == RC || qp->transport == UC) {
		to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
		qp_attr->alt_pkey_index =
			be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
		qp_attr->alt_port_num	=
			rdma_ah_get_port_num(&qp_attr->alt_ah_attr);
	}

	qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
	qp_attr->port_num   =
		(be32_to_cpu(context->pri_path.port_pkey) >> 24) & 0x3;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer	=
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout	= context->pri_path.ackto >> 3;
	qp_attr->retry_cnt	= (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry	= context->pri_path.rnr_retry >> 5;
	qp_attr->alt_timeout	= context->alt_path.ackto >> 3;

done:
	qp_attr->cur_qp_state	     = qp_attr->qp_state;
	qp_attr->cap.max_send_wr     = qp->sq.max;
	qp_attr->cap.max_recv_wr     = qp->rq.max;
	qp_attr->cap.max_send_sge    = qp->sq.max_gs;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	qp_init_attr->cap	     = qp_attr->cap;
	qp_init_attr->sq_sig_type    = qp->sq_policy;

out_mailbox:
	mthca_free_mailbox(dev, mailbox);

out:
	mutex_unlock(&qp->mutex);
	return err;
}

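/*
 * Fill in a hardware address path from an rdma_ah_attr.  Returns -1
 * if the GRH's sgid_index is out of range for the device.
 */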
static int mthca_path_set(struct mthca_dev *dev, const struct rdma_ah_attr *ah,
			  struct mthca_qp_path *path, u8 port)
{
	path->g_mylmc     = rdma_ah_get_path_bits(ah) & 0x7f;
	path->rlid        = cpu_to_be16(rdma_ah_get_dlid(ah));
	path->static_rate = mthca_get_rate(dev, rdma_ah_get_static_rate(ah),
					   port);

	if (rdma_ah_get_ah_flags(ah) & IB_AH_GRH) {
		const struct ib_global_route *grh = rdma_ah_read_grh(ah);

		if (grh->sgid_index >= dev->limits.gid_table_len) {
			mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n",
				  grh->sgid_index,
				  dev->limits.gid_table_len - 1);
			return -1;
		}

		path->g_mylmc   |= 1 << 7;
		path->mgid_index = grh->sgid_index;
		path->hop_limit  = grh->hop_limit;
		path->sl_tclass_flowlabel =
			cpu_to_be32((rdma_ah_get_sl(ah) << 28) |
				    (grh->traffic_class << 20) |
				    (grh->flow_label));
		memcpy(path->rgid, grh->dgid.raw, 16);
	} else {
		path->sl_tclass_flowlabel = cpu_to_be32(rdma_ah_get_sl(ah) <<
							28);
	}

	return 0;
}

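/*
 * Do the real work of a QP state transition: build the new QP
 * context in a mailbox, execute the MODIFY_QP firmware command, and
 * then bring the driver's software state in line with the hardware.
 */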
static int __mthca_modify_qp(struct ib_qp *ibqp,
			     const struct ib_qp_attr *attr, int attr_mask,
			     enum ib_qp_state cur_state,
			     enum ib_qp_state new_state,
			     struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	struct mthca_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mthca_ucontext, ibucontext);
	struct mthca_mailbox *mailbox;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *qp_context;
	u32 sqd_event = 0;
	int err = -EINVAL;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto out;
	}
	qp_param = mailbox->buf;
	qp_context = &qp_param->context;
	memset(qp_param, 0, sizeof *qp_param);

	qp_context->flags      = cpu_to_be32((to_mthca_state(new_state) << 28) |
					     (to_mthca_st(qp->transport) << 16));
	qp_context->flags     |= cpu_to_be32(MTHCA_QP_BIT_DE);
	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
	else {
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
			break;
		}
	}

	/* leave tavor_sched_queue as 0 */

	if (qp->transport == MLX || qp->transport == UD)
		qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
	else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
			mthca_dbg(dev, "path MTU (%u) is invalid\n",
				  attr->path_mtu);
			goto out_mailbox;
		}
		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
	}

	if (mthca_is_memfree(dev)) {
		if (qp->rq.max)
			qp_context->rq_size_stride = ilog2(qp->rq.max) << 3;
		qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;

		if (qp->sq.max)
			qp_context->sq_size_stride = ilog2(qp->sq.max) << 3;
		qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
	}

	/* leave arbel_sched_queue as 0 */

	if (qp->ibqp.uobject)
		qp_context->usr_page = cpu_to_be32(context->uar.index);
	else
		qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
	qp_context->local_qpn  = cpu_to_be32(qp->qpn);
	if (attr_mask & IB_QP_DEST_QPN) {
		qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
	}

	if (qp->transport == MLX)
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(qp->port << 24);
	else {
		if (attr_mask & IB_QP_PORT) {
			qp_context->pri_path.port_pkey |=
				cpu_to_be32(attr->port_num << 24);
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(attr->pkey_index);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
			attr->rnr_retry << 5;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
							MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
	}

	if (attr_mask & IB_QP_AV) {
		if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
				   attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
			goto out_mailbox;

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
	}

	if (ibqp->qp_type == IB_QPT_RC &&
	    cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		u8 sched_queue = ibqp->uobject ? 0x2 : 0x1;

		if (mthca_is_memfree(dev))
			qp_context->rlkey_arbel_sched_queue |= sched_queue;
		else
			qp_context->tavor_sched_queue |= cpu_to_be32(sched_queue);

		qp_param->opt_param_mask |=
			cpu_to_be32(MTHCA_QP_OPTPAR_SCHED_QUEUE);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		qp_context->pri_path.ackto = attr->timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_pkey_index >= dev->limits.pkey_table_len) {
			mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n",
				  attr->alt_pkey_index, dev->limits.pkey_table_len-1);
			goto out_mailbox;
		}

		if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
			mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
				attr->alt_port_num);
			goto out_mailbox;
		}

		if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
				   rdma_ah_get_port_num(&attr->alt_ah_attr)))
			goto out_mailbox;

		qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
							      attr->alt_port_num << 24);
		qp_context->alt_path.ackto = attr->alt_timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
	}

	/* leave rdd as 0 */
	qp_context->pd         = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
	/* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
	qp_context->wqe_lkey   = cpu_to_be32(qp->mr.ibmr.lkey);
	qp_context->params1    = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
					     (MTHCA_FLIGHT_LIMIT << 24) |
					     MTHCA_QP_BIT_SWE);
	if (qp->sq_policy == IB_SIGNAL_ALL_WR)
		qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
	if (attr_mask & IB_QP_RETRY_CNT) {
		qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic) {
			qp_context->params1 |=
				cpu_to_be32(MTHCA_QP_BIT_SRE |
					    MTHCA_QP_BIT_SAE);
			qp_context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		}
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
	qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

	if (mthca_is_memfree(dev)) {
		qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
		qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			qp_context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		qp_context->params2      |= get_hw_access_flags(qp, attr, attr_mask);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
							MTHCA_QP_OPTPAR_RRE |
							MTHCA_QP_OPTPAR_RAE);
	}

	qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

	if (ibqp->srq)
		qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
	}
	if (attr_mask & IB_QP_RQ_PSN)
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	qp_context->ra_buff_indx =
		cpu_to_be32(dev->qp_table.rdb_base +
			    ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
			     dev->qp_table.rdb_shift));

	qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

	if (mthca_is_memfree(dev))
		qp_context->rcv_db_index   = cpu_to_be32(qp->rq.db_index);

	if (attr_mask & IB_QP_QKEY) {
		qp_context->qkey = cpu_to_be32(attr->qkey);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
	}

	if (ibqp->srq)
		qp_context->srqn = cpu_to_be32(1 << 24 |
					       to_msrq(ibqp->srq)->srqn);

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD	&&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY		&&
	    attr->en_sqd_async_notify)
		sqd_event = 1 << 31;

	err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
			      mailbox, sqd_event);
	if (err) {
		mthca_warn(dev, "modify QP %d->%d returned %d.\n",
			   cur_state, new_state, err);
		goto out_mailbox;
	}

	qp->state = new_state;
	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	if (is_sqp(dev, qp))
		store_attrs(qp->sqp, attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR &&
		    new_state == IB_QPS_RTR)
			init_port(dev, qp->port);

		if (cur_state != IB_QPS_RESET &&
		    cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET ||
		     new_state == IB_QPS_ERR))
			mthca_CLOSE_IB(dev, qp->port);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL);

		mthca_wq_reset(&qp->sq);
		qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);

		mthca_wq_reset(&qp->rq);
		qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

		if (mthca_is_memfree(dev)) {
			*qp->sq.db = 0;
			*qp->rq.db = 0;
		}
	}

out_mailbox:
	mthca_free_mailbox(dev, mailbox);
out:
	return err;
}

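/*
 * Validate the requested transition and attributes against the
 * device limits, then hand off to __mthca_modify_qp().
 */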
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
		    struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;

	mutex_lock(&qp->mutex);
	if (attr_mask & IB_QP_CUR_STATE) {
		cur_state = attr->cur_qp_state;
	} else {
		spin_lock_irq(&qp->sq.lock);
		spin_lock(&qp->rq.lock);
		cur_state = qp->state;
		spin_unlock(&qp->rq.lock);
		spin_unlock_irq(&qp->sq.lock);
	}

	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask)) {
		mthca_dbg(dev, "Bad QP transition (transport %d) "
			  "%d->%d with attr 0x%08x\n",
			  qp->transport, cur_state, new_state,
			  attr_mask);
		goto out;
	}

	if ((attr_mask & IB_QP_PKEY_INDEX) &&
	     attr->pkey_index >= dev->limits.pkey_table_len) {
		mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
			  attr->pkey_index, dev->limits.pkey_table_len-1);
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
		mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
		mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
			  attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
		mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
			  attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state,
				udata);

out:
	mutex_unlock(&qp->mutex);
	return err;
}

static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
{
	/*
	 * Calculate the maximum size of WQE s/g segments, excluding
	 * the next segment and other non-data segments.
	 */
	int max_data_size = desc_sz - sizeof (struct mthca_next_seg);

	switch (qp->transport) {
	case MLX:
		max_data_size -= 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		if (mthca_is_memfree(dev))
			max_data_size -= sizeof (struct mthca_arbel_ud_seg);
		else
			max_data_size -= sizeof (struct mthca_tavor_ud_seg);
		break;

	default:
		max_data_size -= sizeof (struct mthca_raddr_seg);
		break;
	}

	return max_data_size;
}

static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
{
	/* We don't support inline data for kernel QPs (yet). */
	return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
}

static void mthca_adjust_qp_caps(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp,
						min(dev->limits.max_desc_sz,
						    1 << qp->sq.wqe_shift));

	qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);

	qp->sq.max_gs = min_t(int, dev->limits.max_sg,
			      max_data_size / sizeof (struct mthca_data_seg));
	qp->rq.max_gs = min_t(int, dev->limits.max_sg,
			       (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
				sizeof (struct mthca_next_seg)) /
			       sizeof (struct mthca_data_seg));
}

/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
			       struct mthca_pd *pd,
			       struct mthca_qp *qp,
			       struct ib_udata *udata)
{
	int size;
	int err = -ENOMEM;

	size = sizeof (struct mthca_next_seg) +
		qp->rq.max_gs * sizeof (struct mthca_data_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
	     qp->rq.wqe_shift++)
		; /* nothing */

	size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
	switch (qp->transport) {
	case MLX:
		size += 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		size += mthca_is_memfree(dev) ?
			sizeof (struct mthca_arbel_ud_seg) :
			sizeof (struct mthca_tavor_ud_seg);
		break;

	case UC:
		size += sizeof (struct mthca_raddr_seg);
		break;

	case RC:
		size += sizeof (struct mthca_raddr_seg);
		/*
		 * An atomic op will require an atomic segment, a
		 * remote address segment and one scatter entry.
		 */
		size = max_t(int, size,
			     sizeof (struct mthca_atomic_seg) +
			     sizeof (struct mthca_raddr_seg) +
			     sizeof (struct mthca_data_seg));
		break;

	default:
		break;
	}

	/* Make sure that we have enough space for a bind request */
	size = max_t(int, size, sizeof (struct mthca_bind_seg));

	size += sizeof (struct mthca_next_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
	     qp->sq.wqe_shift++)
		; /* nothing */

	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
				    1 << qp->sq.wqe_shift);

	/*
	 * If this is a userspace QP, we don't actually have to
	 * allocate anything.  All we need is to calculate the WQE
	 * sizes and the send_wqe_offset, so we're done now.
	 */
	if (udata)
		return 0;

	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	qp->wrid = kmalloc_array(qp->rq.max + qp->sq.max, sizeof(u64),
				 GFP_KERNEL);
	if (!qp->wrid)
		goto err_out;

	err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
			      &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
	if (err)
		goto err_out;

	return 0;

err_out:
	kfree(qp->wrid);
	return err;
}

static void mthca_free_wqe_buf(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
				       (qp->sq.max << qp->sq.wqe_shift)),
		       &qp->queue, qp->is_direct, &qp->mr);
	kfree(qp->wrid);
}

1089*4882a593Smuzhiyun static int mthca_map_memfree(struct mthca_dev *dev,
1090*4882a593Smuzhiyun 			     struct mthca_qp *qp)
1091*4882a593Smuzhiyun {
1092*4882a593Smuzhiyun 	int ret;
1093*4882a593Smuzhiyun 
1094*4882a593Smuzhiyun 	if (mthca_is_memfree(dev)) {
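		/*
		 * Mem-free hardware keeps QP context in ICM carved out of
		 * host memory, so map the QP context, extended QP context
		 * and RDB entries for this QPN before touching them.
		 */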
1095*4882a593Smuzhiyun 		ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
1096*4882a593Smuzhiyun 		if (ret)
1097*4882a593Smuzhiyun 			return ret;
1098*4882a593Smuzhiyun 
1099*4882a593Smuzhiyun 		ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
1100*4882a593Smuzhiyun 		if (ret)
1101*4882a593Smuzhiyun 			goto err_qpc;
1102*4882a593Smuzhiyun 
1103*4882a593Smuzhiyun 		ret = mthca_table_get(dev, dev->qp_table.rdb_table,
1104*4882a593Smuzhiyun 				      qp->qpn << dev->qp_table.rdb_shift);
1105*4882a593Smuzhiyun 		if (ret)
1106*4882a593Smuzhiyun 			goto err_eqpc;
1107*4882a593Smuzhiyun 
1108*4882a593Smuzhiyun 	}
1109*4882a593Smuzhiyun 
1110*4882a593Smuzhiyun 	return 0;
1111*4882a593Smuzhiyun 
1112*4882a593Smuzhiyun err_eqpc:
1113*4882a593Smuzhiyun 	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
1114*4882a593Smuzhiyun 
1115*4882a593Smuzhiyun err_qpc:
1116*4882a593Smuzhiyun 	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
1117*4882a593Smuzhiyun 
1118*4882a593Smuzhiyun 	return ret;
1119*4882a593Smuzhiyun }
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun static void mthca_unmap_memfree(struct mthca_dev *dev,
1122*4882a593Smuzhiyun 				struct mthca_qp *qp)
1123*4882a593Smuzhiyun {
1124*4882a593Smuzhiyun 	mthca_table_put(dev, dev->qp_table.rdb_table,
1125*4882a593Smuzhiyun 			qp->qpn << dev->qp_table.rdb_shift);
1126*4882a593Smuzhiyun 	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
1127*4882a593Smuzhiyun 	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
1128*4882a593Smuzhiyun }
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun static int mthca_alloc_memfree(struct mthca_dev *dev,
1131*4882a593Smuzhiyun 			       struct mthca_qp *qp)
1132*4882a593Smuzhiyun {
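	/*
	 * Mem-free hardware reads the queue head and tail from doorbell
	 * records in host memory rather than tracking them on-chip, so
	 * each work queue needs one allocated here.
	 */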
1133*4882a593Smuzhiyun 	if (mthca_is_memfree(dev)) {
1134*4882a593Smuzhiyun 		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
1135*4882a593Smuzhiyun 						 qp->qpn, &qp->rq.db);
1136*4882a593Smuzhiyun 		if (qp->rq.db_index < 0)
1137*4882a593Smuzhiyun 			return -ENOMEM;
1138*4882a593Smuzhiyun 
1139*4882a593Smuzhiyun 		qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
1140*4882a593Smuzhiyun 						 qp->qpn, &qp->sq.db);
1141*4882a593Smuzhiyun 		if (qp->sq.db_index < 0) {
1142*4882a593Smuzhiyun 			mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
1143*4882a593Smuzhiyun 			return -ENOMEM;
1144*4882a593Smuzhiyun 		}
1145*4882a593Smuzhiyun 	}
1146*4882a593Smuzhiyun 
1147*4882a593Smuzhiyun 	return 0;
1148*4882a593Smuzhiyun }
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun static void mthca_free_memfree(struct mthca_dev *dev,
1151*4882a593Smuzhiyun 			       struct mthca_qp *qp)
1152*4882a593Smuzhiyun {
1153*4882a593Smuzhiyun 	if (mthca_is_memfree(dev)) {
1154*4882a593Smuzhiyun 		mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
1155*4882a593Smuzhiyun 		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
1156*4882a593Smuzhiyun 	}
1157*4882a593Smuzhiyun }
1158*4882a593Smuzhiyun 
1159*4882a593Smuzhiyun static int mthca_alloc_qp_common(struct mthca_dev *dev,
1160*4882a593Smuzhiyun 				 struct mthca_pd *pd,
1161*4882a593Smuzhiyun 				 struct mthca_cq *send_cq,
1162*4882a593Smuzhiyun 				 struct mthca_cq *recv_cq,
1163*4882a593Smuzhiyun 				 enum ib_sig_type send_policy,
1164*4882a593Smuzhiyun 				 struct mthca_qp *qp,
1165*4882a593Smuzhiyun 				 struct ib_udata *udata)
1166*4882a593Smuzhiyun {
1167*4882a593Smuzhiyun 	int ret;
1168*4882a593Smuzhiyun 	int i;
1169*4882a593Smuzhiyun 	struct mthca_next_seg *next;
1170*4882a593Smuzhiyun 
1171*4882a593Smuzhiyun 	qp->refcount = 1;
1172*4882a593Smuzhiyun 	init_waitqueue_head(&qp->wait);
1173*4882a593Smuzhiyun 	mutex_init(&qp->mutex);
1174*4882a593Smuzhiyun 	qp->state    	 = IB_QPS_RESET;
1175*4882a593Smuzhiyun 	qp->atomic_rd_en = 0;
1176*4882a593Smuzhiyun 	qp->resp_depth   = 0;
1177*4882a593Smuzhiyun 	qp->sq_policy    = send_policy;
1178*4882a593Smuzhiyun 	mthca_wq_reset(&qp->sq);
1179*4882a593Smuzhiyun 	mthca_wq_reset(&qp->rq);
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun 	spin_lock_init(&qp->sq.lock);
1182*4882a593Smuzhiyun 	spin_lock_init(&qp->rq.lock);
1183*4882a593Smuzhiyun 
1184*4882a593Smuzhiyun 	ret = mthca_map_memfree(dev, qp);
1185*4882a593Smuzhiyun 	if (ret)
1186*4882a593Smuzhiyun 		return ret;
1187*4882a593Smuzhiyun 
1188*4882a593Smuzhiyun 	ret = mthca_alloc_wqe_buf(dev, pd, qp, udata);
1189*4882a593Smuzhiyun 	if (ret) {
1190*4882a593Smuzhiyun 		mthca_unmap_memfree(dev, qp);
1191*4882a593Smuzhiyun 		return ret;
1192*4882a593Smuzhiyun 	}
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 	mthca_adjust_qp_caps(dev, pd, qp);
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun 	/*
1197*4882a593Smuzhiyun 	 * If this is a userspace QP, we're done now.  The doorbells
1198*4882a593Smuzhiyun 	 * will be allocated and buffers will be initialized in
1199*4882a593Smuzhiyun 	 * userspace.
1200*4882a593Smuzhiyun 	 */
1201*4882a593Smuzhiyun 	if (udata)
1202*4882a593Smuzhiyun 		return 0;
1203*4882a593Smuzhiyun 
1204*4882a593Smuzhiyun 	ret = mthca_alloc_memfree(dev, qp);
1205*4882a593Smuzhiyun 	if (ret) {
1206*4882a593Smuzhiyun 		mthca_free_wqe_buf(dev, qp);
1207*4882a593Smuzhiyun 		mthca_unmap_memfree(dev, qp);
1208*4882a593Smuzhiyun 		return ret;
1209*4882a593Smuzhiyun 	}
1210*4882a593Smuzhiyun 
1211*4882a593Smuzhiyun 	if (mthca_is_memfree(dev)) {
1212*4882a593Smuzhiyun 		struct mthca_data_seg *scatter;
1213*4882a593Smuzhiyun 		int size = (sizeof (struct mthca_next_seg) +
1214*4882a593Smuzhiyun 			    qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;
1215*4882a593Smuzhiyun 
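		/*
		 * Link every receive WQE to the next one in the ring (rq.max
		 * is a power of two here, so the mask wraps the index) and
		 * mark all scatter slots invalid so the hardware can tell
		 * where a posted scatter list ends.
		 */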
1216*4882a593Smuzhiyun 		for (i = 0; i < qp->rq.max; ++i) {
1217*4882a593Smuzhiyun 			next = get_recv_wqe(qp, i);
1218*4882a593Smuzhiyun 			next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
1219*4882a593Smuzhiyun 						   qp->rq.wqe_shift);
1220*4882a593Smuzhiyun 			next->ee_nds = cpu_to_be32(size);
1221*4882a593Smuzhiyun 
1222*4882a593Smuzhiyun 			for (scatter = (void *) (next + 1);
1223*4882a593Smuzhiyun 			     (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
1224*4882a593Smuzhiyun 			     ++scatter)
1225*4882a593Smuzhiyun 				scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
1226*4882a593Smuzhiyun 		}
1227*4882a593Smuzhiyun 
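		/*
		 * Send WQEs are linked the same way; their link addresses
		 * include send_wqe_offset because the SQ sits after the RQ
		 * in the queue buffer.
		 */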
1228*4882a593Smuzhiyun 		for (i = 0; i < qp->sq.max; ++i) {
1229*4882a593Smuzhiyun 			next = get_send_wqe(qp, i);
1230*4882a593Smuzhiyun 			next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
1231*4882a593Smuzhiyun 						    qp->sq.wqe_shift) +
1232*4882a593Smuzhiyun 						   qp->send_wqe_offset);
1233*4882a593Smuzhiyun 		}
1234*4882a593Smuzhiyun 	} else {
1235*4882a593Smuzhiyun 		for (i = 0; i < qp->rq.max; ++i) {
1236*4882a593Smuzhiyun 			next = get_recv_wqe(qp, i);
1237*4882a593Smuzhiyun 			next->nda_op = cpu_to_be32((((i + 1) % qp->rq.max) <<
1238*4882a593Smuzhiyun 						    qp->rq.wqe_shift) | 1);
1239*4882a593Smuzhiyun 		}
1241*4882a593Smuzhiyun 	}
1242*4882a593Smuzhiyun 
1243*4882a593Smuzhiyun 	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
1244*4882a593Smuzhiyun 	qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
1245*4882a593Smuzhiyun 
1246*4882a593Smuzhiyun 	return 0;
1247*4882a593Smuzhiyun }
1248*4882a593Smuzhiyun 
1249*4882a593Smuzhiyun static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
1250*4882a593Smuzhiyun 			     struct mthca_pd *pd, struct mthca_qp *qp)
1251*4882a593Smuzhiyun {
1252*4882a593Smuzhiyun 	int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);
1253*4882a593Smuzhiyun 
1254*4882a593Smuzhiyun 	/* Sanity check QP size before proceeding */
1255*4882a593Smuzhiyun 	if (cap->max_send_wr  	 > dev->limits.max_wqes ||
1256*4882a593Smuzhiyun 	    cap->max_recv_wr  	 > dev->limits.max_wqes ||
1257*4882a593Smuzhiyun 	    cap->max_send_sge 	 > dev->limits.max_sg   ||
1258*4882a593Smuzhiyun 	    cap->max_recv_sge 	 > dev->limits.max_sg   ||
1259*4882a593Smuzhiyun 	    cap->max_inline_data > mthca_max_inline_data(pd, max_data_size))
1260*4882a593Smuzhiyun 		return -EINVAL;
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun 	/*
1263*4882a593Smuzhiyun 	 * For MLX transport we need 2 extra send gather entries:
1264*4882a593Smuzhiyun 	 * one for the header and one for the checksum at the end
1265*4882a593Smuzhiyun 	 */
1266*4882a593Smuzhiyun 	if (qp->transport == MLX && cap->max_send_sge + 2 > dev->limits.max_sg)
1267*4882a593Smuzhiyun 		return -EINVAL;
1268*4882a593Smuzhiyun 
1269*4882a593Smuzhiyun 	if (mthca_is_memfree(dev)) {
1270*4882a593Smuzhiyun 		qp->rq.max = cap->max_recv_wr ?
1271*4882a593Smuzhiyun 			roundup_pow_of_two(cap->max_recv_wr) : 0;
1272*4882a593Smuzhiyun 		qp->sq.max = cap->max_send_wr ?
1273*4882a593Smuzhiyun 			roundup_pow_of_two(cap->max_send_wr) : 0;
1274*4882a593Smuzhiyun 	} else {
1275*4882a593Smuzhiyun 		qp->rq.max = cap->max_recv_wr;
1276*4882a593Smuzhiyun 		qp->sq.max = cap->max_send_wr;
1277*4882a593Smuzhiyun 	}
1278*4882a593Smuzhiyun 
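	/*
	 * The SQ needs gather room for the larger of the requested SGEs or
	 * the inline data: inline data plus its 4-byte header is carved
	 * into 16-byte chunks, one per data segment slot.
	 */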
1279*4882a593Smuzhiyun 	qp->rq.max_gs = cap->max_recv_sge;
1280*4882a593Smuzhiyun 	qp->sq.max_gs = max_t(int, cap->max_send_sge,
1281*4882a593Smuzhiyun 			      ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
1282*4882a593Smuzhiyun 				    MTHCA_INLINE_CHUNK_SIZE) /
1283*4882a593Smuzhiyun 			      sizeof (struct mthca_data_seg));
1284*4882a593Smuzhiyun 
1285*4882a593Smuzhiyun 	return 0;
1286*4882a593Smuzhiyun }
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun int mthca_alloc_qp(struct mthca_dev *dev,
1289*4882a593Smuzhiyun 		   struct mthca_pd *pd,
1290*4882a593Smuzhiyun 		   struct mthca_cq *send_cq,
1291*4882a593Smuzhiyun 		   struct mthca_cq *recv_cq,
1292*4882a593Smuzhiyun 		   enum ib_qp_type type,
1293*4882a593Smuzhiyun 		   enum ib_sig_type send_policy,
1294*4882a593Smuzhiyun 		   struct ib_qp_cap *cap,
1295*4882a593Smuzhiyun 		   struct mthca_qp *qp,
1296*4882a593Smuzhiyun 		   struct ib_udata *udata)
1297*4882a593Smuzhiyun {
1298*4882a593Smuzhiyun 	int err;
1299*4882a593Smuzhiyun 
1300*4882a593Smuzhiyun 	switch (type) {
1301*4882a593Smuzhiyun 	case IB_QPT_RC: qp->transport = RC; break;
1302*4882a593Smuzhiyun 	case IB_QPT_UC: qp->transport = UC; break;
1303*4882a593Smuzhiyun 	case IB_QPT_UD: qp->transport = UD; break;
1304*4882a593Smuzhiyun 	default: return -EINVAL;
1305*4882a593Smuzhiyun 	}
1306*4882a593Smuzhiyun 
1307*4882a593Smuzhiyun 	err = mthca_set_qp_size(dev, cap, pd, qp);
1308*4882a593Smuzhiyun 	if (err)
1309*4882a593Smuzhiyun 		return err;
1310*4882a593Smuzhiyun 
1311*4882a593Smuzhiyun 	qp->qpn = mthca_alloc(&dev->qp_table.alloc);
1312*4882a593Smuzhiyun 	if (qp->qpn == -1)
1313*4882a593Smuzhiyun 		return -ENOMEM;
1314*4882a593Smuzhiyun 
1315*4882a593Smuzhiyun 	/* Initialize port to zero so any use before a real port is assigned stands out as an error. */
1316*4882a593Smuzhiyun 	qp->port = 0;
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun 	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
1319*4882a593Smuzhiyun 				    send_policy, qp, udata);
1320*4882a593Smuzhiyun 	if (err) {
1321*4882a593Smuzhiyun 		mthca_free(&dev->qp_table.alloc, qp->qpn);
1322*4882a593Smuzhiyun 		return err;
1323*4882a593Smuzhiyun 	}
1324*4882a593Smuzhiyun 
1325*4882a593Smuzhiyun 	spin_lock_irq(&dev->qp_table.lock);
1326*4882a593Smuzhiyun 	mthca_array_set(&dev->qp_table.qp,
1327*4882a593Smuzhiyun 			qp->qpn & (dev->limits.num_qps - 1), qp);
1328*4882a593Smuzhiyun 	spin_unlock_irq(&dev->qp_table.lock);
1329*4882a593Smuzhiyun 
1330*4882a593Smuzhiyun 	return 0;
1331*4882a593Smuzhiyun }
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
1334*4882a593Smuzhiyun 	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
1335*4882a593Smuzhiyun {
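	/*
	 * Always take the two CQ locks in a fixed order (lower CQN first)
	 * so that two QPs sharing the same pair of CQs cannot deadlock.
	 */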
1336*4882a593Smuzhiyun 	if (send_cq == recv_cq) {
1337*4882a593Smuzhiyun 		spin_lock_irq(&send_cq->lock);
1338*4882a593Smuzhiyun 		__acquire(&recv_cq->lock);
1339*4882a593Smuzhiyun 	} else if (send_cq->cqn < recv_cq->cqn) {
1340*4882a593Smuzhiyun 		spin_lock_irq(&send_cq->lock);
1341*4882a593Smuzhiyun 		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
1342*4882a593Smuzhiyun 	} else {
1343*4882a593Smuzhiyun 		spin_lock_irq(&recv_cq->lock);
1344*4882a593Smuzhiyun 		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
1345*4882a593Smuzhiyun 	}
1346*4882a593Smuzhiyun }
1347*4882a593Smuzhiyun 
1348*4882a593Smuzhiyun static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
1349*4882a593Smuzhiyun 	__releases(&send_cq->lock) __releases(&recv_cq->lock)
1350*4882a593Smuzhiyun {
1351*4882a593Smuzhiyun 	if (send_cq == recv_cq) {
1352*4882a593Smuzhiyun 		__release(&recv_cq->lock);
1353*4882a593Smuzhiyun 		spin_unlock_irq(&send_cq->lock);
1354*4882a593Smuzhiyun 	} else if (send_cq->cqn < recv_cq->cqn) {
1355*4882a593Smuzhiyun 		spin_unlock(&recv_cq->lock);
1356*4882a593Smuzhiyun 		spin_unlock_irq(&send_cq->lock);
1357*4882a593Smuzhiyun 	} else {
1358*4882a593Smuzhiyun 		spin_unlock(&send_cq->lock);
1359*4882a593Smuzhiyun 		spin_unlock_irq(&recv_cq->lock);
1360*4882a593Smuzhiyun 	}
1361*4882a593Smuzhiyun }
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun int mthca_alloc_sqp(struct mthca_dev *dev,
1364*4882a593Smuzhiyun 		    struct mthca_pd *pd,
1365*4882a593Smuzhiyun 		    struct mthca_cq *send_cq,
1366*4882a593Smuzhiyun 		    struct mthca_cq *recv_cq,
1367*4882a593Smuzhiyun 		    enum ib_sig_type send_policy,
1368*4882a593Smuzhiyun 		    struct ib_qp_cap *cap,
1369*4882a593Smuzhiyun 		    int qpn,
1370*4882a593Smuzhiyun 		    int port,
1371*4882a593Smuzhiyun 		    struct mthca_qp *qp,
1372*4882a593Smuzhiyun 		    struct ib_udata *udata)
1373*4882a593Smuzhiyun {
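	/*
	 * Special QPs are numbered sqp_start + qpn * 2 + port - 1: each of
	 * QP0 and QP1 gets one QP per port, interleaved at the bottom of
	 * the reserved range.
	 */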
1374*4882a593Smuzhiyun 	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
1375*4882a593Smuzhiyun 	int err;
1376*4882a593Smuzhiyun 
1377*4882a593Smuzhiyun 	qp->transport = MLX;
1378*4882a593Smuzhiyun 	err = mthca_set_qp_size(dev, cap, pd, qp);
1379*4882a593Smuzhiyun 	if (err)
1380*4882a593Smuzhiyun 		return err;
1381*4882a593Smuzhiyun 
1382*4882a593Smuzhiyun 	qp->sqp->header_buf_size = qp->sq.max * MTHCA_UD_HEADER_SIZE;
1383*4882a593Smuzhiyun 	qp->sqp->header_buf =
1384*4882a593Smuzhiyun 		dma_alloc_coherent(&dev->pdev->dev, qp->sqp->header_buf_size,
1385*4882a593Smuzhiyun 				   &qp->sqp->header_dma, GFP_KERNEL);
1386*4882a593Smuzhiyun 	if (!qp->sqp->header_buf)
1387*4882a593Smuzhiyun 		return -ENOMEM;
1388*4882a593Smuzhiyun 
1389*4882a593Smuzhiyun 	spin_lock_irq(&dev->qp_table.lock);
1390*4882a593Smuzhiyun 	if (mthca_array_get(&dev->qp_table.qp, mqpn))
1391*4882a593Smuzhiyun 		err = -EBUSY;
1392*4882a593Smuzhiyun 	else
1393*4882a593Smuzhiyun 		mthca_array_set(&dev->qp_table.qp, mqpn, qp->sqp);
1394*4882a593Smuzhiyun 	spin_unlock_irq(&dev->qp_table.lock);
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun 	if (err)
1397*4882a593Smuzhiyun 		goto err_out;
1398*4882a593Smuzhiyun 
1399*4882a593Smuzhiyun 	qp->port      = port;
1400*4882a593Smuzhiyun 	qp->qpn       = mqpn;
1401*4882a593Smuzhiyun 	qp->transport = MLX;
1402*4882a593Smuzhiyun 
1403*4882a593Smuzhiyun 	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
1404*4882a593Smuzhiyun 				    send_policy, qp, udata);
1405*4882a593Smuzhiyun 	if (err)
1406*4882a593Smuzhiyun 		goto err_out_free;
1407*4882a593Smuzhiyun 
1408*4882a593Smuzhiyun 	atomic_inc(&pd->sqp_count);
1409*4882a593Smuzhiyun 
1410*4882a593Smuzhiyun 	return 0;
1411*4882a593Smuzhiyun 
1412*4882a593Smuzhiyun  err_out_free:
1413*4882a593Smuzhiyun 	/*
1414*4882a593Smuzhiyun 	 * Lock CQs here, so that CQ polling code can do QP lookup
1415*4882a593Smuzhiyun 	 * without taking a lock.
1416*4882a593Smuzhiyun 	 */
1417*4882a593Smuzhiyun 	mthca_lock_cqs(send_cq, recv_cq);
1418*4882a593Smuzhiyun 
1419*4882a593Smuzhiyun 	spin_lock(&dev->qp_table.lock);
1420*4882a593Smuzhiyun 	mthca_array_clear(&dev->qp_table.qp, mqpn);
1421*4882a593Smuzhiyun 	spin_unlock(&dev->qp_table.lock);
1422*4882a593Smuzhiyun 
1423*4882a593Smuzhiyun 	mthca_unlock_cqs(send_cq, recv_cq);
1424*4882a593Smuzhiyun 
1425*4882a593Smuzhiyun err_out:
1426*4882a593Smuzhiyun 	dma_free_coherent(&dev->pdev->dev, qp->sqp->header_buf_size,
1427*4882a593Smuzhiyun 			  qp->sqp->header_buf, qp->sqp->header_dma);
1428*4882a593Smuzhiyun 	return err;
1429*4882a593Smuzhiyun }
1430*4882a593Smuzhiyun 
1431*4882a593Smuzhiyun static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
1432*4882a593Smuzhiyun {
1433*4882a593Smuzhiyun 	int c;
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun 	spin_lock_irq(&dev->qp_table.lock);
1436*4882a593Smuzhiyun 	c = qp->refcount;
1437*4882a593Smuzhiyun 	spin_unlock_irq(&dev->qp_table.lock);
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun 	return c;
1440*4882a593Smuzhiyun }
1441*4882a593Smuzhiyun 
1442*4882a593Smuzhiyun void mthca_free_qp(struct mthca_dev *dev,
1443*4882a593Smuzhiyun 		   struct mthca_qp *qp)
1444*4882a593Smuzhiyun {
1445*4882a593Smuzhiyun 	struct mthca_cq *send_cq;
1446*4882a593Smuzhiyun 	struct mthca_cq *recv_cq;
1447*4882a593Smuzhiyun 
1448*4882a593Smuzhiyun 	send_cq = to_mcq(qp->ibqp.send_cq);
1449*4882a593Smuzhiyun 	recv_cq = to_mcq(qp->ibqp.recv_cq);
1450*4882a593Smuzhiyun 
1451*4882a593Smuzhiyun 	/*
1452*4882a593Smuzhiyun 	 * Lock CQs here, so that CQ polling code can do QP lookup
1453*4882a593Smuzhiyun 	 * without taking a lock.
1454*4882a593Smuzhiyun 	 */
1455*4882a593Smuzhiyun 	mthca_lock_cqs(send_cq, recv_cq);
1456*4882a593Smuzhiyun 
1457*4882a593Smuzhiyun 	spin_lock(&dev->qp_table.lock);
1458*4882a593Smuzhiyun 	mthca_array_clear(&dev->qp_table.qp,
1459*4882a593Smuzhiyun 			  qp->qpn & (dev->limits.num_qps - 1));
1460*4882a593Smuzhiyun 	--qp->refcount;
1461*4882a593Smuzhiyun 	spin_unlock(&dev->qp_table.lock);
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun 	mthca_unlock_cqs(send_cq, recv_cq);
1464*4882a593Smuzhiyun 
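	/*
	 * The initial reference was dropped above; wait for anyone still
	 * holding one (e.g. the event handling path) to finish with the QP.
	 */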
1465*4882a593Smuzhiyun 	wait_event(qp->wait, !get_qp_refcount(dev, qp));
1466*4882a593Smuzhiyun 
1467*4882a593Smuzhiyun 	if (qp->state != IB_QPS_RESET)
1468*4882a593Smuzhiyun 		mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
1469*4882a593Smuzhiyun 				NULL, 0);
1470*4882a593Smuzhiyun 
1471*4882a593Smuzhiyun 	/*
1472*4882a593Smuzhiyun 	 * If this is a userspace QP, the buffers, MR, CQs and so on
1473*4882a593Smuzhiyun 	 * will be cleaned up in userspace, so all we have to do is
1474*4882a593Smuzhiyun 	 * unref the mem-free tables and free the QPN in our table.
1475*4882a593Smuzhiyun 	 */
1476*4882a593Smuzhiyun 	if (!qp->ibqp.uobject) {
1477*4882a593Smuzhiyun 		mthca_cq_clean(dev, recv_cq, qp->qpn,
1478*4882a593Smuzhiyun 			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1479*4882a593Smuzhiyun 		if (send_cq != recv_cq)
1480*4882a593Smuzhiyun 			mthca_cq_clean(dev, send_cq, qp->qpn, NULL);
1481*4882a593Smuzhiyun 
1482*4882a593Smuzhiyun 		mthca_free_memfree(dev, qp);
1483*4882a593Smuzhiyun 		mthca_free_wqe_buf(dev, qp);
1484*4882a593Smuzhiyun 	}
1485*4882a593Smuzhiyun 
1486*4882a593Smuzhiyun 	mthca_unmap_memfree(dev, qp);
1487*4882a593Smuzhiyun 
1488*4882a593Smuzhiyun 	if (is_sqp(dev, qp)) {
1489*4882a593Smuzhiyun 		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
1490*4882a593Smuzhiyun 		dma_free_coherent(&dev->pdev->dev, qp->sqp->header_buf_size,
1491*4882a593Smuzhiyun 				  qp->sqp->header_buf, qp->sqp->header_dma);
1492*4882a593Smuzhiyun 	} else
1493*4882a593Smuzhiyun 		mthca_free(&dev->qp_table.alloc, qp->qpn);
1494*4882a593Smuzhiyun }
1495*4882a593Smuzhiyun 
1496*4882a593Smuzhiyun /* Create UD header for an MLX send and build a data segment for it */
1497*4882a593Smuzhiyun static int build_mlx_header(struct mthca_dev *dev, struct mthca_qp *qp, int ind,
1498*4882a593Smuzhiyun 			    const struct ib_ud_wr *wr,
1499*4882a593Smuzhiyun 			    struct mthca_mlx_seg *mlx,
1500*4882a593Smuzhiyun 			    struct mthca_data_seg *data)
1501*4882a593Smuzhiyun {
1502*4882a593Smuzhiyun 	struct mthca_sqp *sqp = qp->sqp;
1503*4882a593Smuzhiyun 	int header_size;
1504*4882a593Smuzhiyun 	int err;
1505*4882a593Smuzhiyun 	u16 pkey;
1506*4882a593Smuzhiyun 
1507*4882a593Smuzhiyun 	ib_ud_header_init(256, /* assume a MAD */ 1, 0, 0,
1508*4882a593Smuzhiyun 			  mthca_ah_grh_present(to_mah(wr->ah)), 0, 0, 0,
1509*4882a593Smuzhiyun 			  &sqp->ud_header);
1510*4882a593Smuzhiyun 
1511*4882a593Smuzhiyun 	err = mthca_read_ah(dev, to_mah(wr->ah), &sqp->ud_header);
1512*4882a593Smuzhiyun 	if (err)
1513*4882a593Smuzhiyun 		return err;
1514*4882a593Smuzhiyun 	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
1515*4882a593Smuzhiyun 	mlx->flags |= cpu_to_be32((!qp->ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
1516*4882a593Smuzhiyun 				  (sqp->ud_header.lrh.destination_lid ==
1517*4882a593Smuzhiyun 				   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
1518*4882a593Smuzhiyun 				  (sqp->ud_header.lrh.service_level << 8));
1519*4882a593Smuzhiyun 	mlx->rlid = sqp->ud_header.lrh.destination_lid;
1520*4882a593Smuzhiyun 	mlx->vcrc = 0;
1521*4882a593Smuzhiyun 
1522*4882a593Smuzhiyun 	switch (wr->wr.opcode) {
1523*4882a593Smuzhiyun 	case IB_WR_SEND:
1524*4882a593Smuzhiyun 		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
1525*4882a593Smuzhiyun 		sqp->ud_header.immediate_present = 0;
1526*4882a593Smuzhiyun 		break;
1527*4882a593Smuzhiyun 	case IB_WR_SEND_WITH_IMM:
1528*4882a593Smuzhiyun 		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
1529*4882a593Smuzhiyun 		sqp->ud_header.immediate_present = 1;
1530*4882a593Smuzhiyun 		sqp->ud_header.immediate_data = wr->wr.ex.imm_data;
1531*4882a593Smuzhiyun 		break;
1532*4882a593Smuzhiyun 	default:
1533*4882a593Smuzhiyun 		return -EINVAL;
1534*4882a593Smuzhiyun 	}
1535*4882a593Smuzhiyun 
1536*4882a593Smuzhiyun 	sqp->ud_header.lrh.virtual_lane    = !qp->ibqp.qp_num ? 15 : 0;
1537*4882a593Smuzhiyun 	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
1538*4882a593Smuzhiyun 		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
1539*4882a593Smuzhiyun 	sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
1540*4882a593Smuzhiyun 	if (!qp->ibqp.qp_num)
1541*4882a593Smuzhiyun 		ib_get_cached_pkey(&dev->ib_dev, qp->port, sqp->pkey_index,
1542*4882a593Smuzhiyun 				   &pkey);
1543*4882a593Smuzhiyun 	else
1544*4882a593Smuzhiyun 		ib_get_cached_pkey(&dev->ib_dev, qp->port, wr->pkey_index,
1545*4882a593Smuzhiyun 				   &pkey);
1546*4882a593Smuzhiyun 	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
1547*4882a593Smuzhiyun 	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
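	/* The BTH PSN field is 24 bits wide, hence the mask. */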
1548*4882a593Smuzhiyun 	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
1549*4882a593Smuzhiyun 	sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
1550*4882a593Smuzhiyun 					       sqp->qkey : wr->remote_qkey);
1551*4882a593Smuzhiyun 	sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num);
1552*4882a593Smuzhiyun 
1553*4882a593Smuzhiyun 	header_size = ib_ud_header_pack(&sqp->ud_header,
1554*4882a593Smuzhiyun 					sqp->header_buf +
1555*4882a593Smuzhiyun 					ind * MTHCA_UD_HEADER_SIZE);
1556*4882a593Smuzhiyun 
1557*4882a593Smuzhiyun 	data->byte_count = cpu_to_be32(header_size);
1558*4882a593Smuzhiyun 	data->lkey       = cpu_to_be32(to_mpd(qp->ibqp.pd)->ntmr.ibmr.lkey);
1559*4882a593Smuzhiyun 	data->addr       = cpu_to_be64(sqp->header_dma +
1560*4882a593Smuzhiyun 				       ind * MTHCA_UD_HEADER_SIZE);
1561*4882a593Smuzhiyun 
1562*4882a593Smuzhiyun 	return 0;
1563*4882a593Smuzhiyun }
1564*4882a593Smuzhiyun 
1565*4882a593Smuzhiyun static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
1566*4882a593Smuzhiyun 				    struct ib_cq *ib_cq)
1567*4882a593Smuzhiyun {
1568*4882a593Smuzhiyun 	unsigned cur;
1569*4882a593Smuzhiyun 	struct mthca_cq *cq;
1570*4882a593Smuzhiyun 
1571*4882a593Smuzhiyun 	cur = wq->head - wq->tail;
1572*4882a593Smuzhiyun 	if (likely(cur + nreq < wq->max))
1573*4882a593Smuzhiyun 		return 0;
1574*4882a593Smuzhiyun 
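	/*
	 * The queue looks full: re-read head and tail under the CQ lock,
	 * since the completion path updates wq->tail while holding it.
	 */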
1575*4882a593Smuzhiyun 	cq = to_mcq(ib_cq);
1576*4882a593Smuzhiyun 	spin_lock(&cq->lock);
1577*4882a593Smuzhiyun 	cur = wq->head - wq->tail;
1578*4882a593Smuzhiyun 	spin_unlock(&cq->lock);
1579*4882a593Smuzhiyun 
1580*4882a593Smuzhiyun 	return cur + nreq >= wq->max;
1581*4882a593Smuzhiyun }
1582*4882a593Smuzhiyun 
1583*4882a593Smuzhiyun static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
1584*4882a593Smuzhiyun 					  u64 remote_addr, u32 rkey)
1585*4882a593Smuzhiyun {
1586*4882a593Smuzhiyun 	rseg->raddr    = cpu_to_be64(remote_addr);
1587*4882a593Smuzhiyun 	rseg->rkey     = cpu_to_be32(rkey);
1588*4882a593Smuzhiyun 	rseg->reserved = 0;
1589*4882a593Smuzhiyun }
1590*4882a593Smuzhiyun 
1591*4882a593Smuzhiyun static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
1592*4882a593Smuzhiyun 					   const struct ib_atomic_wr *wr)
1593*4882a593Smuzhiyun {
1594*4882a593Smuzhiyun 	if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
1595*4882a593Smuzhiyun 		aseg->swap_add = cpu_to_be64(wr->swap);
1596*4882a593Smuzhiyun 		aseg->compare  = cpu_to_be64(wr->compare_add);
1597*4882a593Smuzhiyun 	} else {
1598*4882a593Smuzhiyun 		aseg->swap_add = cpu_to_be64(wr->compare_add);
1599*4882a593Smuzhiyun 		aseg->compare  = 0;
1600*4882a593Smuzhiyun 	}
1602*4882a593Smuzhiyun }
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
1605*4882a593Smuzhiyun 			     const struct ib_ud_wr *wr)
1606*4882a593Smuzhiyun {
1607*4882a593Smuzhiyun 	useg->lkey    = cpu_to_be32(to_mah(wr->ah)->key);
1608*4882a593Smuzhiyun 	useg->av_addr =	cpu_to_be64(to_mah(wr->ah)->avdma);
1609*4882a593Smuzhiyun 	useg->dqpn    =	cpu_to_be32(wr->remote_qpn);
1610*4882a593Smuzhiyun 	useg->qkey    =	cpu_to_be32(wr->remote_qkey);
1612*4882a593Smuzhiyun }
1613*4882a593Smuzhiyun 
1614*4882a593Smuzhiyun static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg,
1615*4882a593Smuzhiyun 			     const struct ib_ud_wr *wr)
1616*4882a593Smuzhiyun {
1617*4882a593Smuzhiyun 	memcpy(useg->av, to_mah(wr->ah)->av, MTHCA_AV_SIZE);
1618*4882a593Smuzhiyun 	useg->dqpn = cpu_to_be32(wr->remote_qpn);
1619*4882a593Smuzhiyun 	useg->qkey = cpu_to_be32(wr->remote_qkey);
1620*4882a593Smuzhiyun }
1621*4882a593Smuzhiyun 
1622*4882a593Smuzhiyun int mthca_tavor_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
1623*4882a593Smuzhiyun 			  const struct ib_send_wr **bad_wr)
1624*4882a593Smuzhiyun {
1625*4882a593Smuzhiyun 	struct mthca_dev *dev = to_mdev(ibqp->device);
1626*4882a593Smuzhiyun 	struct mthca_qp *qp = to_mqp(ibqp);
1627*4882a593Smuzhiyun 	void *wqe;
1628*4882a593Smuzhiyun 	void *prev_wqe;
1629*4882a593Smuzhiyun 	unsigned long flags;
1630*4882a593Smuzhiyun 	int err = 0;
1631*4882a593Smuzhiyun 	int nreq;
1632*4882a593Smuzhiyun 	int i;
1633*4882a593Smuzhiyun 	int size;
1634*4882a593Smuzhiyun 	/*
1635*4882a593Smuzhiyun 	 * f0 and size0 are only used if nreq != 0, and they will
1636*4882a593Smuzhiyun 	 * always be initialized the first time through the main loop
1637*4882a593Smuzhiyun 	 * before nreq is incremented.  So nreq cannot become non-zero
1638*4882a593Smuzhiyun 	 * without initializing f0 and size0, and they are in fact
1639*4882a593Smuzhiyun 	 * never used uninitialized.
1640*4882a593Smuzhiyun 	 */
1641*4882a593Smuzhiyun 	int size0;
1642*4882a593Smuzhiyun 	u32 f0;
1643*4882a593Smuzhiyun 	int ind;
1644*4882a593Smuzhiyun 	u8 op0 = 0;
1645*4882a593Smuzhiyun 
1646*4882a593Smuzhiyun 	spin_lock_irqsave(&qp->sq.lock, flags);
1647*4882a593Smuzhiyun 
1648*4882a593Smuzhiyun 	/* XXX check that state is OK to post send */
1649*4882a593Smuzhiyun 
1650*4882a593Smuzhiyun 	ind = qp->sq.next_ind;
1651*4882a593Smuzhiyun 
1652*4882a593Smuzhiyun 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
1653*4882a593Smuzhiyun 		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
1654*4882a593Smuzhiyun 			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
1655*4882a593Smuzhiyun 					" %d max, %d nreq)\n", qp->qpn,
1656*4882a593Smuzhiyun 					qp->sq.head, qp->sq.tail,
1657*4882a593Smuzhiyun 					qp->sq.max, nreq);
1658*4882a593Smuzhiyun 			err = -ENOMEM;
1659*4882a593Smuzhiyun 			*bad_wr = wr;
1660*4882a593Smuzhiyun 			goto out;
1661*4882a593Smuzhiyun 		}
1662*4882a593Smuzhiyun 
1663*4882a593Smuzhiyun 		wqe = get_send_wqe(qp, ind);
1664*4882a593Smuzhiyun 		prev_wqe = qp->sq.last;
1665*4882a593Smuzhiyun 		qp->sq.last = wqe;
1666*4882a593Smuzhiyun 
1667*4882a593Smuzhiyun 		((struct mthca_next_seg *) wqe)->nda_op = 0;
1668*4882a593Smuzhiyun 		((struct mthca_next_seg *) wqe)->ee_nds = 0;
1669*4882a593Smuzhiyun 		((struct mthca_next_seg *) wqe)->flags =
1670*4882a593Smuzhiyun 			((wr->send_flags & IB_SEND_SIGNALED) ?
1671*4882a593Smuzhiyun 			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
1672*4882a593Smuzhiyun 			((wr->send_flags & IB_SEND_SOLICITED) ?
1673*4882a593Smuzhiyun 			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
1674*4882a593Smuzhiyun 			cpu_to_be32(1);
1675*4882a593Smuzhiyun 		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
1676*4882a593Smuzhiyun 		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
1677*4882a593Smuzhiyun 			((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;
1678*4882a593Smuzhiyun 
1679*4882a593Smuzhiyun 		wqe += sizeof (struct mthca_next_seg);
1680*4882a593Smuzhiyun 		size = sizeof (struct mthca_next_seg) / 16;
1681*4882a593Smuzhiyun 
1682*4882a593Smuzhiyun 		switch (qp->transport) {
1683*4882a593Smuzhiyun 		case RC:
1684*4882a593Smuzhiyun 			switch (wr->opcode) {
1685*4882a593Smuzhiyun 			case IB_WR_ATOMIC_CMP_AND_SWP:
1686*4882a593Smuzhiyun 			case IB_WR_ATOMIC_FETCH_AND_ADD:
1687*4882a593Smuzhiyun 				set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
1688*4882a593Smuzhiyun 					      atomic_wr(wr)->rkey);
1689*4882a593Smuzhiyun 				wqe += sizeof (struct mthca_raddr_seg);
1690*4882a593Smuzhiyun 
1691*4882a593Smuzhiyun 				set_atomic_seg(wqe, atomic_wr(wr));
1692*4882a593Smuzhiyun 				wqe += sizeof (struct mthca_atomic_seg);
1693*4882a593Smuzhiyun 				size += (sizeof (struct mthca_raddr_seg) +
1694*4882a593Smuzhiyun 					 sizeof (struct mthca_atomic_seg)) / 16;
1695*4882a593Smuzhiyun 				break;
1696*4882a593Smuzhiyun 
1697*4882a593Smuzhiyun 			case IB_WR_RDMA_WRITE:
1698*4882a593Smuzhiyun 			case IB_WR_RDMA_WRITE_WITH_IMM:
1699*4882a593Smuzhiyun 			case IB_WR_RDMA_READ:
1700*4882a593Smuzhiyun 				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
1701*4882a593Smuzhiyun 					      rdma_wr(wr)->rkey);
1702*4882a593Smuzhiyun 				wqe  += sizeof (struct mthca_raddr_seg);
1703*4882a593Smuzhiyun 				size += sizeof (struct mthca_raddr_seg) / 16;
1704*4882a593Smuzhiyun 				break;
1705*4882a593Smuzhiyun 
1706*4882a593Smuzhiyun 			default:
1707*4882a593Smuzhiyun 				/* No extra segments required for sends */
1708*4882a593Smuzhiyun 				break;
1709*4882a593Smuzhiyun 			}
1710*4882a593Smuzhiyun 
1711*4882a593Smuzhiyun 			break;
1712*4882a593Smuzhiyun 
1713*4882a593Smuzhiyun 		case UC:
1714*4882a593Smuzhiyun 			switch (wr->opcode) {
1715*4882a593Smuzhiyun 			case IB_WR_RDMA_WRITE:
1716*4882a593Smuzhiyun 			case IB_WR_RDMA_WRITE_WITH_IMM:
1717*4882a593Smuzhiyun 				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
1718*4882a593Smuzhiyun 					      rdma_wr(wr)->rkey);
1719*4882a593Smuzhiyun 				wqe  += sizeof (struct mthca_raddr_seg);
1720*4882a593Smuzhiyun 				size += sizeof (struct mthca_raddr_seg) / 16;
1721*4882a593Smuzhiyun 				break;
1722*4882a593Smuzhiyun 
1723*4882a593Smuzhiyun 			default:
1724*4882a593Smuzhiyun 				/* No extra segments required for sends */
1725*4882a593Smuzhiyun 				break;
1726*4882a593Smuzhiyun 			}
1727*4882a593Smuzhiyun 
1728*4882a593Smuzhiyun 			break;
1729*4882a593Smuzhiyun 
1730*4882a593Smuzhiyun 		case UD:
1731*4882a593Smuzhiyun 			set_tavor_ud_seg(wqe, ud_wr(wr));
1732*4882a593Smuzhiyun 			wqe  += sizeof (struct mthca_tavor_ud_seg);
1733*4882a593Smuzhiyun 			size += sizeof (struct mthca_tavor_ud_seg) / 16;
1734*4882a593Smuzhiyun 			break;
1735*4882a593Smuzhiyun 
1736*4882a593Smuzhiyun 		case MLX:
1737*4882a593Smuzhiyun 			err = build_mlx_header(
1738*4882a593Smuzhiyun 				dev, qp, ind, ud_wr(wr),
1739*4882a593Smuzhiyun 				wqe - sizeof(struct mthca_next_seg), wqe);
1740*4882a593Smuzhiyun 			if (err) {
1741*4882a593Smuzhiyun 				*bad_wr = wr;
1742*4882a593Smuzhiyun 				goto out;
1743*4882a593Smuzhiyun 			}
1744*4882a593Smuzhiyun 			wqe += sizeof (struct mthca_data_seg);
1745*4882a593Smuzhiyun 			size += sizeof (struct mthca_data_seg) / 16;
1746*4882a593Smuzhiyun 			break;
1747*4882a593Smuzhiyun 		}
1748*4882a593Smuzhiyun 
1749*4882a593Smuzhiyun 		if (wr->num_sge > qp->sq.max_gs) {
1750*4882a593Smuzhiyun 			mthca_err(dev, "too many gathers\n");
1751*4882a593Smuzhiyun 			err = -EINVAL;
1752*4882a593Smuzhiyun 			*bad_wr = wr;
1753*4882a593Smuzhiyun 			goto out;
1754*4882a593Smuzhiyun 		}
1755*4882a593Smuzhiyun 
1756*4882a593Smuzhiyun 		for (i = 0; i < wr->num_sge; ++i) {
1757*4882a593Smuzhiyun 			mthca_set_data_seg(wqe, wr->sg_list + i);
1758*4882a593Smuzhiyun 			wqe  += sizeof (struct mthca_data_seg);
1759*4882a593Smuzhiyun 			size += sizeof (struct mthca_data_seg) / 16;
1760*4882a593Smuzhiyun 		}
1761*4882a593Smuzhiyun 
1762*4882a593Smuzhiyun 		/* Add one more inline data segment for ICRC */
1763*4882a593Smuzhiyun 		if (qp->transport == MLX) {
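			/*
			 * Bit 31 of byte_count marks the segment as inline;
			 * the low bits give its length, here 4 bytes reserved
			 * for the ICRC.
			 */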
1764*4882a593Smuzhiyun 			((struct mthca_data_seg *) wqe)->byte_count =
1765*4882a593Smuzhiyun 				cpu_to_be32((1 << 31) | 4);
1766*4882a593Smuzhiyun 			((u32 *) wqe)[1] = 0;
1767*4882a593Smuzhiyun 			wqe += sizeof (struct mthca_data_seg);
1768*4882a593Smuzhiyun 			size += sizeof (struct mthca_data_seg) / 16;
1769*4882a593Smuzhiyun 		}
1770*4882a593Smuzhiyun 
1771*4882a593Smuzhiyun 		qp->wrid[ind + qp->rq.max] = wr->wr_id;
1772*4882a593Smuzhiyun 
1773*4882a593Smuzhiyun 		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
1774*4882a593Smuzhiyun 			mthca_err(dev, "opcode invalid\n");
1775*4882a593Smuzhiyun 			err = -EINVAL;
1776*4882a593Smuzhiyun 			*bad_wr = wr;
1777*4882a593Smuzhiyun 			goto out;
1778*4882a593Smuzhiyun 		}
1779*4882a593Smuzhiyun 
1780*4882a593Smuzhiyun 		((struct mthca_next_seg *) prev_wqe)->nda_op =
1781*4882a593Smuzhiyun 			cpu_to_be32(((ind << qp->sq.wqe_shift) +
1782*4882a593Smuzhiyun 				     qp->send_wqe_offset) |
1783*4882a593Smuzhiyun 				    mthca_opcode[wr->opcode]);
1784*4882a593Smuzhiyun 		wmb();
1785*4882a593Smuzhiyun 		((struct mthca_next_seg *) prev_wqe)->ee_nds =
1786*4882a593Smuzhiyun 			cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size |
1787*4882a593Smuzhiyun 				    ((wr->send_flags & IB_SEND_FENCE) ?
1788*4882a593Smuzhiyun 				    MTHCA_NEXT_FENCE : 0));
1789*4882a593Smuzhiyun 
1790*4882a593Smuzhiyun 		if (!nreq) {
1791*4882a593Smuzhiyun 			size0 = size;
1792*4882a593Smuzhiyun 			op0   = mthca_opcode[wr->opcode];
1793*4882a593Smuzhiyun 			f0    = wr->send_flags & IB_SEND_FENCE ?
1794*4882a593Smuzhiyun 				MTHCA_SEND_DOORBELL_FENCE : 0;
1795*4882a593Smuzhiyun 		}
1796*4882a593Smuzhiyun 
1797*4882a593Smuzhiyun 		++ind;
1798*4882a593Smuzhiyun 		if (unlikely(ind >= qp->sq.max))
1799*4882a593Smuzhiyun 			ind -= qp->sq.max;
1800*4882a593Smuzhiyun 	}
1801*4882a593Smuzhiyun 
1802*4882a593Smuzhiyun out:
1803*4882a593Smuzhiyun 	if (likely(nreq)) {
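		/*
		 * Make sure the descriptor writes are visible before
		 * ringing the MMIO send doorbell.
		 */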
1804*4882a593Smuzhiyun 		wmb();
1805*4882a593Smuzhiyun 
1806*4882a593Smuzhiyun 		mthca_write64(((qp->sq.next_ind << qp->sq.wqe_shift) +
1807*4882a593Smuzhiyun 			       qp->send_wqe_offset) | f0 | op0,
1808*4882a593Smuzhiyun 			      (qp->qpn << 8) | size0,
1809*4882a593Smuzhiyun 			      dev->kar + MTHCA_SEND_DOORBELL,
1810*4882a593Smuzhiyun 			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
1811*4882a593Smuzhiyun 	}
1812*4882a593Smuzhiyun 
1813*4882a593Smuzhiyun 	qp->sq.next_ind = ind;
1814*4882a593Smuzhiyun 	qp->sq.head    += nreq;
1815*4882a593Smuzhiyun 
1816*4882a593Smuzhiyun 	spin_unlock_irqrestore(&qp->sq.lock, flags);
1817*4882a593Smuzhiyun 	return err;
1818*4882a593Smuzhiyun }
1819*4882a593Smuzhiyun 
1820*4882a593Smuzhiyun int mthca_tavor_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
1821*4882a593Smuzhiyun 			     const struct ib_recv_wr **bad_wr)
1822*4882a593Smuzhiyun {
1823*4882a593Smuzhiyun 	struct mthca_dev *dev = to_mdev(ibqp->device);
1824*4882a593Smuzhiyun 	struct mthca_qp *qp = to_mqp(ibqp);
1825*4882a593Smuzhiyun 	unsigned long flags;
1826*4882a593Smuzhiyun 	int err = 0;
1827*4882a593Smuzhiyun 	int nreq;
1828*4882a593Smuzhiyun 	int i;
1829*4882a593Smuzhiyun 	int size;
1830*4882a593Smuzhiyun 	/*
1831*4882a593Smuzhiyun 	 * size0 is only used if nreq != 0, and it will always be
1832*4882a593Smuzhiyun 	 * initialized the first time through the main loop before
1833*4882a593Smuzhiyun 	 * nreq is incremented.  So nreq cannot become non-zero
1834*4882a593Smuzhiyun 	 * without initializing size0, and it is in fact never used
1835*4882a593Smuzhiyun 	 * uninitialized.
1836*4882a593Smuzhiyun 	 */
1837*4882a593Smuzhiyun 	int size0;
1838*4882a593Smuzhiyun 	int ind;
1839*4882a593Smuzhiyun 	void *wqe;
1840*4882a593Smuzhiyun 	void *prev_wqe;
1841*4882a593Smuzhiyun 
1842*4882a593Smuzhiyun 	spin_lock_irqsave(&qp->rq.lock, flags);
1843*4882a593Smuzhiyun 
1844*4882a593Smuzhiyun 	/* XXX check that state is OK to post receive */
1845*4882a593Smuzhiyun 
1846*4882a593Smuzhiyun 	ind = qp->rq.next_ind;
1847*4882a593Smuzhiyun 
1848*4882a593Smuzhiyun 	for (nreq = 0; wr; wr = wr->next) {
1849*4882a593Smuzhiyun 		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
1850*4882a593Smuzhiyun 			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
1851*4882a593Smuzhiyun 					" %d max, %d nreq)\n", qp->qpn,
1852*4882a593Smuzhiyun 					qp->rq.head, qp->rq.tail,
1853*4882a593Smuzhiyun 					qp->rq.max, nreq);
1854*4882a593Smuzhiyun 			err = -ENOMEM;
1855*4882a593Smuzhiyun 			*bad_wr = wr;
1856*4882a593Smuzhiyun 			goto out;
1857*4882a593Smuzhiyun 		}
1858*4882a593Smuzhiyun 
1859*4882a593Smuzhiyun 		wqe = get_recv_wqe(qp, ind);
1860*4882a593Smuzhiyun 		prev_wqe = qp->rq.last;
1861*4882a593Smuzhiyun 		qp->rq.last = wqe;
1862*4882a593Smuzhiyun 
1863*4882a593Smuzhiyun 		((struct mthca_next_seg *) wqe)->ee_nds =
1864*4882a593Smuzhiyun 			cpu_to_be32(MTHCA_NEXT_DBD);
1865*4882a593Smuzhiyun 		((struct mthca_next_seg *) wqe)->flags = 0;
1866*4882a593Smuzhiyun 
1867*4882a593Smuzhiyun 		wqe += sizeof (struct mthca_next_seg);
1868*4882a593Smuzhiyun 		size = sizeof (struct mthca_next_seg) / 16;
1869*4882a593Smuzhiyun 
1870*4882a593Smuzhiyun 		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
1871*4882a593Smuzhiyun 			err = -EINVAL;
1872*4882a593Smuzhiyun 			*bad_wr = wr;
1873*4882a593Smuzhiyun 			goto out;
1874*4882a593Smuzhiyun 		}
1875*4882a593Smuzhiyun 
1876*4882a593Smuzhiyun 		for (i = 0; i < wr->num_sge; ++i) {
1877*4882a593Smuzhiyun 			mthca_set_data_seg(wqe, wr->sg_list + i);
1878*4882a593Smuzhiyun 			wqe  += sizeof (struct mthca_data_seg);
1879*4882a593Smuzhiyun 			size += sizeof (struct mthca_data_seg) / 16;
1880*4882a593Smuzhiyun 		}
1881*4882a593Smuzhiyun 
1882*4882a593Smuzhiyun 		qp->wrid[ind] = wr->wr_id;
1883*4882a593Smuzhiyun 
1884*4882a593Smuzhiyun 		((struct mthca_next_seg *) prev_wqe)->ee_nds =
1885*4882a593Smuzhiyun 			cpu_to_be32(MTHCA_NEXT_DBD | size);
1886*4882a593Smuzhiyun 
1887*4882a593Smuzhiyun 		if (!nreq)
1888*4882a593Smuzhiyun 			size0 = size;
1889*4882a593Smuzhiyun 
1890*4882a593Smuzhiyun 		++ind;
1891*4882a593Smuzhiyun 		if (unlikely(ind >= qp->rq.max))
1892*4882a593Smuzhiyun 			ind -= qp->rq.max;
1893*4882a593Smuzhiyun 
1894*4882a593Smuzhiyun 		++nreq;
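		/*
		 * The receive doorbell carries the WQE count in a single
		 * byte, so flush a full chain to the hardware before nreq
		 * can overflow that field.
		 */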
1895*4882a593Smuzhiyun 		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
1896*4882a593Smuzhiyun 			nreq = 0;
1897*4882a593Smuzhiyun 
1898*4882a593Smuzhiyun 			wmb();
1899*4882a593Smuzhiyun 
1900*4882a593Smuzhiyun 			mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
1901*4882a593Smuzhiyun 				      qp->qpn << 8, dev->kar + MTHCA_RECEIVE_DOORBELL,
1902*4882a593Smuzhiyun 				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
1903*4882a593Smuzhiyun 
1904*4882a593Smuzhiyun 			qp->rq.next_ind = ind;
1905*4882a593Smuzhiyun 			qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
1906*4882a593Smuzhiyun 		}
1907*4882a593Smuzhiyun 	}
1908*4882a593Smuzhiyun 
1909*4882a593Smuzhiyun out:
1910*4882a593Smuzhiyun 	if (likely(nreq)) {
1911*4882a593Smuzhiyun 		wmb();
1912*4882a593Smuzhiyun 
1913*4882a593Smuzhiyun 		mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
1914*4882a593Smuzhiyun 			      qp->qpn << 8 | nreq, dev->kar + MTHCA_RECEIVE_DOORBELL,
1915*4882a593Smuzhiyun 			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
1916*4882a593Smuzhiyun 	}
1917*4882a593Smuzhiyun 
1918*4882a593Smuzhiyun 	qp->rq.next_ind = ind;
1919*4882a593Smuzhiyun 	qp->rq.head    += nreq;
1920*4882a593Smuzhiyun 
1921*4882a593Smuzhiyun 	spin_unlock_irqrestore(&qp->rq.lock, flags);
1922*4882a593Smuzhiyun 	return err;
1923*4882a593Smuzhiyun }
1924*4882a593Smuzhiyun 
1925*4882a593Smuzhiyun int mthca_arbel_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
1926*4882a593Smuzhiyun 			  const struct ib_send_wr **bad_wr)
1927*4882a593Smuzhiyun {
1928*4882a593Smuzhiyun 	struct mthca_dev *dev = to_mdev(ibqp->device);
1929*4882a593Smuzhiyun 	struct mthca_qp *qp = to_mqp(ibqp);
1930*4882a593Smuzhiyun 	u32 dbhi;
1931*4882a593Smuzhiyun 	void *wqe;
1932*4882a593Smuzhiyun 	void *prev_wqe;
1933*4882a593Smuzhiyun 	unsigned long flags;
1934*4882a593Smuzhiyun 	int err = 0;
1935*4882a593Smuzhiyun 	int nreq;
1936*4882a593Smuzhiyun 	int i;
1937*4882a593Smuzhiyun 	int size;
1938*4882a593Smuzhiyun 	/*
1939*4882a593Smuzhiyun 	 * f0 and size0 are only used if nreq != 0, and they will
1940*4882a593Smuzhiyun 	 * always be initialized the first time through the main loop
1941*4882a593Smuzhiyun 	 * before nreq is incremented.  So nreq cannot become non-zero
1942*4882a593Smuzhiyun 	 * without initializing f0 and size0, and they are in fact
1943*4882a593Smuzhiyun 	 * never used uninitialized.
1944*4882a593Smuzhiyun 	 */
1945*4882a593Smuzhiyun 	int size0;
1946*4882a593Smuzhiyun 	u32 f0;
1947*4882a593Smuzhiyun 	int ind;
1948*4882a593Smuzhiyun 	u8 op0 = 0;
1949*4882a593Smuzhiyun 
1950*4882a593Smuzhiyun 	spin_lock_irqsave(&qp->sq.lock, flags);
1951*4882a593Smuzhiyun 
1952*4882a593Smuzhiyun 	/* XXX check that state is OK to post send */
1953*4882a593Smuzhiyun 
1954*4882a593Smuzhiyun 	ind = qp->sq.head & (qp->sq.max - 1);
1955*4882a593Smuzhiyun 
1956*4882a593Smuzhiyun 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
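		/*
		 * The send doorbell carries the WQE count in a single byte,
		 * so flush a full chain to the hardware before nreq can
		 * overflow that field.
		 */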
1957*4882a593Smuzhiyun 		if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) {
1958*4882a593Smuzhiyun 			nreq = 0;
1959*4882a593Smuzhiyun 
1960*4882a593Smuzhiyun 			dbhi = (MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) |
1961*4882a593Smuzhiyun 				((qp->sq.head & 0xffff) << 8) | f0 | op0;
1962*4882a593Smuzhiyun 
1963*4882a593Smuzhiyun 			qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;
1964*4882a593Smuzhiyun 
1965*4882a593Smuzhiyun 			/*
1966*4882a593Smuzhiyun 			 * Make sure that descriptors are written before
1967*4882a593Smuzhiyun 			 * doorbell record.
1968*4882a593Smuzhiyun 			 */
1969*4882a593Smuzhiyun 			wmb();
1970*4882a593Smuzhiyun 			*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);
1971*4882a593Smuzhiyun 
1972*4882a593Smuzhiyun 			/*
1973*4882a593Smuzhiyun 			 * Make sure doorbell record is written before we
1974*4882a593Smuzhiyun 			 * write MMIO send doorbell.
1975*4882a593Smuzhiyun 			 */
1976*4882a593Smuzhiyun 			wmb();
1977*4882a593Smuzhiyun 
1978*4882a593Smuzhiyun 			mthca_write64(dbhi, (qp->qpn << 8) | size0,
1979*4882a593Smuzhiyun 				      dev->kar + MTHCA_SEND_DOORBELL,
1980*4882a593Smuzhiyun 				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
1981*4882a593Smuzhiyun 		}
1982*4882a593Smuzhiyun 
1983*4882a593Smuzhiyun 		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
1984*4882a593Smuzhiyun 			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
1985*4882a593Smuzhiyun 					" %d max, %d nreq)\n", qp->qpn,
1986*4882a593Smuzhiyun 					qp->sq.head, qp->sq.tail,
1987*4882a593Smuzhiyun 					qp->sq.max, nreq);
1988*4882a593Smuzhiyun 			err = -ENOMEM;
1989*4882a593Smuzhiyun 			*bad_wr = wr;
1990*4882a593Smuzhiyun 			goto out;
1991*4882a593Smuzhiyun 		}
1992*4882a593Smuzhiyun 
1993*4882a593Smuzhiyun 		wqe = get_send_wqe(qp, ind);
1994*4882a593Smuzhiyun 		prev_wqe = qp->sq.last;
1995*4882a593Smuzhiyun 		qp->sq.last = wqe;
1996*4882a593Smuzhiyun 
1997*4882a593Smuzhiyun 		((struct mthca_next_seg *) wqe)->flags =
1998*4882a593Smuzhiyun 			((wr->send_flags & IB_SEND_SIGNALED) ?
1999*4882a593Smuzhiyun 			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
2000*4882a593Smuzhiyun 			((wr->send_flags & IB_SEND_SOLICITED) ?
2001*4882a593Smuzhiyun 			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
2002*4882a593Smuzhiyun 			((wr->send_flags & IB_SEND_IP_CSUM) ?
2003*4882a593Smuzhiyun 			 cpu_to_be32(MTHCA_NEXT_IP_CSUM | MTHCA_NEXT_TCP_UDP_CSUM) : 0) |
2004*4882a593Smuzhiyun 			cpu_to_be32(1);
2005*4882a593Smuzhiyun 		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
2006*4882a593Smuzhiyun 		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
2007*4882a593Smuzhiyun 			((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;
2008*4882a593Smuzhiyun 
2009*4882a593Smuzhiyun 		wqe += sizeof (struct mthca_next_seg);
2010*4882a593Smuzhiyun 		size = sizeof (struct mthca_next_seg) / 16;
2011*4882a593Smuzhiyun 
2012*4882a593Smuzhiyun 		switch (qp->transport) {
2013*4882a593Smuzhiyun 		case RC:
2014*4882a593Smuzhiyun 			switch (wr->opcode) {
2015*4882a593Smuzhiyun 			case IB_WR_ATOMIC_CMP_AND_SWP:
2016*4882a593Smuzhiyun 			case IB_WR_ATOMIC_FETCH_AND_ADD:
2017*4882a593Smuzhiyun 				set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
2018*4882a593Smuzhiyun 					      atomic_wr(wr)->rkey);
2019*4882a593Smuzhiyun 				wqe += sizeof (struct mthca_raddr_seg);
2020*4882a593Smuzhiyun 
2021*4882a593Smuzhiyun 				set_atomic_seg(wqe, atomic_wr(wr));
2022*4882a593Smuzhiyun 				wqe  += sizeof (struct mthca_atomic_seg);
2023*4882a593Smuzhiyun 				size += (sizeof (struct mthca_raddr_seg) +
2024*4882a593Smuzhiyun 					 sizeof (struct mthca_atomic_seg)) / 16;
2025*4882a593Smuzhiyun 				break;
2026*4882a593Smuzhiyun 
2027*4882a593Smuzhiyun 			case IB_WR_RDMA_READ:
2028*4882a593Smuzhiyun 			case IB_WR_RDMA_WRITE:
2029*4882a593Smuzhiyun 			case IB_WR_RDMA_WRITE_WITH_IMM:
2030*4882a593Smuzhiyun 				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
2031*4882a593Smuzhiyun 					      rdma_wr(wr)->rkey);
2032*4882a593Smuzhiyun 				wqe  += sizeof (struct mthca_raddr_seg);
2033*4882a593Smuzhiyun 				size += sizeof (struct mthca_raddr_seg) / 16;
2034*4882a593Smuzhiyun 				break;
2035*4882a593Smuzhiyun 
2036*4882a593Smuzhiyun 			default:
2037*4882a593Smuzhiyun 				/* No extra segments required for sends */
2038*4882a593Smuzhiyun 				break;
2039*4882a593Smuzhiyun 			}
2040*4882a593Smuzhiyun 
2041*4882a593Smuzhiyun 			break;
2042*4882a593Smuzhiyun 
2043*4882a593Smuzhiyun 		case UC:
2044*4882a593Smuzhiyun 			switch (wr->opcode) {
2045*4882a593Smuzhiyun 			case IB_WR_RDMA_WRITE:
2046*4882a593Smuzhiyun 			case IB_WR_RDMA_WRITE_WITH_IMM:
2047*4882a593Smuzhiyun 				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
2048*4882a593Smuzhiyun 					      rdma_wr(wr)->rkey);
2049*4882a593Smuzhiyun 				wqe  += sizeof (struct mthca_raddr_seg);
2050*4882a593Smuzhiyun 				size += sizeof (struct mthca_raddr_seg) / 16;
2051*4882a593Smuzhiyun 				break;
2052*4882a593Smuzhiyun 
2053*4882a593Smuzhiyun 			default:
2054*4882a593Smuzhiyun 				/* No extra segments required for sends */
2055*4882a593Smuzhiyun 				break;
2056*4882a593Smuzhiyun 			}
2057*4882a593Smuzhiyun 
2058*4882a593Smuzhiyun 			break;
2059*4882a593Smuzhiyun 
2060*4882a593Smuzhiyun 		case UD:
2061*4882a593Smuzhiyun 			set_arbel_ud_seg(wqe, ud_wr(wr));
2062*4882a593Smuzhiyun 			wqe  += sizeof (struct mthca_arbel_ud_seg);
2063*4882a593Smuzhiyun 			size += sizeof (struct mthca_arbel_ud_seg) / 16;
2064*4882a593Smuzhiyun 			break;
2065*4882a593Smuzhiyun 
2066*4882a593Smuzhiyun 		case MLX:
2067*4882a593Smuzhiyun 			err = build_mlx_header(
2068*4882a593Smuzhiyun 				dev, qp, ind, ud_wr(wr),
2069*4882a593Smuzhiyun 				wqe - sizeof(struct mthca_next_seg), wqe);
2070*4882a593Smuzhiyun 			if (err) {
2071*4882a593Smuzhiyun 				*bad_wr = wr;
2072*4882a593Smuzhiyun 				goto out;
2073*4882a593Smuzhiyun 			}
2074*4882a593Smuzhiyun 			wqe += sizeof (struct mthca_data_seg);
2075*4882a593Smuzhiyun 			size += sizeof (struct mthca_data_seg) / 16;
2076*4882a593Smuzhiyun 			break;
2077*4882a593Smuzhiyun 		}
2078*4882a593Smuzhiyun 
2079*4882a593Smuzhiyun 		if (wr->num_sge > qp->sq.max_gs) {
2080*4882a593Smuzhiyun 			mthca_err(dev, "too many gathers\n");
2081*4882a593Smuzhiyun 			err = -EINVAL;
2082*4882a593Smuzhiyun 			*bad_wr = wr;
2083*4882a593Smuzhiyun 			goto out;
2084*4882a593Smuzhiyun 		}
2085*4882a593Smuzhiyun 
2086*4882a593Smuzhiyun 		for (i = 0; i < wr->num_sge; ++i) {
2087*4882a593Smuzhiyun 			mthca_set_data_seg(wqe, wr->sg_list + i);
2088*4882a593Smuzhiyun 			wqe  += sizeof (struct mthca_data_seg);
2089*4882a593Smuzhiyun 			size += sizeof (struct mthca_data_seg) / 16;
2090*4882a593Smuzhiyun 		}
2091*4882a593Smuzhiyun 
2092*4882a593Smuzhiyun 		/* Add one more inline data segment for ICRC */
2093*4882a593Smuzhiyun 		if (qp->transport == MLX) {
2094*4882a593Smuzhiyun 			((struct mthca_data_seg *) wqe)->byte_count =
2095*4882a593Smuzhiyun 				cpu_to_be32((1 << 31) | 4);
2096*4882a593Smuzhiyun 			((u32 *) wqe)[1] = 0;
2097*4882a593Smuzhiyun 			wqe += sizeof (struct mthca_data_seg);
2098*4882a593Smuzhiyun 			size += sizeof (struct mthca_data_seg) / 16;
2099*4882a593Smuzhiyun 		}
2100*4882a593Smuzhiyun 
2101*4882a593Smuzhiyun 		qp->wrid[ind + qp->rq.max] = wr->wr_id;
2102*4882a593Smuzhiyun 
2103*4882a593Smuzhiyun 		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
2104*4882a593Smuzhiyun 			mthca_err(dev, "opcode invalid\n");
2105*4882a593Smuzhiyun 			err = -EINVAL;
2106*4882a593Smuzhiyun 			*bad_wr = wr;
2107*4882a593Smuzhiyun 			goto out;
2108*4882a593Smuzhiyun 		}
2109*4882a593Smuzhiyun 
2110*4882a593Smuzhiyun 		((struct mthca_next_seg *) prev_wqe)->nda_op =
2111*4882a593Smuzhiyun 			cpu_to_be32(((ind << qp->sq.wqe_shift) +
2112*4882a593Smuzhiyun 				     qp->send_wqe_offset) |
2113*4882a593Smuzhiyun 				    mthca_opcode[wr->opcode]);
2114*4882a593Smuzhiyun 		wmb();
2115*4882a593Smuzhiyun 		((struct mthca_next_seg *) prev_wqe)->ee_nds =
2116*4882a593Smuzhiyun 			cpu_to_be32(MTHCA_NEXT_DBD | size |
2117*4882a593Smuzhiyun 				    ((wr->send_flags & IB_SEND_FENCE) ?
2118*4882a593Smuzhiyun 				     MTHCA_NEXT_FENCE : 0));
2119*4882a593Smuzhiyun 
2120*4882a593Smuzhiyun 		if (!nreq) {
2121*4882a593Smuzhiyun 			size0 = size;
2122*4882a593Smuzhiyun 			op0   = mthca_opcode[wr->opcode];
2123*4882a593Smuzhiyun 			f0    = wr->send_flags & IB_SEND_FENCE ?
2124*4882a593Smuzhiyun 				MTHCA_SEND_DOORBELL_FENCE : 0;
2125*4882a593Smuzhiyun 		}
2126*4882a593Smuzhiyun 
2127*4882a593Smuzhiyun 		++ind;
2128*4882a593Smuzhiyun 		if (unlikely(ind >= qp->sq.max))
2129*4882a593Smuzhiyun 			ind -= qp->sq.max;
2130*4882a593Smuzhiyun 	}
2131*4882a593Smuzhiyun 
2132*4882a593Smuzhiyun out:
2133*4882a593Smuzhiyun 	if (likely(nreq)) {
2134*4882a593Smuzhiyun 		dbhi = (nreq << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0;
2135*4882a593Smuzhiyun 
2136*4882a593Smuzhiyun 		qp->sq.head += nreq;
2137*4882a593Smuzhiyun 
2138*4882a593Smuzhiyun 		/*
2139*4882a593Smuzhiyun 		 * Make sure that descriptors are written before
2140*4882a593Smuzhiyun 		 * doorbell record.
2141*4882a593Smuzhiyun 		 */
2142*4882a593Smuzhiyun 		wmb();
2143*4882a593Smuzhiyun 		*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);
2144*4882a593Smuzhiyun 
2145*4882a593Smuzhiyun 		/*
2146*4882a593Smuzhiyun 		 * Make sure doorbell record is written before we
2147*4882a593Smuzhiyun 		 * write MMIO send doorbell.
2148*4882a593Smuzhiyun 		 */
2149*4882a593Smuzhiyun 		wmb();
2150*4882a593Smuzhiyun 
2151*4882a593Smuzhiyun 		mthca_write64(dbhi, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL,
2152*4882a593Smuzhiyun 			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
2153*4882a593Smuzhiyun 	}
2154*4882a593Smuzhiyun 
2155*4882a593Smuzhiyun 	spin_unlock_irqrestore(&qp->sq.lock, flags);
2156*4882a593Smuzhiyun 	return err;
2157*4882a593Smuzhiyun }
2158*4882a593Smuzhiyun 
2159*4882a593Smuzhiyun int mthca_arbel_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
2160*4882a593Smuzhiyun 			     const struct ib_recv_wr **bad_wr)
2161*4882a593Smuzhiyun {
2162*4882a593Smuzhiyun 	struct mthca_dev *dev = to_mdev(ibqp->device);
2163*4882a593Smuzhiyun 	struct mthca_qp *qp = to_mqp(ibqp);
2164*4882a593Smuzhiyun 	unsigned long flags;
2165*4882a593Smuzhiyun 	int err = 0;
2166*4882a593Smuzhiyun 	int nreq;
2167*4882a593Smuzhiyun 	int ind;
2168*4882a593Smuzhiyun 	int i;
2169*4882a593Smuzhiyun 	void *wqe;
2170*4882a593Smuzhiyun 
2171*4882a593Smuzhiyun 	spin_lock_irqsave(&qp->rq.lock, flags);
2172*4882a593Smuzhiyun 
2173*4882a593Smuzhiyun 	/* XXX check that state is OK to post receive */
2174*4882a593Smuzhiyun 
2175*4882a593Smuzhiyun 	ind = qp->rq.head & (qp->rq.max - 1);
2176*4882a593Smuzhiyun 
2177*4882a593Smuzhiyun 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
2178*4882a593Smuzhiyun 		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
2179*4882a593Smuzhiyun 			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
2180*4882a593Smuzhiyun 					" %d max, %d nreq)\n", qp->qpn,
2181*4882a593Smuzhiyun 					qp->rq.head, qp->rq.tail,
2182*4882a593Smuzhiyun 					qp->rq.max, nreq);
2183*4882a593Smuzhiyun 			err = -ENOMEM;
2184*4882a593Smuzhiyun 			*bad_wr = wr;
2185*4882a593Smuzhiyun 			goto out;
2186*4882a593Smuzhiyun 		}
2187*4882a593Smuzhiyun 
2188*4882a593Smuzhiyun 		wqe = get_recv_wqe(qp, ind);
2189*4882a593Smuzhiyun 
2190*4882a593Smuzhiyun 		((struct mthca_next_seg *) wqe)->flags = 0;
2191*4882a593Smuzhiyun 
2192*4882a593Smuzhiyun 		wqe += sizeof (struct mthca_next_seg);
2193*4882a593Smuzhiyun 
2194*4882a593Smuzhiyun 		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
2195*4882a593Smuzhiyun 			err = -EINVAL;
2196*4882a593Smuzhiyun 			*bad_wr = wr;
2197*4882a593Smuzhiyun 			goto out;
2198*4882a593Smuzhiyun 		}
2199*4882a593Smuzhiyun 
2200*4882a593Smuzhiyun 		for (i = 0; i < wr->num_sge; ++i) {
2201*4882a593Smuzhiyun 			mthca_set_data_seg(wqe, wr->sg_list + i);
2202*4882a593Smuzhiyun 			wqe += sizeof (struct mthca_data_seg);
2203*4882a593Smuzhiyun 		}
2204*4882a593Smuzhiyun 
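		/*
		 * If the scatter list is shorter than max_gs, terminate it
		 * with an invalid-lkey sentinel so the hardware stops there.
		 */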
2205*4882a593Smuzhiyun 		if (i < qp->rq.max_gs)
2206*4882a593Smuzhiyun 			mthca_set_data_seg_inval(wqe);
2207*4882a593Smuzhiyun 
2208*4882a593Smuzhiyun 		qp->wrid[ind] = wr->wr_id;
2209*4882a593Smuzhiyun 
2210*4882a593Smuzhiyun 		++ind;
2211*4882a593Smuzhiyun 		if (unlikely(ind >= qp->rq.max))
2212*4882a593Smuzhiyun 			ind -= qp->rq.max;
2213*4882a593Smuzhiyun 	}
2214*4882a593Smuzhiyun out:
2215*4882a593Smuzhiyun 	if (likely(nreq)) {
2216*4882a593Smuzhiyun 		qp->rq.head += nreq;
2217*4882a593Smuzhiyun 
2218*4882a593Smuzhiyun 		/*
2219*4882a593Smuzhiyun 		 * Make sure that descriptors are written before
2220*4882a593Smuzhiyun 		 * doorbell record.
2221*4882a593Smuzhiyun 		 */
2222*4882a593Smuzhiyun 		wmb();
2223*4882a593Smuzhiyun 		*qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
2224*4882a593Smuzhiyun 	}
2225*4882a593Smuzhiyun 
2226*4882a593Smuzhiyun 	spin_unlock_irqrestore(&qp->rq.lock, flags);
2227*4882a593Smuzhiyun 	return err;
2228*4882a593Smuzhiyun }
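
/*
 * The index arithmetic above assumes the receive queue size is a
 * power of two, so "head & (max - 1)" is a cheap "head % max", and
 * the in-loop wrap only ever needs a single subtraction.  A small
 * sketch of that equivalence, guarded out of the build (the helper
 * name is illustrative, not a driver symbol):
 */
#if 0
static unsigned int rq_index(unsigned int head, unsigned int max)
{
	/*
	 * For power-of-two max, masking and modulo agree:
	 * e.g. max = 8: head 9 -> 9 & 7 = 1 = 9 % 8.
	 */
	return head & (max - 1);
}
#endif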
2229*4882a593Smuzhiyun 
2230*4882a593Smuzhiyun void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
2231*4882a593Smuzhiyun 			int index, int *dbd, __be32 *new_wqe)
2232*4882a593Smuzhiyun {
2233*4882a593Smuzhiyun 	struct mthca_next_seg *next;
2234*4882a593Smuzhiyun 
2235*4882a593Smuzhiyun 	/*
2236*4882a593Smuzhiyun 	 * For SRQs, all receive WQEs generate a CQE, so we're always
2237*4882a593Smuzhiyun 	 * at the end of the doorbell chain.
2238*4882a593Smuzhiyun 	 */
2239*4882a593Smuzhiyun 	if (qp->ibqp.srq && !is_send) {
2240*4882a593Smuzhiyun 		*new_wqe = 0;
2241*4882a593Smuzhiyun 		return;
2242*4882a593Smuzhiyun 	}
2243*4882a593Smuzhiyun 
2244*4882a593Smuzhiyun 	if (is_send)
2245*4882a593Smuzhiyun 		next = get_send_wqe(qp, index);
2246*4882a593Smuzhiyun 	else
2247*4882a593Smuzhiyun 		next = get_recv_wqe(qp, index);
2248*4882a593Smuzhiyun 
2249*4882a593Smuzhiyun 	*dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
2250*4882a593Smuzhiyun 	if (next->ee_nds & cpu_to_be32(0x3f))
2251*4882a593Smuzhiyun 		*new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
2252*4882a593Smuzhiyun 			(next->ee_nds & cpu_to_be32(0x3f));
2253*4882a593Smuzhiyun 	else
2254*4882a593Smuzhiyun 		*new_wqe = 0;
2255*4882a593Smuzhiyun }
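
/*
 * The combine step above follows the hardware WQE layout: the low
 * 6 bits of ee_nds carry the next WQE's size field (0 meaning the
 * chain ends here) and the upper bits of nda_op carry the next
 * WQE's 64-byte-aligned address.  A host-endian sketch of the same
 * extraction, guarded out of the build (names are illustrative, not
 * driver symbols):
 */
#if 0
#include <stdint.h>

static uint32_t next_wqe(uint32_t nda_op, uint32_t ee_nds)
{
	uint32_t nds = ee_nds & 0x3f;	/* next descriptor size field */

	/* no size means no next WQE; otherwise splice address + size */
	return nds ? ((nda_op & ~(uint32_t)0x3f) | nds) : 0;
}
#endif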
2256*4882a593Smuzhiyun 
2257*4882a593Smuzhiyun int mthca_init_qp_table(struct mthca_dev *dev)
2258*4882a593Smuzhiyun {
2259*4882a593Smuzhiyun 	int err;
2260*4882a593Smuzhiyun 	int i;
2261*4882a593Smuzhiyun 
2262*4882a593Smuzhiyun 	spin_lock_init(&dev->qp_table.lock);
2263*4882a593Smuzhiyun 
2264*4882a593Smuzhiyun 	/*
2265*4882a593Smuzhiyun 	 * We reserve 2 extra QPs per port for the special QPs.  The
2266*4882a593Smuzhiyun 	 * special QP for port 1 has to be even, so round up.
2267*4882a593Smuzhiyun 	 */
2268*4882a593Smuzhiyun 	dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
2269*4882a593Smuzhiyun 	err = mthca_alloc_init(&dev->qp_table.alloc,
2270*4882a593Smuzhiyun 			       dev->limits.num_qps,
2271*4882a593Smuzhiyun 			       (1 << 24) - 1,
2272*4882a593Smuzhiyun 			       dev->qp_table.sqp_start +
2273*4882a593Smuzhiyun 			       MTHCA_MAX_PORTS * 2);
2274*4882a593Smuzhiyun 	if (err)
2275*4882a593Smuzhiyun 		return err;
2276*4882a593Smuzhiyun 
2277*4882a593Smuzhiyun 	err = mthca_array_init(&dev->qp_table.qp,
2278*4882a593Smuzhiyun 			       dev->limits.num_qps);
2279*4882a593Smuzhiyun 	if (err) {
2280*4882a593Smuzhiyun 		mthca_alloc_cleanup(&dev->qp_table.alloc);
2281*4882a593Smuzhiyun 		return err;
2282*4882a593Smuzhiyun 	}
2283*4882a593Smuzhiyun 
2284*4882a593Smuzhiyun 	for (i = 0; i < 2; ++i) {
2285*4882a593Smuzhiyun 		err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
2286*4882a593Smuzhiyun 				    dev->qp_table.sqp_start + i * 2);
2287*4882a593Smuzhiyun 		if (err) {
2288*4882a593Smuzhiyun 			mthca_warn(dev, "CONF_SPECIAL_QP returned "
2289*4882a593Smuzhiyun 				   "%d, aborting.\n", err);
2290*4882a593Smuzhiyun 			goto err_out;
2291*4882a593Smuzhiyun 		}
2292*4882a593Smuzhiyun 	}
2293*4882a593Smuzhiyun 	return 0;
2294*4882a593Smuzhiyun 
2295*4882a593Smuzhiyun  err_out:
2296*4882a593Smuzhiyun 	for (i = 0; i < 2; ++i)
2297*4882a593Smuzhiyun 		mthca_CONF_SPECIAL_QP(dev, i, 0);
2298*4882a593Smuzhiyun 
2299*4882a593Smuzhiyun 	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
2300*4882a593Smuzhiyun 	mthca_alloc_cleanup(&dev->qp_table.alloc);
2301*4882a593Smuzhiyun 
2302*4882a593Smuzhiyun 	return err;
2303*4882a593Smuzhiyun }
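
/*
 * The sqp_start computation above rounds the first QPN after the
 * reserved range up to an even number, since the port 1 special QP
 * must be even: "(x + 1) & ~1UL" bumps odd values and leaves even
 * values alone.  A minimal sketch, guarded out of the build (the
 * helper name is illustrative):
 */
#if 0
static unsigned long round_up_even(unsigned long x)
{
	return (x + 1) & ~1UL;	/* 5 -> 6, 6 -> 6, 7 -> 8 */
}
#endif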
2304*4882a593Smuzhiyun 
2305*4882a593Smuzhiyun void mthca_cleanup_qp_table(struct mthca_dev *dev)
2306*4882a593Smuzhiyun {
2307*4882a593Smuzhiyun 	int i;
2308*4882a593Smuzhiyun 
2309*4882a593Smuzhiyun 	for (i = 0; i < 2; ++i)
2310*4882a593Smuzhiyun 		mthca_CONF_SPECIAL_QP(dev, i, 0);
2311*4882a593Smuzhiyun 
2312*4882a593Smuzhiyun 	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
2313*4882a593Smuzhiyun 	mthca_alloc_cleanup(&dev->qp_table.alloc);
2314*4882a593Smuzhiyun }
2315