/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/gfp.h>
#include <linux/export.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4.h"
#include "icm.h"

/* A QP that supports BlueFlame (BF) must have bits 6 and 7 of its QP number cleared */
#define MLX4_BF_QP_SKIP_MASK	0xc0
#define MLX4_MAX_BF_QP_RANGE	0x40

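/*
 * Dispatch an asynchronous event to the QP it belongs to.  The QP is
 * looked up under qp_table->lock and its refcount is taken so that it
 * cannot be freed while its ->event() callback runs; the final
 * reference dropper completes qp->free (see mlx4_qp_free()).
 */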
void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __mlx4_qp_lookup(dev, qpn);
	if (qp)
		refcount_inc(&qp->refcount);

	spin_unlock(&qp_table->lock);

	if (!qp) {
		mlx4_dbg(dev, "Async event for non-existent QP %08x\n", qpn);
		return;
	}

	qp->event(qp, event_type);

	if (refcount_dec_and_test(&qp->refcount))
		complete(&qp->free);
}

/* used for INIT/CLOSE port logic */
static int is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp, int *real_qp0, int *proxy_qp0)
{
	/* this procedure is called after we already know we are on the master */
	/* qp0 is either the proxy qp0, or the real qp0 */
	u32 pf_proxy_offset = dev->phys_caps.base_proxy_sqpn + 8 * mlx4_master_func_num(dev);
	*proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1;

	*real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn &&
		qp->qpn <= dev->phys_caps.base_sqpn + 1;

	return *real_qp0 || *proxy_qp0;
}

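/*
 * Post a RES_QP state-transition command to firmware.  The op[][] table
 * below maps (current state, new state) to the firmware opcode for that
 * transition; a zero entry means the transition is invalid.  Any state
 * may be moved to RST or ERR, which is why every row carries
 * MLX4_CMD_2RST_QP and MLX4_CMD_2ERR_QP entries.  Transitions to RST
 * take a shortcut, since no mailbox context is needed for them.
 */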
static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
		     struct mlx4_qp_context *context,
		     enum mlx4_qp_optpar optpar,
		     int sqd_event, struct mlx4_qp *qp, int native)
{
	static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
		[MLX4_QP_STATE_RST] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_RST2INIT_QP,
		},
		[MLX4_QP_STATE_INIT]  = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_INIT2INIT_QP,
			[MLX4_QP_STATE_RTR]	= MLX4_CMD_INIT2RTR_QP,
		},
		[MLX4_QP_STATE_RTR]   = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTR2RTS_QP,
		},
		[MLX4_QP_STATE_RTS]   = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTS2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_RTS2SQD_QP,
		},
		[MLX4_QP_STATE_SQD] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQD2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_SQD2SQD_QP,
		},
		[MLX4_QP_STATE_SQER] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQERR2RTS_QP,
		},
		[MLX4_QP_STATE_ERR] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
		}
	};

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int ret = 0;
	int real_qp0 = 0;
	int proxy_qp0 = 0;
	u8 port;

	if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
	    !op[cur_state][new_state])
		return -EINVAL;

	if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) {
		ret = mlx4_cmd(dev, 0, qp->qpn, 2,
			MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
		if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
		    cur_state != MLX4_QP_STATE_RST &&
		    is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
			port = (qp->qpn & 1) + 1;
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 0;
		}
		return ret;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
		u64 mtt_addr = mlx4_mtt_addr(dev, mtt);
		context->mtt_base_addr_h = mtt_addr >> 32;
		context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
		context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
	}

	if ((cur_state == MLX4_QP_STATE_RTR) &&
	    (new_state == MLX4_QP_STATE_RTS) &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
		context->roce_entropy =
			cpu_to_be16(mlx4_qp_roce_entropy(dev, qp->qpn));

	*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
	memcpy(mailbox->buf + 8, context, sizeof(*context));

	((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
		cpu_to_be32(qp->qpn);

	ret = mlx4_cmd(dev, mailbox->dma,
		       qp->qpn | (!!sqd_event << 31),
		       new_state == MLX4_QP_STATE_RST ? 2 : 0,
		       op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);

	if (mlx4_is_master(dev) && is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
		port = (qp->qpn & 1) + 1;
		if (cur_state != MLX4_QP_STATE_ERR &&
		    cur_state != MLX4_QP_STATE_RST &&
		    new_state == MLX4_QP_STATE_ERR) {
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 0;
		} else if (new_state == MLX4_QP_STATE_RTR) {
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 1;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 1;
		}
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}

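/*
 * Transition a QP between two states, passing the new context and the
 * optional-parameter mask to firmware.  This exported entry point is
 * always "wrapped" (native == 0), so on a multi-function device the
 * command is routed through the resource-tracking command wrapper.
 */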
int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
		   struct mlx4_qp_context *context,
		   enum mlx4_qp_optpar optpar,
		   int sqd_event, struct mlx4_qp *qp)
{
	return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,
				optpar, sqd_event, qp, 0);
}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);

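/*
 * Reserve a range of QP numbers directly from the zone allocator.  The
 * zone is chosen from the allocation flags: A0 (low-latency) ranges come
 * from the RAW_ETH zone when BlueFlame is requested and from the RSS
 * zone otherwise; everything else comes from the general zone.  For
 * BlueFlame the skip mask keeps bits 6 and 7 of every allocated QP
 * number cleared, which also caps a BF range at MLX4_MAX_BF_QP_RANGE
 * (0x40) entries.
 */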
int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
			    int *base, u8 flags)
{
	u32 uid;
	int bf_qp = !!(flags & (u8)MLX4_RESERVE_ETH_BF_QP);

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	if (cnt > MLX4_MAX_BF_QP_RANGE && bf_qp)
		return -ENOMEM;

	uid = MLX4_QP_TABLE_ZONE_GENERAL;
	if (flags & (u8)MLX4_RESERVE_A0_QP) {
		if (bf_qp)
			uid = MLX4_QP_TABLE_ZONE_RAW_ETH;
		else
			uid = MLX4_QP_TABLE_ZONE_RSS;
	}

	*base = mlx4_zone_alloc_entries(qp_table->zones, uid, cnt, align,
					bf_qp ? MLX4_BF_QP_SKIP_MASK : 0, NULL);
	if (*base == -1)
		return -ENOMEM;

	return 0;
}

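/*
 * Reserve a range of QP numbers.  Unsupported allocation flags are
 * masked off against the device capabilities first.  On a
 * multi-function device the reservation is forwarded to the PF via
 * MLX4_CMD_ALLOC_RES (flags and count in the low dword, alignment in
 * the high dword, usage in bits 30-31 of the input modifier);
 * otherwise the range is taken locally from the zone allocator.
 */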
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
			  int *base, u8 flags, u8 usage)
{
	u32 in_modifier = RES_QP | (((u32)usage & 3) << 30);
	u64 in_param = 0;
	u64 out_param;
	int err;

	/* Turn off all unsupported QP allocation flags */
	flags &= dev->caps.alloc_res_qp_mask;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, (((u32)flags) << 24) | (u32)cnt);
		set_param_h(&in_param, align);
		err = mlx4_cmd_imm(dev, in_param, &out_param,
				   in_modifier, RES_OP_RESERVE,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;

		*base = get_param_l(&out_param);
		return 0;
	}
	return __mlx4_qp_reserve_range(dev, cnt, align, base, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);

void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
		return;
	mlx4_zone_free_entries_unique(qp_table->zones, base_qpn, cnt);
}

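/*
 * Return a reserved range of QP numbers.  On a multi-function device
 * the release is forwarded to the PF via MLX4_CMD_FREE_RES and a
 * failure is only logged; otherwise the range goes back to the zone
 * allocator, except for firmware-reserved QPNs, which
 * __mlx4_qp_release_range() above deliberately never frees.
 */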
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
	u64 in_param = 0;
	int err;

	if (!cnt)
		return;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, base_qpn);
		set_param_h(&in_param, cnt);
		err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err) {
			mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
				  base_qpn, cnt);
		}
	} else
		__mlx4_qp_release_range(dev, base_qpn, cnt);
}
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);

int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	int err;

	err = mlx4_table_get(dev, &qp_table->qp_table, qpn);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &qp_table->auxc_table, qpn);
	if (err)
		goto err_put_qp;

	err = mlx4_table_get(dev, &qp_table->altc_table, qpn);
	if (err)
		goto err_put_auxc;

	err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn);
	if (err)
		goto err_put_altc;

	err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn);
	if (err)
		goto err_put_rdmarc;

	return 0;

err_put_rdmarc:
	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);

err_put_altc:
	mlx4_table_put(dev, &qp_table->altc_table, qpn);

err_put_auxc:
	mlx4_table_put(dev, &qp_table->auxc_table, qpn);

err_put_qp:
	mlx4_table_put(dev, &qp_table->qp_table, qpn);

err_out:
	return err;
}

static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
{
	u64 param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, qpn);
		return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM,
				    MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
	}
	return __mlx4_qp_alloc_icm(dev, qpn);
}

void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
	mlx4_table_put(dev, &qp_table->altc_table, qpn);
	mlx4_table_put(dev, &qp_table->auxc_table, qpn);
	mlx4_table_put(dev, &qp_table->qp_table, qpn);
}

static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, qpn);
		if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn);
	} else
		__mlx4_qp_free_icm(dev, qpn);
}

struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_qp *qp;

	spin_lock_irq(&qp_table->lock);

	qp = __mlx4_qp_lookup(dev, qpn);

	spin_unlock_irq(&qp_table->lock);
	return qp;
}

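/*
 * Make a QP object usable: map the ICM entries for @qpn and insert the
 * QP into the radix tree used by event dispatch and mlx4_qp_lookup().
 * QPN 0 is rejected.  The refcount/completion pair initialized here
 * pairs with mlx4_qp_free(), which waits for all outstanding references.
 *
 * A typical caller sequence looks roughly like this (a sketch only;
 * error handling is elided and the surrounding variables are assumed
 * to exist):
 *
 *	err = mlx4_qp_reserve_range(dev, 1, 1, &qpn, 0,
 *				    MLX4_RES_USAGE_DRIVER);
 *	err = mlx4_qp_alloc(dev, qpn, &qp);
 *	...
 *	mlx4_qp_remove(dev, &qp);
 *	mlx4_qp_free(dev, &qp);
 *	mlx4_qp_release_range(dev, qpn, 1);
 */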
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	int err;

	if (!qpn)
		return -EINVAL;

	qp->qpn = qpn;

	err = mlx4_qp_alloc_icm(dev, qpn);
	if (err)
		return err;

	spin_lock_irq(&qp_table->lock);
	err = radix_tree_insert(&dev->qp_table_tree, qp->qpn &
				(dev->caps.num_qps - 1), qp);
	spin_unlock_irq(&qp_table->lock);
	if (err)
		goto err_icm;

	refcount_set(&qp->refcount, 1);
	init_completion(&qp->free);

	return 0;

err_icm:
	mlx4_qp_free_icm(dev, qpn);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);

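/*
 * Update selected attributes of an existing QP with the
 * MLX4_CMD_UPDATE_QP firmware command.  @attr says which fields of
 * @params are valid; each attribute sets the matching bit in the
 * primary address-path mask or the QP mask so that firmware touches
 * only those fields.  Attributes that depend on a device capability
 * (source-check loopback, QoS vport) fail with -EOPNOTSUPP when the
 * capability is absent.
 */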
int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
		   enum mlx4_update_qp_attr attr,
		   struct mlx4_update_qp_params *params)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *cmd;
	u64 pri_addr_path_mask = 0;
	u64 qp_mask = 0;
	int err = 0;

	if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cmd = (struct mlx4_update_qp_context *)mailbox->buf;

	if (attr & MLX4_UPDATE_QP_SMAC) {
		pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
		cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
	}

	if (attr & MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB) {
		if (!(dev->caps.flags2
		      & MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
			mlx4_warn(dev,
				  "Trying to set src check LB, but it isn't supported\n");
			err = -EOPNOTSUPP;
			goto out;
		}
		pri_addr_path_mask |=
			1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB;
		if (params->flags &
		    MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB) {
			cmd->qp_context.pri_path.fl |=
				MLX4_FL_ETH_SRC_CHECK_MC_LB;
		}
	}

	if (attr & MLX4_UPDATE_QP_VSD) {
		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD;
		if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE)
			cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
	}

	if (attr & MLX4_UPDATE_QP_RATE_LIMIT) {
		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_RATE_LIMIT;
		cmd->qp_context.rate_limit_params = cpu_to_be16((params->rate_unit << 14) | params->rate_val);
	}

	if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) {
			mlx4_warn(dev, "Granular QoS per VF is not enabled\n");
			err = -EOPNOTSUPP;
			goto out;
		}

		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
		cmd->qp_context.qos_vport = params->qos_vport;
	}

	cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
	cmd->qp_mask = cpu_to_be64(qp_mask);

	err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_update_qp);

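/*
 * Remove a QP from the lookup radix tree so that event dispatch and
 * mlx4_qp_lookup() can no longer find it.  The QP's ICM resources stay
 * allocated until mlx4_qp_free() is called.
 */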
void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&qp_table->lock, flags);
	radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1));
	spin_unlock_irqrestore(&qp_table->lock, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_remove);

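/*
 * Free a QP object: drop the allocation reference taken in
 * mlx4_qp_alloc(), wait until any in-flight event handlers have dropped
 * theirs (the last put completes qp->free), then return the ICM table
 * entries.  The QP should already have been detached from the lookup
 * tree with mlx4_qp_remove().
 */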
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	if (refcount_dec_and_test(&qp->refcount))
		complete(&qp->free);
	wait_for_completion(&qp->free);

	mlx4_qp_free_icm(dev, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);

static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
{
	return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

#define MLX4_QP_TABLE_RSS_ETH_PRIORITY 2
#define MLX4_QP_TABLE_RAW_ETH_PRIORITY 1
#define MLX4_QP_TABLE_RAW_ETH_SIZE     256

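/*
 * Build the QP-number zone allocator: a general zone for ordinary QPNs,
 * an RSS zone at the bottom of the range, and RAW_ETH subareas carved
 * out of the firmware's A0 steering area for QPs that must keep bits 6
 * and 7 of their number clear (the BlueFlame constraint expressed by
 * MLX4_BF_QP_SKIP_MASK).  The sizing loop below makes sure each RAW_ETH
 * subarea never straddles the skip mask.
 */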
static int mlx4_create_zones(struct mlx4_dev *dev,
			     u32 reserved_bottom_general,
			     u32 reserved_top_general,
			     u32 reserved_bottom_rss,
			     u32 start_offset_rss,
			     u32 max_table_offset)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_bitmap (*bitmap)[MLX4_QP_TABLE_ZONE_NUM] = NULL;
	int bitmap_initialized = 0;
	u32 last_offset;
	int k;
	int err;

	qp_table->zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP);

	if (NULL == qp_table->zones)
		return -ENOMEM;

	bitmap = kmalloc(sizeof(*bitmap), GFP_KERNEL);

	if (NULL == bitmap) {
		err = -ENOMEM;
		goto free_zone;
	}

	err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_GENERAL, dev->caps.num_qps,
			       (1 << 23) - 1, reserved_bottom_general,
			       reserved_top_general);

	if (err)
		goto free_bitmap;

	++bitmap_initialized;

	err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_GENERAL,
				MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO |
				MLX4_ZONE_USE_RR, 0,
				0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_GENERAL);

	if (err)
		goto free_bitmap;

	err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_RSS,
			       reserved_bottom_rss,
			       reserved_bottom_rss - 1,
			       dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
			       reserved_bottom_rss - start_offset_rss);

	if (err)
		goto free_bitmap;

	++bitmap_initialized;

	err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_RSS,
				MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
				MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
				MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RSS_ETH_PRIORITY,
				0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_RSS);

	if (err)
		goto free_bitmap;

	last_offset = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
	/*  We have a single zone for the A0 steering QPs area of the FW. This area
	 *  needs to be split into subareas. One set of subareas is for RSS QPs
	 *  (in which qp number bits 6 and/or 7 are set); the other set of subareas
	 *  is for RAW_ETH QPs, which require that both bits 6 and 7 are zero.
	 *  Currently, the values returned by the FW (A0 steering area starting qp number
	 *  and A0 steering area size) are such that there are only two subareas -- one
	 *  for RSS and one for RAW_ETH.
	 */
	for (k = MLX4_QP_TABLE_ZONE_RSS + 1; k < sizeof(*bitmap)/sizeof((*bitmap)[0]);
	     k++) {
		int size;
		u32 offset = start_offset_rss;
		u32 bf_mask;
		u32 requested_size;

		/* Assuming MLX4_BF_QP_SKIP_MASK is consecutive ones, this calculates
		 * a mask of all LSB bits set until (and not including) the first
		 * set bit of MLX4_BF_QP_SKIP_MASK. For example, if MLX4_BF_QP_SKIP_MASK
		 * is 0xc0, bf_mask will be 0x3f.
		 */
		bf_mask = (MLX4_BF_QP_SKIP_MASK & ~(MLX4_BF_QP_SKIP_MASK - 1)) - 1;
		requested_size = min((u32)MLX4_QP_TABLE_RAW_ETH_SIZE, bf_mask + 1);

		if (((last_offset & MLX4_BF_QP_SKIP_MASK) &&
		     ((int)(max_table_offset - last_offset)) >=
		     roundup_pow_of_two(MLX4_BF_QP_SKIP_MASK)) ||
		    (!(last_offset & MLX4_BF_QP_SKIP_MASK) &&
		     !((last_offset + requested_size - 1) &
		       MLX4_BF_QP_SKIP_MASK)))
			size = requested_size;
		else {
			u32 candidate_offset =
				(last_offset | MLX4_BF_QP_SKIP_MASK | bf_mask) + 1;

			if (last_offset & MLX4_BF_QP_SKIP_MASK)
				last_offset = candidate_offset;

			/* From this point, the BF bits are 0 */

			if (last_offset > max_table_offset) {
				/* need to skip */
				size = -1;
			} else {
				size = min3(max_table_offset - last_offset,
					    bf_mask - (last_offset & bf_mask),
					    requested_size);
				if (size < requested_size) {
					int candidate_size;

					candidate_size = min3(
						max_table_offset - candidate_offset,
						bf_mask - (last_offset & bf_mask),
						requested_size);

					/*  We will not take this path if last_offset was
					 *  already set above to candidate_offset
					 */
					if (candidate_size > size) {
						last_offset = candidate_offset;
						size = candidate_size;
					}
				}
			}
		}

		if (size > 0) {
			/* mlx4_bitmap_alloc_range will find a contiguous range of "size"
			 * QPs in which both bits 6 and 7 are zero, because we pass it
			 * the MLX4_BF_QP_SKIP_MASK.
			 */
			offset = mlx4_bitmap_alloc_range(
					*bitmap + MLX4_QP_TABLE_ZONE_RSS,
					size, 1,
					MLX4_BF_QP_SKIP_MASK);

			if (offset == (u32)-1) {
				err = -ENOMEM;
				break;
			}

			last_offset = offset + size;

			err = mlx4_bitmap_init(*bitmap + k, roundup_pow_of_two(size),
					       roundup_pow_of_two(size) - 1, 0,
					       roundup_pow_of_two(size) - size);
		} else {
			/* Add an empty bitmap, we'll allocate from different zones (since
			 * at least one is reserved)
			 */
			err = mlx4_bitmap_init(*bitmap + k, 1,
					       MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0,
					       0);
			if (!err)
				mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
		}

		if (err)
			break;

		++bitmap_initialized;

		err = mlx4_zone_add_one(qp_table->zones, *bitmap + k,
					MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
					MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
					MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RAW_ETH_PRIORITY,
					offset, qp_table->zones_uids + k);

		if (err)
			break;
	}

	if (err)
		goto free_bitmap;

	qp_table->bitmap_gen = *bitmap;

	return err;

free_bitmap:
	for (k = 0; k < bitmap_initialized; k++)
		mlx4_bitmap_cleanup(*bitmap + k);
	kfree(bitmap);
free_zone:
	mlx4_zone_allocator_destroy(qp_table->zones);
	return err;
}

static void mlx4_cleanup_qp_zones(struct mlx4_dev *dev)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;

	if (qp_table->zones) {
		int i;

		for (i = 0;
		     i < sizeof(qp_table->zones_uids)/sizeof(qp_table->zones_uids[0]);
		     i++) {
			struct mlx4_bitmap *bitmap =
				mlx4_zone_get_bitmap(qp_table->zones,
						     qp_table->zones_uids[i]);

			mlx4_zone_remove_one(qp_table->zones, qp_table->zones_uids[i]);
			if (NULL == bitmap)
				continue;

			mlx4_bitmap_cleanup(bitmap);
		}
		mlx4_zone_allocator_destroy(qp_table->zones);
		kfree(qp_table->bitmap_gen);
		qp_table->bitmap_gen = NULL;
		qp_table->zones = NULL;
	}
}

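/*
 * Set up the QP table at device initialization.  Slaves only need the
 * lock and the lookup radix tree.  The PF additionally works out how
 * many QPNs are reserved at the bottom (firmware regions, special QPs)
 * and at the top (per-region reserved counts), creates the zone
 * allocator, lays out the proxy/tunnel special QPs for multi-function
 * mode, and tells firmware where the special QP block starts via
 * mlx4_CONF_SPECIAL_QP().
 */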
int mlx4_init_qp_table(struct mlx4_dev *dev)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	int err;
	int reserved_from_top = 0;
	int reserved_from_bot;
	int k;
	int fixed_reserved_from_bot_rv = 0;
	int bottom_reserved_for_rss_bitmap;
	u32 max_table_offset = dev->caps.dmfs_high_rate_qpn_base +
			dev->caps.dmfs_high_rate_qpn_range;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
	if (mlx4_is_slave(dev))
		return 0;

	/* We reserve 2 extra QPs per port for the special QPs.  The
	 * block of special QPs must be aligned to a multiple of 8, so
	 * round up.
	 *
	 * We also reserve the MSB of the 24-bit QP number to indicate
	 * that a QP is an XRC QP.
	 */
	for (k = 0; k <= MLX4_QP_REGION_BOTTOM; k++)
		fixed_reserved_from_bot_rv += dev->caps.reserved_qps_cnt[k];

	if (fixed_reserved_from_bot_rv < max_table_offset)
		fixed_reserved_from_bot_rv = max_table_offset;

	/* We reserve at least 1 extra for bitmaps that we don't have enough space for */
	bottom_reserved_for_rss_bitmap =
		roundup_pow_of_two(fixed_reserved_from_bot_rv + 1);
	dev->phys_caps.base_sqpn = ALIGN(bottom_reserved_for_rss_bitmap, 8);

	{
		int sort[MLX4_NUM_QP_REGION];
		int i, j;
		int last_base = dev->caps.num_qps;

		for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
			sort[i] = i;

		for (i = MLX4_NUM_QP_REGION; i > MLX4_QP_REGION_BOTTOM; --i) {
			for (j = MLX4_QP_REGION_BOTTOM + 2; j < i; ++j) {
				if (dev->caps.reserved_qps_cnt[sort[j]] >
				    dev->caps.reserved_qps_cnt[sort[j - 1]])
					swap(sort[j], sort[j - 1]);
			}
		}

		for (i = MLX4_QP_REGION_BOTTOM + 1; i < MLX4_NUM_QP_REGION; ++i) {
			last_base -= dev->caps.reserved_qps_cnt[sort[i]];
			dev->caps.reserved_qps_base[sort[i]] = last_base;
			reserved_from_top +=
				dev->caps.reserved_qps_cnt[sort[i]];
		}
	}

	/* Reserve 8 real SQPs in both native and SRIOV modes.
	 * In addition, in SRIOV mode, reserve 8 proxy SQPs per function
	 * (for all PFs and VFs), and 8 corresponding tunnel QPs.
	 * Each proxy SQP works opposite its own tunnel QP.
	 *
	 * The QPs are arranged as follows:
	 * a. 8 real SQPs
	 * b. All the proxy SQPs (8 per function)
	 * c. All the tunnel QPs (8 per function)
	 */
	reserved_from_bot = mlx4_num_reserved_sqps(dev);
	if (reserved_from_bot + reserved_from_top > dev->caps.num_qps) {
		mlx4_err(dev, "Number of reserved QPs is higher than number of QPs\n");
		return -EINVAL;
	}

	err = mlx4_create_zones(dev, reserved_from_bot, reserved_from_bot,
				bottom_reserved_for_rss_bitmap,
				fixed_reserved_from_bot_rv,
				max_table_offset);

	if (err)
		return err;

	if (mlx4_is_mfunc(dev)) {
		/* for PPF use */
		dev->phys_caps.base_proxy_sqpn = dev->phys_caps.base_sqpn + 8;
		dev->phys_caps.base_tunnel_sqpn = dev->phys_caps.base_sqpn + 8 + 8 * MLX4_MFUNC_MAX;

		/* In mfunc, calculate proxy and tunnel qp offsets for the PF here,
		 * since the PF does not call mlx4_slave_caps */
		dev->caps.spec_qps = kcalloc(dev->caps.num_ports,
					     sizeof(*dev->caps.spec_qps),
					     GFP_KERNEL);
		if (!dev->caps.spec_qps) {
			err = -ENOMEM;
			goto err_mem;
		}

		for (k = 0; k < dev->caps.num_ports; k++) {
			dev->caps.spec_qps[k].qp0_proxy = dev->phys_caps.base_proxy_sqpn +
				8 * mlx4_master_func_num(dev) + k;
			dev->caps.spec_qps[k].qp0_tunnel = dev->caps.spec_qps[k].qp0_proxy + 8 * MLX4_MFUNC_MAX;
			dev->caps.spec_qps[k].qp1_proxy = dev->phys_caps.base_proxy_sqpn +
				8 * mlx4_master_func_num(dev) + MLX4_MAX_PORTS + k;
			dev->caps.spec_qps[k].qp1_tunnel = dev->caps.spec_qps[k].qp1_proxy + 8 * MLX4_MFUNC_MAX;
		}
	}

	err = mlx4_CONF_SPECIAL_QP(dev, dev->phys_caps.base_sqpn);
	if (err)
		goto err_mem;

	return err;

err_mem:
	kfree(dev->caps.spec_qps);
	dev->caps.spec_qps = NULL;
	mlx4_cleanup_qp_zones(dev);
	return err;
}

void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
{
	if (mlx4_is_slave(dev))
		return;

	mlx4_CONF_SPECIAL_QP(dev, 0);

	mlx4_cleanup_qp_zones(dev);
}

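/*
 * Read back a QP's context from firmware with MLX4_CMD_QUERY_QP.  The
 * returned mailbox carries the context at offset 8, mirroring the
 * layout used by the modify command.
 */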
int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
		  struct mlx4_qp_context *context)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
			   MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_WRAPPED);
	if (!err)
		memcpy(context, mailbox->buf + 8, sizeof(*context));

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_query);

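/*
 * Walk a freshly created QP through RST -> INIT -> RTR -> RTS, reusing
 * the same @context for every step and rewriting the target state in
 * bits 28-31 of context->flags before each transition.  *qp_state is
 * updated after every successful step.
 */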
int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     struct mlx4_qp_context *context,
		     struct mlx4_qp *qp, enum mlx4_qp_state *qp_state)
{
	int err;
	int i;
	enum mlx4_qp_state states[] = {
		MLX4_QP_STATE_RST,
		MLX4_QP_STATE_INIT,
		MLX4_QP_STATE_RTR,
		MLX4_QP_STATE_RTS
	};

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		context->flags &= cpu_to_be32(~(0xf << 28));
		context->flags |= cpu_to_be32(states[i + 1] << 28);
		if (states[i + 1] != MLX4_QP_STATE_RTR)
			context->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP);
		err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
				     context, 0, 0, qp);
		if (err) {
			mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
				 states[i + 1], err);
			return err;
		}

		*qp_state = states[i + 1];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);

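/*
 * Compute the 16-bit RoCE entropy value that __mlx4_qp_modify() programs
 * into the QP context on the RTR->RTS transition: fold the local and
 * remote QPNs with folded_qp(), XOR them for connected QPs (remote QPN
 * different from the local one), and force the two top bits on.  0xdead
 * is returned if the QP context cannot be queried.
 */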
u16 mlx4_qp_roce_entropy(struct mlx4_dev *dev, u32 qpn)
{
	struct mlx4_qp_context context;
	struct mlx4_qp qp;
	int err;

	qp.qpn = qpn;
	err = mlx4_qp_query(dev, &qp, &context);
	if (!err) {
		u32 dest_qpn = be32_to_cpu(context.remote_qpn) & 0xffffff;
		u16 folded_dst = folded_qp(dest_qpn);
		u16 folded_src = folded_qp(qpn);

		return (dest_qpn != qpn) ?
			((folded_dst ^ folded_src) | 0xC000) :
			folded_src | 0xC000;
	}
	return 0xdead;
}