/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <rdma/uverbs_ioctl.h>

#include "iw_cxgb4.h"

static int db_delay_usecs = 1;
module_param(db_delay_usecs, int, 0644);
MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain");

static int ocqp_support = 1;
module_param(ocqp_support, int, 0644);
MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");

int db_fc_threshold = 1000;
module_param(db_fc_threshold, int, 0644);
MODULE_PARM_DESC(db_fc_threshold,
		 "QP count/threshold that triggers"
		 " automatic db flow control mode (default = 1000)");

int db_coalescing_threshold;
module_param(db_coalescing_threshold, int, 0644);
MODULE_PARM_DESC(db_coalescing_threshold,
		 "QP count/threshold that triggers"
		 " disabling db coalescing (default = 0)");

static int max_fr_immd = T4_MAX_FR_IMMD;
module_param(max_fr_immd, int, 0644);
MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
static int alloc_ird(struct c4iw_dev *dev, u32 ird)
{
	int ret = 0;

	xa_lock_irq(&dev->qps);
	if (ird <= dev->avail_ird)
		dev->avail_ird -= ird;
	else
		ret = -ENOMEM;
	xa_unlock_irq(&dev->qps);

	if (ret)
		dev_warn(&dev->rdev.lldi.pdev->dev,
			 "device IRD resources exhausted\n");

	return ret;
}

static void free_ird(struct c4iw_dev *dev, int ird)
{
	xa_lock_irq(&dev->qps);
	dev->avail_ird += ird;
	xa_unlock_irq(&dev->qps);
}

static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
	unsigned long flag;

	spin_lock_irqsave(&qhp->lock, flag);
	qhp->attr.state = state;
	spin_unlock_irqrestore(&qhp->lock, flag);
}

static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
}

static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
			  dma_unmap_addr(sq, mapping));
}

static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (t4_sq_onchip(sq))
		dealloc_oc_sq(rdev, sq);
	else
		dealloc_host_sq(rdev, sq);
}

static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (!ocqp_support || !ocqp_supported(&rdev->lldi))
		return -ENOSYS;
	sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
	if (!sq->dma_addr)
		return -ENOMEM;
	sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
			rdev->lldi.vr->ocq.start;
	sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
					    rdev->lldi.vr->ocq.start);
	sq->flags |= T4_SQ_ONCHIP;
	return 0;
}

static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
				       &(sq->dma_addr), GFP_KERNEL);
	if (!sq->queue)
		return -ENOMEM;
	sq->phys_addr = virt_to_phys(sq->queue);
	dma_unmap_addr_set(sq, mapping, sq->dma_addr);
	return 0;
}

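/*
 * For user QPs, first try the on-chip SQ pool and fall back to host
 * DMA memory; kernel QPs always use host memory.
 */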
static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
{
	int ret = -ENOSYS;

	if (user)
		ret = alloc_oc_sq(rdev, sq);
	if (ret)
		ret = alloc_host_sq(rdev, sq);
	return ret;
}

static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx, int has_rq)
{
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dealloc_sq(rdev, &wq->sq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);

	if (has_rq) {
		dma_free_coherent(&rdev->lldi.pdev->dev,
				  wq->rq.memsize, wq->rq.queue,
				  dma_unmap_addr(&wq->rq, mapping));
		c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
		kfree(wq->rq.sw_rq);
		c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	}
	return 0;
}

/*
 * Determine the BAR2 virtual address and qid. If pbar2_pa is not NULL,
 * then this is a user mapping so compute the page-aligned physical address
 * for mapping.
 */
void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
			      enum cxgb4_bar2_qtype qtype,
			      unsigned int *pbar2_qid, u64 *pbar2_pa)
{
	u64 bar2_qoffset;
	int ret;

	ret = cxgb4_bar2_sge_qregs(rdev->lldi.ports[0], qid, qtype,
				   pbar2_pa ? 1 : 0,
				   &bar2_qoffset, pbar2_qid);
	if (ret)
		return NULL;

	if (pbar2_pa)
		*pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;

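	/*
	 * T4 appears to have no usable BAR2 kernel doorbell region for
	 * these queues, so return NULL and let callers fall back to the
	 * legacy db_reg doorbell.
	 */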
	if (is_t4(rdev->lldi.adapter_type))
		return NULL;

	return rdev->bar2_kva + bar2_qoffset;
}

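/*
 * Allocate the HW queues (the SQ, and the RQ unless an SRQ is used),
 * map their BAR2 doorbell regions, and commit the EQ contexts to the
 * firmware with a single FW_RI_RES_WR.
 */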
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx,
		     struct c4iw_wr_wait *wr_waitp,
		     int need_rq)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct sk_buff *skb;
	int ret = 0;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	if (need_rq) {
		wq->rq.qid = c4iw_get_qpid(rdev, uctx);
		if (!wq->rq.qid) {
			ret = -ENOMEM;
			goto free_sq_qid;
		}
	}

	if (!user) {
		wq->sq.sw_sq = kcalloc(wq->sq.size, sizeof(*wq->sq.sw_sq),
				       GFP_KERNEL);
		if (!wq->sq.sw_sq) {
			ret = -ENOMEM;
			goto free_rq_qid;//FIXME
		}

		if (need_rq) {
			wq->rq.sw_rq = kcalloc(wq->rq.size,
					       sizeof(*wq->rq.sw_rq),
					       GFP_KERNEL);
			if (!wq->rq.sw_rq) {
				ret = -ENOMEM;
				goto free_sw_sq;
			}
		}
	}

	if (need_rq) {
		/*
		 * RQT must be a power of 2 and at least 16 deep.
		 */
		wq->rq.rqt_size =
			roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
		wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
		if (!wq->rq.rqt_hwaddr) {
			ret = -ENOMEM;
			goto free_sw_rq;
		}
	}

	ret = alloc_sq(rdev, &wq->sq, user);
	if (ret)
		goto free_hwaddr;
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	if (need_rq) {
		wq->rq.queue = dma_alloc_coherent(&rdev->lldi.pdev->dev,
						  wq->rq.memsize,
						  &wq->rq.dma_addr,
						  GFP_KERNEL);
		if (!wq->rq.queue) {
			ret = -ENOMEM;
			goto free_sq;
		}
		pr_debug("sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
			 wq->sq.queue,
			 (unsigned long long)virt_to_phys(wq->sq.queue),
			 wq->rq.queue,
			 (unsigned long long)virt_to_phys(wq->rq.queue));
		dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
	}

	wq->db = rdev->lldi.db_reg;

	wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid,
					 CXGB4_BAR2_QTYPE_EGRESS,
					 &wq->sq.bar2_qid,
					 user ? &wq->sq.bar2_pa : NULL);
	if (need_rq)
		wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid,
						 CXGB4_BAR2_QTYPE_EGRESS,
						 &wq->rq.bar2_qid,
						 user ? &wq->rq.bar2_pa : NULL);

	/*
	 * User mode must have bar2 access.
	 */
	if (user && (!wq->sq.bar2_pa || (need_rq && !wq->rq.bar2_pa))) {
		pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n",
			pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
		ret = -EINVAL;
		goto free_dma;
	}

	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof(*res_wr) + 2 * sizeof(*res);
	if (need_rq)
		wr_len += sizeof(*res);
	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto free_dma;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = __skb_put_zero(skb, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(need_rq ? 2 : 1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)wr_waitp;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
		 rdev->hw_queue.t4_eq_status_entries;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		FW_RI_RES_WR_HOSTFCMODE_V(0) |	/* no host cidx updates */
		FW_RI_RES_WR_CPRIO_V(0) |	/* don't keep in chip cache */
		FW_RI_RES_WR_PCIECHN_V(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) |
		FW_RI_RES_WR_IQID_V(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		FW_RI_RES_WR_DCAEN_V(0) |
		FW_RI_RES_WR_DCACPU_V(0) |
		FW_RI_RES_WR_FBMIN_V(2) |
		(t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
					 FW_RI_RES_WR_FBMAX_V(3)) |
		FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
		FW_RI_RES_WR_CIDXFTHRESH_V(0) |
		FW_RI_RES_WR_EQSIZE_V(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);

	if (need_rq) {
		res++;
		res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
		res->u.sqrq.op = FW_RI_RES_OP_WRITE;

		/*
		 * eqsize is the number of 64B entries plus the status page
		 * size.
		 */
		eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
			 rdev->hw_queue.t4_eq_status_entries;
		res->u.sqrq.fetchszm_to_iqid =
			/* no host cidx updates */
			cpu_to_be32(FW_RI_RES_WR_HOSTFCMODE_V(0) |
			/* don't keep in chip cache */
			FW_RI_RES_WR_CPRIO_V(0) |
			/* set by uP at ri_init time */
			FW_RI_RES_WR_PCIECHN_V(0) |
			FW_RI_RES_WR_IQID_V(rcq->cqid));
		res->u.sqrq.dcaen_to_eqsize =
			cpu_to_be32(FW_RI_RES_WR_DCAEN_V(0) |
			FW_RI_RES_WR_DCACPU_V(0) |
			FW_RI_RES_WR_FBMIN_V(2) |
			FW_RI_RES_WR_FBMAX_V(3) |
			FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
			FW_RI_RES_WR_CIDXFTHRESH_V(0) |
			FW_RI_RES_WR_EQSIZE_V(eqsize));
		res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
		res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
	}

	c4iw_init_wr_wait(wr_waitp);
	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->sq.qid, __func__);
	if (ret)
		goto free_dma;

	pr_debug("sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
		 wq->sq.qid, wq->rq.qid, wq->db,
		 wq->sq.bar2_va, wq->rq.bar2_va);

	return 0;
free_dma:
	if (need_rq)
		dma_free_coherent(&rdev->lldi.pdev->dev,
				  wq->rq.memsize, wq->rq.queue,
				  dma_unmap_addr(&wq->rq, mapping));
free_sq:
	dealloc_sq(rdev, &wq->sq);
free_hwaddr:
	if (need_rq)
		c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
free_sw_rq:
	if (need_rq)
		kfree(wq->rq.sw_rq);
free_sw_sq:
	kfree(wq->sq.sw_sq);
free_rq_qid:
	if (need_rq)
		c4iw_put_qpid(rdev, wq->rq.qid, uctx);
free_sq_qid:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return ret;
}

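/*
 * Copy the send payload inline into the WQE, wrapping back to the
 * start of the SQ when the copy reaches the end of the queue (the SQ
 * is a ring), then zero-pad out to the next 16-byte boundary.
 */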
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
		      const struct ib_send_wr *wr, int max, u32 *plenp)
{
	u8 *dstp, *srcp;
	u32 plen = 0;
	int i;
	int rem, len;

	dstp = (u8 *)immdp->data;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) > max)
			return -EMSGSIZE;
		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
		plen += wr->sg_list[i].length;
		rem = wr->sg_list[i].length;
		while (rem) {
			if (dstp == (u8 *)&sq->queue[sq->size])
				dstp = (u8 *)sq->queue;
			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
				len = rem;
			else
				len = (u8 *)&sq->queue[sq->size] - dstp;
			memcpy(dstp, srcp, len);
			dstp += len;
			srcp += len;
			rem -= len;
		}
	}
	len = roundup(plen + sizeof(*immdp), 16) - (plen + sizeof(*immdp));
	if (len)
		memset(dstp, 0, len);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->r1 = 0;
	immdp->r2 = 0;
	immdp->immdlen = cpu_to_be32(plen);
	*plenp = plen;
	return 0;
}

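/*
 * Build a firmware ISGL in place in the (circular) work queue; the
 * (plen + length) < plen test catches u32 overflow of the total
 * payload length.
 */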
static int build_isgl(__be64 *queue_start, __be64 *queue_end,
		      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
		      int num_sge, u32 *plenp)
{
	int i;
	u32 plen = 0;
	__be64 *flitp;

	if ((__be64 *)isglp == queue_end)
		isglp = (struct fw_ri_isgl *)queue_start;

	flitp = (__be64 *)isglp->sge;

	for (i = 0; i < num_sge; i++) {
		if ((plen + sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += sg_list[i].length;
		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
				     sg_list[i].length);
		if (++flitp == queue_end)
			flitp = queue_start;
		*flitp = cpu_to_be64(sg_list[i].addr);
		if (++flitp == queue_end)
			flitp = queue_start;
	}
	*flitp = (__force __be64)0;
	isglp->op = FW_RI_DATA_ISGL;
	isglp->r1 = 0;
	isglp->nsge = cpu_to_be16(num_sge);
	isglp->r2 = 0;
	if (plenp)
		*plenp = plen;
	return 0;
}

static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
			   const struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND));
		wqe->send.stag_inv = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE_INV));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
		break;
	default:
		return -EINVAL;
	}
	wqe->send.r3 = 0;
	wqe->send.r4 = 0;

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->send.u.immd_src, wr,
					 T4_MAX_SEND_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof(wqe->send) + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->send.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof(wqe->send) + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof(wqe->send) + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}

static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
			    const struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;

	/*
	 * The iWARP protocol supports 64-bit immediate data, but the
	 * RDMA API limits it to 32 bits.
	 */
	if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wqe->write.iw_imm_data.ib_imm_data.imm_data32 = wr->ex.imm_data;
	else
		wqe->write.iw_imm_data.ib_imm_data.imm_data32 = 0;
	wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
	wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->write.u.immd_src, wr,
					 T4_MAX_WRITE_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof(wqe->write) + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->write.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof(wqe->write) + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof(wqe->write) + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}

static void build_immd_cmpl(struct t4_sq *sq, struct fw_ri_immd_cmpl *immdp,
			    struct ib_send_wr *wr)
{
	memcpy((u8 *)immdp->data, (u8 *)(uintptr_t)wr->sg_list->addr, 16);
	memset(immdp->r1, 0, 6);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->immdlen = 16;
}

static void build_rdma_write_cmpl(struct t4_sq *sq,
				  struct fw_ri_rdma_write_cmpl_wr *wcwr,
				  const struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;

	/*
	 * This code assumes the struct fields preceding the write isgl
	 * fit in one 64B WR slot. This is because the WQE is built
	 * directly in the DMA queue, and wrapping is only handled by
	 * the code building sgls, i.e. the "fixed part" of the wr
	 * structs must all fit in 64B. The WQE build code should
	 * probably be redesigned to avoid this restriction, but for now
	 * just add the BUILD_BUG_ON() to catch if this WQE struct gets
	 * too big.
	 */
	BUILD_BUG_ON(offsetof(struct fw_ri_rdma_write_cmpl_wr, u) > 64);

	wcwr->stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
	wcwr->to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
	if (wr->next->opcode == IB_WR_SEND)
		wcwr->stag_inv = 0;
	else
		wcwr->stag_inv = cpu_to_be32(wr->next->ex.invalidate_rkey);
	wcwr->r2 = 0;
	wcwr->r3 = 0;

	/* SEND_INV SGL */
	if (wr->next->send_flags & IB_SEND_INLINE)
		build_immd_cmpl(sq, &wcwr->u_cmpl.immd_src, wr->next);
	else
		build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],
			   &wcwr->u_cmpl.isgl_src, wr->next->sg_list, 1, NULL);

	/* WRITE SGL */
	build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],
		   wcwr->u.isgl_src, wr->sg_list, wr->num_sge, &plen);

	size = sizeof(*wcwr) + sizeof(struct fw_ri_isgl) +
	       wr->num_sge * sizeof(struct fw_ri_sge);
	wcwr->plen = cpu_to_be32(plen);
	*len16 = DIV_ROUND_UP(size, 16);
}

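/*
 * READ REQ WRs carry at most one SGE. A zero-length read is still
 * posted (with dummy stags) so that its completion semantics are
 * preserved.
 */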
static int build_rdma_read(union t4_wr *wqe, const struct ib_send_wr *wr,
			   u8 *len16)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	if (wr->num_sge && wr->sg_list[0].length) {
		wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
							>> 32));
		wqe->read.to_src_lo = cpu_to_be32((u32)rdma_wr(wr)->remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
							 >> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
	} else {
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	}
	wqe->read.r2 = 0;
	wqe->read.r5 = 0;
	*len16 = DIV_ROUND_UP(sizeof(wqe->read), 16);
	return 0;
}

static void post_write_cmpl(struct c4iw_qp *qhp, const struct ib_send_wr *wr)
{
	bool send_signaled = (wr->next->send_flags & IB_SEND_SIGNALED) ||
			     qhp->sq_sig_all;
	bool write_signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
			      qhp->sq_sig_all;
	struct t4_swsqe *swsqe;
	union t4_wr *wqe;
	u16 write_wrid;
	u8 len16;
	u16 idx;

	/*
	 * The sw_sq entries still look like a WRITE and a SEND and consume
	 * 2 slots. The FW WR, however, will be a single uber-WR.
	 */
	wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
			      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
	build_rdma_write_cmpl(&qhp->wq.sq, &wqe->write_cmpl, wr, &len16);

	/* WRITE swsqe */
	swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
	swsqe->opcode = FW_RI_RDMA_WRITE;
	swsqe->idx = qhp->wq.sq.pidx;
	swsqe->complete = 0;
	swsqe->signaled = write_signaled;
	swsqe->flushed = 0;
	swsqe->wr_id = wr->wr_id;
	if (c4iw_wr_log) {
		swsqe->sge_ts =
			cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]);
		swsqe->host_time = ktime_get();
	}

	write_wrid = qhp->wq.sq.pidx;

	/* just bump the sw_sq */
	qhp->wq.sq.in_use++;
	if (++qhp->wq.sq.pidx == qhp->wq.sq.size)
		qhp->wq.sq.pidx = 0;

	/* SEND_WITH_INV swsqe */
	swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
	if (wr->next->opcode == IB_WR_SEND)
		swsqe->opcode = FW_RI_SEND;
	else
		swsqe->opcode = FW_RI_SEND_WITH_INV;
	swsqe->idx = qhp->wq.sq.pidx;
	swsqe->complete = 0;
	swsqe->signaled = send_signaled;
	swsqe->flushed = 0;
	swsqe->wr_id = wr->next->wr_id;
	if (c4iw_wr_log) {
		swsqe->sge_ts =
			cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]);
		swsqe->host_time = ktime_get();
	}

	wqe->write_cmpl.flags_send = send_signaled ? FW_RI_COMPLETION_FLAG : 0;
	wqe->write_cmpl.wrid_send = qhp->wq.sq.pidx;

	init_wr_hdr(wqe, write_wrid, FW_RI_RDMA_WRITE_CMPL_WR,
		    write_signaled ? FW_RI_COMPLETION_FLAG : 0, len16);
	t4_sq_produce(&qhp->wq, len16);
	idx = DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);

	t4_ring_sq_db(&qhp->wq, idx, wqe);
}

static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   const struct ib_recv_wr *wr, u8 *len16)
{
	int ret;

	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
			 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	if (ret)
		return ret;
	*len16 = DIV_ROUND_UP(
		sizeof(wqe->recv) + wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}

static int build_srq_recv(union t4_recv_wr *wqe, const struct ib_recv_wr *wr,
			  u8 *len16)
{
	int ret;

	ret = build_isgl((__be64 *)wqe, (__be64 *)(wqe + 1),
			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	if (ret)
		return ret;
	*len16 = DIV_ROUND_UP(sizeof(wqe->recv) +
			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}

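/*
 * Build a FW_RI_FR_NSMR_TPTE_WR: the complete TPT entry and a PBL of
 * at most two pages are carried in the WR itself, so the firmware can
 * write the TPT directly without a separate memory write of the PBL.
 */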
static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
			      const struct ib_reg_wr *wr, struct c4iw_mr *mhp,
			      u8 *len16)
{
	__be64 *p = (__be64 *)fr->pbl;

	fr->r2 = cpu_to_be32(0);
	fr->stag = cpu_to_be32(mhp->ibmr.rkey);

	fr->tpte.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
		FW_RI_TPTE_STAGKEY_V((mhp->ibmr.rkey & FW_RI_TPTE_STAGKEY_M)) |
		FW_RI_TPTE_STAGSTATE_V(1) |
		FW_RI_TPTE_STAGTYPE_V(FW_RI_STAG_NSMR) |
		FW_RI_TPTE_PDID_V(mhp->attr.pdid));
	fr->tpte.locread_to_qpid = cpu_to_be32(
		FW_RI_TPTE_PERM_V(c4iw_ib_to_tpt_access(wr->access)) |
		FW_RI_TPTE_ADDRTYPE_V(FW_RI_VA_BASED_TO) |
		FW_RI_TPTE_PS_V(ilog2(wr->mr->page_size) - 12));
	fr->tpte.nosnoop_pbladdr = cpu_to_be32(FW_RI_TPTE_PBLADDR_V(
		PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr) >> 3));
	fr->tpte.dca_mwbcnt_pstag = cpu_to_be32(0);
	fr->tpte.len_hi = cpu_to_be32(0);
	fr->tpte.len_lo = cpu_to_be32(mhp->ibmr.length);
	fr->tpte.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
	fr->tpte.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff);

	p[0] = cpu_to_be64((u64)mhp->mpl[0]);
	p[1] = cpu_to_be64((u64)mhp->mpl[1]);

	*len16 = DIV_ROUND_UP(sizeof(*fr), 16);
}

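/*
 * Classic fastreg WR: the PBL is posted either as a ULPTX DSGL (the
 * hardware DMA-reads it from host memory) when it exceeds max_fr_immd,
 * or inline as immediate data, zero-padded to the 32B-rounded PBL size.
 */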
static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
			const struct ib_reg_wr *wr, struct c4iw_mr *mhp,
			u8 *len16, bool dsgl_supported)
{
	struct fw_ri_immd *imdp;
	__be64 *p;
	int i;
	int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32);
	int rem;

	if (mhp->mpl_len > t4_max_fr_depth(dsgl_supported && use_dsgl))
		return -EINVAL;

	wqe->fr.qpbinde_to_dcacpu = 0;
	wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12;
	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access);
	wqe->fr.len_hi = 0;
	wqe->fr.len_lo = cpu_to_be32(mhp->ibmr.length);
	wqe->fr.stag = cpu_to_be32(wr->key);
	wqe->fr.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
	wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff);

	if (dsgl_supported && use_dsgl && (pbllen > max_fr_immd)) {
		struct fw_ri_dsgl *sglp;

		for (i = 0; i < mhp->mpl_len; i++)
			mhp->mpl[i] = (__force u64)cpu_to_be64((u64)mhp->mpl[i]);

		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
		sglp->op = FW_RI_DATA_DSGL;
		sglp->r1 = 0;
		sglp->nsge = cpu_to_be16(1);
		sglp->addr0 = cpu_to_be64(mhp->mpl_addr);
		sglp->len0 = cpu_to_be32(pbllen);

		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
	} else {
		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
		imdp->op = FW_RI_DATA_IMMD;
		imdp->r1 = 0;
		imdp->r2 = 0;
		imdp->immdlen = cpu_to_be32(pbllen);
		p = (__be64 *)(imdp + 1);
		rem = pbllen;
		for (i = 0; i < mhp->mpl_len; i++) {
			*p = cpu_to_be64((u64)mhp->mpl[i]);
			rem -= sizeof(*p);
			if (++p == (__be64 *)&sq->queue[sq->size])
				p = (__be64 *)sq->queue;
		}
		while (rem) {
			*p = 0;
			rem -= sizeof(*p);
			if (++p == (__be64 *)&sq->queue[sq->size])
				p = (__be64 *)sq->queue;
		}
		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
				      + pbllen, 16);
	}
	return 0;
}

static int build_inv_stag(union t4_wr *wqe, const struct ib_send_wr *wr,
			  u8 *len16)
{
	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->inv.r2 = 0;
	*len16 = DIV_ROUND_UP(sizeof(wqe->inv), 16);
	return 0;
}

void c4iw_qp_add_ref(struct ib_qp *qp)
{
	pr_debug("ib_qp %p\n", qp);
	refcount_inc(&to_c4iw_qp(qp)->qp_refcnt);
}

void c4iw_qp_rem_ref(struct ib_qp *qp)
{
	pr_debug("ib_qp %p\n", qp);
	if (refcount_dec_and_test(&to_c4iw_qp(qp)->qp_refcnt))
		complete(&to_c4iw_qp(qp)->qp_rel_comp);
}

static void add_to_fc_list(struct list_head *head, struct list_head *entry)
{
	if (list_empty(entry))
		list_add_tail(entry, head);
}

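/*
 * Ring the SQ doorbell while honoring doorbell flow control: if the
 * device is recovering from doorbell overflow (db_state != NORMAL),
 * queue the pidx increment on the flow-control list instead.
 */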
static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
{
	unsigned long flags;

	xa_lock_irqsave(&qhp->rhp->qps, flags);
	spin_lock(&qhp->lock);
	if (qhp->rhp->db_state == NORMAL)
		t4_ring_sq_db(&qhp->wq, inc, NULL);
	else {
		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
		qhp->wq.sq.wq_pidx_inc += inc;
	}
	spin_unlock(&qhp->lock);
	xa_unlock_irqrestore(&qhp->rhp->qps, flags);
	return 0;
}

static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
{
	unsigned long flags;

	xa_lock_irqsave(&qhp->rhp->qps, flags);
	spin_lock(&qhp->lock);
	if (qhp->rhp->db_state == NORMAL)
		t4_ring_rq_db(&qhp->wq, inc, NULL);
	else {
		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
		qhp->wq.rq.wq_pidx_inc += inc;
	}
	spin_unlock(&qhp->lock);
	xa_unlock_irqrestore(&qhp->rhp->qps, flags);
	return 0;
}

static int ib_to_fw_opcode(int ib_opcode)
{
	int opcode;

	switch (ib_opcode) {
	case IB_WR_SEND_WITH_INV:
		opcode = FW_RI_SEND_WITH_INV;
		break;
	case IB_WR_SEND:
		opcode = FW_RI_SEND;
		break;
	case IB_WR_RDMA_WRITE:
		opcode = FW_RI_RDMA_WRITE;
		break;
	case IB_WR_RDMA_WRITE_WITH_IMM:
		opcode = FW_RI_WRITE_IMMEDIATE;
		break;
	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_READ_WITH_INV:
		opcode = FW_RI_READ_REQ;
		break;
	case IB_WR_REG_MR:
		opcode = FW_RI_FAST_REGISTER;
		break;
	case IB_WR_LOCAL_INV:
		opcode = FW_RI_LOCAL_INV;
		break;
	default:
		opcode = -EINVAL;
	}
	return opcode;
}

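/*
 * Once the QP has been flushed, posted WRs can no longer reach the
 * hardware. Complete them as software "drain" CQEs directly on the
 * SW CQ, and kick the completion handler if the CQ is armed.
 */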
static int complete_sq_drain_wr(struct c4iw_qp *qhp,
				const struct ib_send_wr *wr)
{
	struct t4_cqe cqe = {};
	struct c4iw_cq *schp;
	unsigned long flag;
	struct t4_cq *cq;
	int opcode;

	schp = to_c4iw_cq(qhp->ibqp.send_cq);
	cq = &schp->cq;

	opcode = ib_to_fw_opcode(wr->opcode);
	if (opcode < 0)
		return opcode;

	cqe.u.drain_cookie = wr->wr_id;
	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
				 CQE_OPCODE_V(opcode) |
				 CQE_TYPE_V(1) |
				 CQE_SWCQE_V(1) |
				 CQE_DRAIN_V(1) |
				 CQE_QPID_V(qhp->wq.sq.qid));

	spin_lock_irqsave(&schp->lock, flag);
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
	spin_unlock_irqrestore(&schp->lock, flag);

	if (t4_clear_cq_armed(&schp->cq)) {
		spin_lock_irqsave(&schp->comp_handler_lock, flag);
		(*schp->ibcq.comp_handler)(&schp->ibcq,
					   schp->ibcq.cq_context);
		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
	}
	return 0;
}

static int complete_sq_drain_wrs(struct c4iw_qp *qhp,
				 const struct ib_send_wr *wr,
				 const struct ib_send_wr **bad_wr)
{
	int ret = 0;

	while (wr) {
		ret = complete_sq_drain_wr(qhp, wr);
		if (ret) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	return ret;
}

static void complete_rq_drain_wr(struct c4iw_qp *qhp,
				 const struct ib_recv_wr *wr)
{
	struct t4_cqe cqe = {};
	struct c4iw_cq *rchp;
	unsigned long flag;
	struct t4_cq *cq;

	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
	cq = &rchp->cq;

	cqe.u.drain_cookie = wr->wr_id;
	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
				 CQE_OPCODE_V(FW_RI_SEND) |
				 CQE_TYPE_V(0) |
				 CQE_SWCQE_V(1) |
				 CQE_DRAIN_V(1) |
				 CQE_QPID_V(qhp->wq.sq.qid));

	spin_lock_irqsave(&rchp->lock, flag);
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
	spin_unlock_irqrestore(&rchp->lock, flag);

	if (t4_clear_cq_armed(&rchp->cq)) {
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq,
					   rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
	}
}

static void complete_rq_drain_wrs(struct c4iw_qp *qhp,
				  const struct ib_recv_wr *wr)
{
	while (wr) {
		complete_rq_drain_wr(qhp, wr);
		wr = wr->next;
	}
}

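/*
 * Post send work requests. Under the QP lock: if the QP has already
 * been flushed, complete the chain as drain CQEs; otherwise reserve
 * SQ slots and build one WQE per WR.
 */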
int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		   const struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 len16 = 0;
	enum fw_wr_opcodes fw_opcode = 0;
	enum fw_ri_wr_flags fw_flags;
	struct c4iw_qp *qhp;
	struct c4iw_dev *rhp;
	union t4_wr *wqe = NULL;
	u32 num_wrs;
	struct t4_swsqe *swsqe;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	rhp = qhp->rhp;
	spin_lock_irqsave(&qhp->lock, flag);

	/*
	 * If the qp has been flushed, then just insert a special
	 * drain cqe.
	 */
	if (qhp->wq.flushed) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		err = complete_sq_drain_wrs(qhp, wr, bad_wr);
		return err;
	}
	num_wrs = t4_sq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		*bad_wr = wr;
		return -ENOMEM;
	}

	/*
	 * Fastpath for NVMe-oF target WRITE + SEND_WITH_INV wr chains,
	 * which form the response for small NVMe-oF READ requests. If
	 * the chain is exactly a WRITE->SEND_WITH_INV or a WRITE->SEND
	 * and the sgl depths and lengths meet the requirements of the
	 * fw_ri_write_cmpl_wr work request, then build and post the
	 * write_cmpl WR. If any of the tests below are not true, then
	 * we continue on with the traditional WRITE and SEND WRs.
	 */
	if (qhp->rhp->rdev.lldi.write_cmpl_support &&
	    CHELSIO_CHIP_VERSION(qhp->rhp->rdev.lldi.adapter_type) >=
	    CHELSIO_T5 &&
	    wr && wr->next && !wr->next->next &&
	    wr->opcode == IB_WR_RDMA_WRITE &&
	    wr->sg_list[0].length && wr->num_sge <= T4_WRITE_CMPL_MAX_SGL &&
	    (wr->next->opcode == IB_WR_SEND ||
	     wr->next->opcode == IB_WR_SEND_WITH_INV) &&
	    wr->next->sg_list[0].length == T4_WRITE_CMPL_MAX_CQE &&
	    wr->next->num_sge == 1 && num_wrs >= 2) {
		post_write_cmpl(qhp, wr);
		spin_unlock_irqrestore(&qhp->lock, flag);
		return 0;
	}

	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
		      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);

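		/*
		 * Map the generic ib_send_flags onto firmware WR flags:
		 * SOLICITED generates a solicited event at the peer, and
		 * SIGNALED (or an all-signaled SQ) requests a completion.
		 */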
		fw_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_INV:
		case IB_WR_SEND:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			if (wr->opcode == IB_WR_SEND)
				swsqe->opcode = FW_RI_SEND;
			else
				swsqe->opcode = FW_RI_SEND_WITH_INV;
			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_WRITE_WITH_IMM:
			if (unlikely(!rhp->rdev.lldi.write_w_imm_support)) {
				err = -EINVAL;
				break;
			}
			fw_flags |= FW_RI_RDMA_WRITE_WITH_IMMEDIATE;
			fallthrough;
		case IB_WR_RDMA_WRITE:
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) {
				c4iw_invalidate_mr(rhp, wr->sg_list[0].lkey);
				fw_flags = FW_RI_RDMA_READ_INVALIDATE;
			} else {
				fw_flags = 0;
			}
			err = build_rdma_read(wqe, wr, &len16);
			if (err)
				break;
			swsqe->read_len = wr->sg_list[0].length;
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			break;
		case IB_WR_REG_MR: {
			struct c4iw_mr *mhp = to_c4iw_mr(reg_wr(wr)->mr);

			swsqe->opcode = FW_RI_FAST_REGISTER;
			if (rhp->rdev.lldi.fr_nsmr_tpte_wr_support &&
			    !mhp->attr.state && mhp->mpl_len <= 2) {
				fw_opcode = FW_RI_FR_NSMR_TPTE_WR;
				build_tpte_memreg(&wqe->fr_tpte, reg_wr(wr),
						  mhp, &len16);
			} else {
				fw_opcode = FW_RI_FR_NSMR_WR;
				err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr),
						   mhp, &len16,
						   rhp->rdev.lldi.ulptx_memwrite_dsgl);
				if (err)
					break;
			}
			mhp->attr.state = 1;
			break;
		}
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
			fw_opcode = FW_RI_INV_LSTAG_WR;
			swsqe->opcode = FW_RI_LOCAL_INV;
			err = build_inv_stag(wqe, wr, &len16);
			c4iw_invalidate_mr(rhp, wr->ex.invalidate_rkey);
			break;
		default:
			pr_warn("%s post of type=%d TBD!\n", __func__,
				wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->complete = 0;
		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
				  qhp->sq_sig_all;
		swsqe->flushed = 0;
		swsqe->wr_id = wr->wr_id;
		if (c4iw_wr_log) {
			swsqe->sge_ts = cxgb4_read_sge_timestamp(
					rhp->rdev.lldi.ports[0]);
			swsqe->host_time = ktime_get();
		}

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

		pr_debug("cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
			 (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
			 swsqe->opcode, swsqe->read_len);
		wr = wr->next;
		num_wrs--;
		t4_sq_produce(&qhp->wq, len16);
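		/* Track how many EQ slots (of T4_EQ_ENTRY_SIZE bytes each)
		 * this WR consumed; the total rings the doorbell below. */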
		idx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
	}
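	/*
	 * If the status page says user doorbells are disabled (db flow
	 * control is in effect), hand the update to the kernel doorbell
	 * path instead of ringing the doorbell directly.
	 */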
	if (!rhp->rdev.status_page->db_off) {
		t4_ring_sq_db(&qhp->wq, idx, wqe);
		spin_unlock_irqrestore(&qhp->lock, flag);
	} else {
		spin_unlock_irqrestore(&qhp->lock, flag);
		ring_kernel_sq_db(qhp, idx);
	}
	return err;
}

int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe = NULL;
	u32 num_wrs;
	u8 len16 = 0;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);

	/*
	 * If the qp has been flushed, then just insert a special
	 * drain cqe.
	 */
	if (qhp->wq.flushed) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		complete_rq_drain_wrs(qhp, wr);
		return err;
	}
	num_wrs = t4_rq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		*bad_wr = wr;
		return -ENOMEM;
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
					   qhp->wq.rq.wq_pidx *
					   T4_EQ_ENTRY_SIZE);
		if (num_wrs)
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

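		/* Stash the caller's wr_id in the software RQ so it can be
		 * handed back in the completion for this slot. */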
		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
		if (c4iw_wr_log) {
			qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
				cxgb4_read_sge_timestamp(
						qhp->rhp->rdev.lldi.ports[0]);
			qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_time =
				ktime_get();
		}

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.r1 = 0;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;
		pr_debug("cookie 0x%llx pidx %u\n",
			 (unsigned long long)wr->wr_id, qhp->wq.rq.pidx);
		t4_rq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
		wr = wr->next;
		num_wrs--;
	}
	if (!qhp->rhp->rdev.status_page->db_off) {
		t4_ring_rq_db(&qhp->wq, idx, wqe);
		spin_unlock_irqrestore(&qhp->lock, flag);
	} else {
		spin_unlock_irqrestore(&qhp->lock, flag);
		ring_kernel_rq_db(qhp, idx);
	}
	return err;
}

static void defer_srq_wr(struct t4_srq *srq, union t4_recv_wr *wqe,
			 u64 wr_id, u8 len16)
{
	struct t4_srq_pending_wr *pwr = &srq->pending_wrs[srq->pending_pidx];

	pr_debug("%s cidx %u pidx %u wq_pidx %u in_use %u ooo_count %u wr_id 0x%llx pending_cidx %u pending_pidx %u pending_in_use %u\n",
		 __func__, srq->cidx, srq->pidx, srq->wq_pidx,
		 srq->in_use, srq->ooo_count,
		 (unsigned long long)wr_id, srq->pending_cidx,
		 srq->pending_pidx, srq->pending_in_use);
	pwr->wr_id = wr_id;
	pwr->len16 = len16;
	memcpy(&pwr->wqe, wqe, len16 * 16);
	t4_srq_produce_pending_wr(srq);
}

int c4iw_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
		       const struct ib_recv_wr **bad_wr)
{
	union t4_recv_wr *wqe, lwqe;
	struct c4iw_srq *srq;
	unsigned long flag;
	u8 len16 = 0;
	u16 idx = 0;
	int err = 0;
	u32 num_wrs;

	srq = to_c4iw_srq(ibsrq);
	spin_lock_irqsave(&srq->lock, flag);
	num_wrs = t4_srq_avail(&srq->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&srq->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe = &lwqe;
		if (num_wrs)
			err = build_srq_recv(wqe, wr, &len16);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.r1 = 0;
		wqe->recv.wrid = srq->wq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;

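		/*
		 * If SRQ slots have completed out of order, or earlier WRs
		 * are still parked, or this slot is still in use, the WR
		 * cannot be written into the queue yet; defer it to the
		 * pending list so it can be submitted in order later.
		 */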
		if (srq->wq.ooo_count ||
		    srq->wq.pending_in_use ||
		    srq->wq.sw_rq[srq->wq.pidx].valid) {
			defer_srq_wr(&srq->wq, wqe, wr->wr_id, len16);
		} else {
			srq->wq.sw_rq[srq->wq.pidx].wr_id = wr->wr_id;
			srq->wq.sw_rq[srq->wq.pidx].valid = 1;
			c4iw_copy_wr_to_srq(&srq->wq, wqe, len16);
			pr_debug("%s cidx %u pidx %u wq_pidx %u in_use %u wr_id 0x%llx\n",
				 __func__, srq->wq.cidx,
				 srq->wq.pidx, srq->wq.wq_pidx,
				 srq->wq.in_use,
				 (unsigned long long)wr->wr_id);
			t4_srq_produce(&srq->wq, len16);
			idx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
		}
		wr = wr->next;
		num_wrs--;
	}
	if (idx)
		t4_ring_srq_db(&srq->wq, idx, len16, wqe);
	spin_unlock_irqrestore(&srq->lock, flag);
	return err;
}

static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
				    u8 *ecode)
{
	int status;
	int tagged;
	int opcode;
	int rqtype;
	int send_inv;

	if (!err_cqe) {
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		return;
	}

	status = CQE_STATUS(err_cqe);
	opcode = CQE_OPCODE(err_cqe);
	rqtype = RQ_TYPE(err_cqe);
	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
		   (opcode == FW_RI_SEND_WITH_SE_INV);
	tagged = (opcode == FW_RI_RDMA_WRITE) ||
		 (rqtype && (opcode == FW_RI_READ_RESP));

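	/*
	 * Translate the CQE error status into the TERMINATE message's
	 * layer/etype and error code per the iWARP (RDMAP/DDP/MPA) specs.
	 */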
	switch (status) {
	case T4_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case T4_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == FW_RI_SEND_WITH_INV) ||
		    (opcode == FW_RI_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case T4_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case T4_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case T4_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case T4_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case T4_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case T4_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case T4_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case T4_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case T4_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case T4_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case T4_ERR_MSN:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case T4_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}

static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
			   gfp_t gfp)
{
	struct fw_ri_wr *wqe;
	struct sk_buff *skb;
	struct terminate_message *term;

	pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid,
		 qhp->ep->hwtid);

	skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
	if (WARN_ON(!skb))
		return;

	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = __skb_put_zero(skb, sizeof(*wqe));
	wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(qhp->ep->hwtid) |
		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));

	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
	wqe->u.terminate.immdlen = cpu_to_be32(sizeof(*term));
	term = (struct terminate_message *)wqe->u.terminate.termmsg;
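	/* If the QP already carries MPA-layer term codes (set when the
	 * TERMINATE was requested via modify_qp), use those; otherwise
	 * derive the codes from the error CQE. */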
	if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
		term->layer_etype = qhp->attr.layer_etype;
		term->ecode = qhp->attr.ecode;
	} else {
		build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
	}
	c4iw_ofld_send(&qhp->rhp->rdev, skb);
}

/*
 * Assumes qhp->mutex is held; acquires the CQ locks and then the QP
 * spinlock itself.
 */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
		       struct c4iw_cq *schp)
{
	int count;
	int rq_flushed = 0, sq_flushed;
	unsigned long flag;

	pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);

	/* locking hierarchy: cqs lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, flag);
	if (schp != rchp)
		spin_lock(&schp->lock);
	spin_lock(&qhp->lock);

	if (qhp->wq.flushed) {
		spin_unlock(&qhp->lock);
		if (schp != rchp)
			spin_unlock(&schp->lock);
		spin_unlock_irqrestore(&rchp->lock, flag);
		return;
	}
	qhp->wq.flushed = 1;
	t4_set_wq_in_error(&qhp->wq, 0);

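	/* Flush pending HW CQEs first, then generate flush completions
	 * for any unpolled RQ and SQ work requests. */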
	c4iw_flush_hw_cq(rchp, qhp);
	if (!qhp->srq) {
		c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
		rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	}

	if (schp != rchp)
		c4iw_flush_hw_cq(schp, qhp);
	sq_flushed = c4iw_flush_sq(qhp);

	spin_unlock(&qhp->lock);
	if (schp != rchp)
		spin_unlock(&schp->lock);
	spin_unlock_irqrestore(&rchp->lock, flag);

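	/*
	 * With the locks dropped, run the completion handler for each CQ
	 * that received flush entries and was armed, so consumers learn
	 * about the flushed work requests.
	 */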
	if (schp == rchp) {
		if ((rq_flushed || sq_flushed) &&
		    t4_clear_cq_armed(&rchp->cq)) {
			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
						   rchp->ibcq.cq_context);
			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		}
	} else {
		if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) {
			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
						   rchp->ibcq.cq_context);
			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		}
		if (sq_flushed && t4_clear_cq_armed(&schp->cq)) {
			spin_lock_irqsave(&schp->comp_handler_lock, flag);
			(*schp->ibcq.comp_handler)(&schp->ibcq,
						   schp->ibcq.cq_context);
			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
		}
	}
}

static void flush_qp(struct c4iw_qp *qhp)
{
	struct c4iw_cq *rchp, *schp;
	unsigned long flag;

	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
	schp = to_c4iw_cq(qhp->ibqp.send_cq);

	if (qhp->ibqp.uobject) {
		/* for user qps, qhp->wq.flushed is protected by qhp->mutex */
		if (qhp->wq.flushed)
			return;

		qhp->wq.flushed = 1;
		t4_set_wq_in_error(&qhp->wq, 0);
		t4_set_cq_in_error(&rchp->cq);
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		if (schp != rchp) {
			t4_set_cq_in_error(&schp->cq);
			spin_lock_irqsave(&schp->comp_handler_lock, flag);
			(*schp->ibcq.comp_handler)(&schp->ibcq,
						   schp->ibcq.cq_context);
			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
		}
		return;
	}
	__flush_qp(qhp, rchp, schp);
}

static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		     struct c4iw_ep *ep)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct sk_buff *skb;

	pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, ep->hwtid);

	skb = skb_dequeue(&ep->com.ep_skb_list);
	if (WARN_ON(!skb))
		return -ENOMEM;

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	wqe = __skb_put_zero(skb, sizeof(*wqe));
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP_V(FW_RI_INIT_WR) |
		FW_WR_COMPL_F);
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
	wqe->cookie = (uintptr_t)ep->com.wr_waitp;

	wqe->u.fini.type = FW_RI_TYPE_FINI;

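	/* Post the FINI WR and block until the firmware acks it. */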
	ret = c4iw_ref_send_wait(&rhp->rdev, skb, ep->com.wr_waitp,
				 qhp->ep->hwtid, qhp->wq.sq.qid, __func__);

	pr_debug("ret %d\n", ret);
	return ret;
}

static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
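	/*
	 * Build the zero-length RDMA WRITE or READ work request used as
	 * the RTR message for MPA peer-to-peer connection establishment.
	 */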
	pr_debug("p2p_type = %d\n", p2p_type);
	memset(&init->u, 0, sizeof(init->u));
	switch (p2p_type) {
	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
		init->u.write.stag_sink = cpu_to_be32(1);
		init->u.write.to_sink = cpu_to_be64(1);
		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		init->u.write.len16 = DIV_ROUND_UP(
			sizeof(init->u.write) + sizeof(struct fw_ri_immd), 16);
		break;
	case FW_RI_INIT_P2PTYPE_READ_REQ:
		init->u.write.opcode = FW_RI_RDMA_READ_WR;
		init->u.read.stag_src = cpu_to_be32(1);
		init->u.read.to_src_lo = cpu_to_be32(1);
		init->u.read.stag_sink = cpu_to_be32(1);
		init->u.read.to_sink_lo = cpu_to_be32(1);
		init->u.read.len16 = DIV_ROUND_UP(sizeof(init->u.read), 16);
		break;
	}
}

static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct sk_buff *skb;

	pr_debug("qhp %p qid 0x%x tid %u ird %u ord %u\n", qhp,
		 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);

	skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}
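	/* Reserve IRD resources for this QP before posting the INIT WR. */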
	ret = alloc_ird(rhp, qhp->attr.max_ird);
	if (ret) {
		qhp->attr.max_ird = 0;
		kfree_skb(skb);
		goto out;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = __skb_put_zero(skb, sizeof(*wqe));
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP_V(FW_RI_INIT_WR) |
		FW_WR_COMPL_F);
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(qhp->ep->hwtid) |
		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));

	wqe->cookie = (uintptr_t)qhp->ep->com.wr_waitp;

	wqe->u.init.type = FW_RI_TYPE_INIT;
	wqe->u.init.mpareqbit_p2ptype =
		FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) |
		FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type);
	wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
	if (qhp->attr.mpa_attr.recv_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.xmit_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.crc_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;

	wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
			      FW_RI_QP_RDMA_WRITE_ENABLE |
			      FW_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
				       FW_RI_QP_STAG0_ENABLE;
	wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
	wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
	wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
	if (qhp->srq) {
		wqe->u.init.rq_eqid = cpu_to_be32(FW_RI_INIT_RQEQID_SRQ |
						  qhp->srq->idx);
	} else {
		wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
		wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
		wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
						   rhp->rdev.lldi.vr->rq.start);
	}
	wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
	wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
	wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
	wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
	wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
	wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
	if (qhp->attr.mpa_attr.initiator)
		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);

	ret = c4iw_ref_send_wait(&rhp->rdev, skb, qhp->ep->com.wr_waitp,
				 qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
	if (!ret)
		goto out;

	free_ird(rhp, qhp->attr.max_ird);
out:
	pr_debug("ret %d\n", ret);
	return ret;
}

int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
		   int internal)
{
	int ret = 0;
	struct c4iw_qp_attributes newattr = qhp->attr;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct c4iw_ep *ep = NULL;

	pr_debug("qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
		 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
		 (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	mutex_lock(&qhp->mutex);

	/* Process attr changes if in IDLE */
	if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & C4IW_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & C4IW_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird > cur_max_read_depth(rhp)) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

	if (mask & C4IW_QP_ATTR_SQ_DB) {
		ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc);
		goto out;
	}
	if (mask & C4IW_QP_ATTR_RQ_DB) {
		ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc);
		goto out;
	}

	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

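	/*
	 * Drive the iWARP QP state machine: each case validates the
	 * requested transition from the current state and performs the
	 * associated firmware and endpoint work.
	 */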
	switch (qhp->attr.state) {
	case C4IW_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_RTS:
			if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			set_state(qhp, C4IW_QP_STATE_RTS);

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			c4iw_get_ep(&qhp->ep->com);
			ret = rdma_init(rhp, qhp);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			flush_qp(qhp);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_RTS:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_CLOSING:
			t4_set_wq_in_error(&qhp->wq, 0);
			set_state(qhp, C4IW_QP_STATE_CLOSING);
			ep = qhp->ep;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				c4iw_get_ep(&qhp->ep->com);
			}
			ret = rdma_fini(rhp, qhp, ep);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_TERMINATE:
			t4_set_wq_in_error(&qhp->wq, 0);
			set_state(qhp, C4IW_QP_STATE_TERMINATE);
			qhp->attr.layer_etype = attrs->layer_etype;
			qhp->attr.ecode = attrs->ecode;
			ep = qhp->ep;
			if (!internal) {
				c4iw_get_ep(&ep->com);
				terminate = 1;
				disconnect = 1;
			} else {
				terminate = qhp->attr.send_term;
				ret = rdma_fini(rhp, qhp, ep);
				if (ret)
					goto err;
			}
			break;
		case C4IW_QP_STATE_ERROR:
			t4_set_wq_in_error(&qhp->wq, 0);
			set_state(qhp, C4IW_QP_STATE_ERROR);
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
				c4iw_get_ep(&qhp->ep->com);
			}
			goto err;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_CLOSING:

		/*
		 * Allow kernel users to move to ERROR for qp draining.
		 */
		if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
				  C4IW_QP_STATE_ERROR)) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case C4IW_QP_STATE_IDLE:
			flush_qp(qhp);
			set_state(qhp, C4IW_QP_STATE_IDLE);
			qhp->attr.llp_stream_handle = NULL;
			c4iw_put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case C4IW_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto err;
		}
		break;
	case C4IW_QP_STATE_ERROR:
		if (attrs->next_state != C4IW_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}
		if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
			ret = -EINVAL;
			goto out;
		}
		set_state(qhp, C4IW_QP_STATE_IDLE);
		break;
	case C4IW_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
	default:
		pr_err("%s in a bad state %d\n", __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
	}
	goto out;
err:
	pr_debug("disassociating ep %p qpid 0x%x\n", qhp->ep,
		 qhp->wq.sq.qid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	if (!ep)
		ep = qhp->ep;
	qhp->ep = NULL;
	set_state(qhp, C4IW_QP_STATE_ERROR);
	free = 1;
	abort = 1;
	flush_qp(qhp);
	wake_up(&qhp->wait);
out:
	mutex_unlock(&qhp->mutex);

	if (terminate)
		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect) {
		c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
				   GFP_KERNEL);
		c4iw_put_ep(&ep->com);
	}

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		c4iw_put_ep(&ep->com);
	pr_debug("exit state %d\n", qhp->attr.state);
	return ret;
}

int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_ucontext *ucontext;
	struct c4iw_qp_attributes attrs;

	qhp = to_c4iw_qp(ib_qp);
	rhp = qhp->rhp;
	ucontext = qhp->ucontext;

	attrs.next_state = C4IW_QP_STATE_ERROR;
	if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	else
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
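	/* Wait for the state machine to disassociate the EP from the QP
	 * before tearing down its resources. */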
	wait_event(qhp->wait, !qhp->ep);

	xa_lock_irq(&rhp->qps);
	__xa_erase(&rhp->qps, qhp->wq.sq.qid);
	if (!list_empty(&qhp->db_fc_entry))
		list_del_init(&qhp->db_fc_entry);
	xa_unlock_irq(&rhp->qps);
	free_ird(rhp, qhp->attr.max_ird);

	c4iw_qp_rem_ref(ib_qp);

	wait_for_completion(&qhp->qp_rel_comp);

	pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid);
	pr_debug("qhp %p ucontext %p\n", qhp, ucontext);

	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);

	c4iw_put_wr_wait(qhp->wr_waitp);

	kfree(qhp);
	return 0;
}

struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_pd *php;
	struct c4iw_cq *schp;
	struct c4iw_cq *rchp;
	struct c4iw_create_qp_resp uresp;
	unsigned int sqsize, rqsize = 0;
	struct c4iw_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct c4iw_ucontext, ibucontext);
	int ret;
	struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
	struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;

	pr_debug("ib_pd %p\n", pd);

	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EOPNOTSUPP);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
		return ERR_PTR(-EINVAL);

	if (!attrs->srq) {
		if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
			return ERR_PTR(-E2BIG);
		rqsize = attrs->cap.max_recv_wr + 1;
		if (rqsize < 8)
			rqsize = 8;
	}

	if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
		return ERR_PTR(-E2BIG);
	sqsize = attrs->cap.max_send_wr + 1;
	if (sqsize < 8)
		sqsize = 8;

	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);

	qhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!qhp->wr_waitp) {
		ret = -ENOMEM;
		goto err_free_qhp;
	}

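	/* Size the raw queue memory: the requested entries plus the EQ
	 * status entries appended by hardware (the SQ also reserves a
	 * small amount of extra space). */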
	qhp->wq.sq.size = sqsize;
	qhp->wq.sq.memsize =
		(sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
		sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
	qhp->wq.sq.flush_cidx = -1;
	if (!attrs->srq) {
		qhp->wq.rq.size = rqsize;
		qhp->wq.rq.memsize =
			(rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
			sizeof(*qhp->wq.rq.queue);
	}

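	/* User-mode queues get mmap()ed, so round their sizes up to page
	 * granularity. */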
	if (ucontext) {
		qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
		if (!attrs->srq)
			qhp->wq.rq.memsize =
				roundup(qhp->wq.rq.memsize, PAGE_SIZE);
	}

	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
			qhp->wr_waitp, !attrs->srq);
	if (ret)
		goto err_free_wr_wait;

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize - 1;
	attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct c4iw_cq *)attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	if (!attrs->srq) {
		qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
		qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	}
	qhp->attr.state = C4IW_QP_STATE_IDLE;
	qhp->attr.next_state = C4IW_QP_STATE_IDLE;
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 0;
	qhp->attr.max_ird = 0;
	qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
	spin_lock_init(&qhp->lock);
	mutex_init(&qhp->mutex);
	init_waitqueue_head(&qhp->wait);
	init_completion(&qhp->qp_rel_comp);
	refcount_set(&qhp->qp_refcnt, 1);

	ret = xa_insert_irq(&rhp->qps, qhp->wq.sq.qid, qhp, GFP_KERNEL);
	if (ret)
		goto err_destroy_qp;

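	/*
	 * For user QPs, hand back mmap keys for the SQ/RQ queue memory
	 * and their doorbell/GTS pages (plus the MA sync page when the
	 * SQ is on-chip), so the userspace library can map them.
	 */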
2225*4882a593Smuzhiyun if (udata && ucontext) {
2226*4882a593Smuzhiyun sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL);
2227*4882a593Smuzhiyun if (!sq_key_mm) {
2228*4882a593Smuzhiyun ret = -ENOMEM;
2229*4882a593Smuzhiyun goto err_remove_handle;
2230*4882a593Smuzhiyun }
2231*4882a593Smuzhiyun if (!attrs->srq) {
2232*4882a593Smuzhiyun rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL);
2233*4882a593Smuzhiyun if (!rq_key_mm) {
2234*4882a593Smuzhiyun ret = -ENOMEM;
2235*4882a593Smuzhiyun goto err_free_sq_key;
2236*4882a593Smuzhiyun }
2237*4882a593Smuzhiyun }
2238*4882a593Smuzhiyun sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL);
2239*4882a593Smuzhiyun if (!sq_db_key_mm) {
2240*4882a593Smuzhiyun ret = -ENOMEM;
2241*4882a593Smuzhiyun goto err_free_rq_key;
2242*4882a593Smuzhiyun }
2243*4882a593Smuzhiyun if (!attrs->srq) {
2244*4882a593Smuzhiyun rq_db_key_mm =
2245*4882a593Smuzhiyun kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL);
2246*4882a593Smuzhiyun if (!rq_db_key_mm) {
2247*4882a593Smuzhiyun ret = -ENOMEM;
2248*4882a593Smuzhiyun goto err_free_sq_db_key;
2249*4882a593Smuzhiyun }
2250*4882a593Smuzhiyun }
2251*4882a593Smuzhiyun memset(&uresp, 0, sizeof(uresp));
2252*4882a593Smuzhiyun if (t4_sq_onchip(&qhp->wq.sq)) {
2253*4882a593Smuzhiyun ma_sync_key_mm = kmalloc(sizeof(*ma_sync_key_mm),
2254*4882a593Smuzhiyun GFP_KERNEL);
2255*4882a593Smuzhiyun if (!ma_sync_key_mm) {
2256*4882a593Smuzhiyun ret = -ENOMEM;
2257*4882a593Smuzhiyun goto err_free_rq_db_key;
2258*4882a593Smuzhiyun }
2259*4882a593Smuzhiyun uresp.flags = C4IW_QPF_ONCHIP;
2260*4882a593Smuzhiyun }
2261*4882a593Smuzhiyun if (rhp->rdev.lldi.write_w_imm_support)
2262*4882a593Smuzhiyun uresp.flags |= C4IW_QPF_WRITE_W_IMM;
2263*4882a593Smuzhiyun uresp.qid_mask = rhp->rdev.qpmask;
2264*4882a593Smuzhiyun uresp.sqid = qhp->wq.sq.qid;
2265*4882a593Smuzhiyun uresp.sq_size = qhp->wq.sq.size;
2266*4882a593Smuzhiyun uresp.sq_memsize = qhp->wq.sq.memsize;
2267*4882a593Smuzhiyun if (!attrs->srq) {
2268*4882a593Smuzhiyun uresp.rqid = qhp->wq.rq.qid;
2269*4882a593Smuzhiyun uresp.rq_size = qhp->wq.rq.size;
2270*4882a593Smuzhiyun uresp.rq_memsize = qhp->wq.rq.memsize;
2271*4882a593Smuzhiyun }
2272*4882a593Smuzhiyun spin_lock(&ucontext->mmap_lock);
2273*4882a593Smuzhiyun if (ma_sync_key_mm) {
2274*4882a593Smuzhiyun uresp.ma_sync_key = ucontext->key;
2275*4882a593Smuzhiyun ucontext->key += PAGE_SIZE;
2276*4882a593Smuzhiyun }
2277*4882a593Smuzhiyun uresp.sq_key = ucontext->key;
2278*4882a593Smuzhiyun ucontext->key += PAGE_SIZE;
2279*4882a593Smuzhiyun if (!attrs->srq) {
2280*4882a593Smuzhiyun uresp.rq_key = ucontext->key;
2281*4882a593Smuzhiyun ucontext->key += PAGE_SIZE;
2282*4882a593Smuzhiyun }
2283*4882a593Smuzhiyun uresp.sq_db_gts_key = ucontext->key;
2284*4882a593Smuzhiyun ucontext->key += PAGE_SIZE;
2285*4882a593Smuzhiyun if (!attrs->srq) {
2286*4882a593Smuzhiyun uresp.rq_db_gts_key = ucontext->key;
2287*4882a593Smuzhiyun ucontext->key += PAGE_SIZE;
2288*4882a593Smuzhiyun }
2289*4882a593Smuzhiyun spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (ret)
			goto err_free_ma_sync_key;
		sq_key_mm->key = uresp.sq_key;
		sq_key_mm->addr = qhp->wq.sq.phys_addr;
		sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
		insert_mmap(ucontext, sq_key_mm);
		if (!attrs->srq) {
			rq_key_mm->key = uresp.rq_key;
			rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue);
			rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
			insert_mmap(ucontext, rq_key_mm);
		}
		sq_db_key_mm->key = uresp.sq_db_gts_key;
		sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa;
		sq_db_key_mm->len = PAGE_SIZE;
		insert_mmap(ucontext, sq_db_key_mm);
		if (!attrs->srq) {
			rq_db_key_mm->key = uresp.rq_db_gts_key;
			rq_db_key_mm->addr =
				(u64)(unsigned long)qhp->wq.rq.bar2_pa;
			rq_db_key_mm->len = PAGE_SIZE;
			insert_mmap(ucontext, rq_db_key_mm);
		}
		if (ma_sync_key_mm) {
			ma_sync_key_mm->key = uresp.ma_sync_key;
			ma_sync_key_mm->addr =
				(pci_resource_start(rhp->rdev.lldi.pdev, 0) +
				 PCIE_MA_SYNC_A) & PAGE_MASK;
			ma_sync_key_mm->len = PAGE_SIZE;
			insert_mmap(ucontext, ma_sync_key_mm);
		}

		qhp->ucontext = ucontext;
	}
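	/*
	 * The QP error flag (and, with an SRQ, the SRQ index) live in the
	 * status page that follows the last queue entry: at the end of the
	 * RQ normally, or at the end of the SQ when an SRQ supplies the
	 * receive side.
	 */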
	if (!attrs->srq) {
		qhp->wq.qp_errp =
			&qhp->wq.rq.queue[qhp->wq.rq.size].status.qp_err;
	} else {
		qhp->wq.qp_errp =
			&qhp->wq.sq.queue[qhp->wq.sq.size].status.qp_err;
		qhp->wq.srqidxp =
			&qhp->wq.sq.queue[qhp->wq.sq.size].status.srqidx;
	}

	qhp->ibqp.qp_num = qhp->wq.sq.qid;
	if (attrs->srq)
		qhp->srq = to_c4iw_srq(attrs->srq);
	INIT_LIST_HEAD(&qhp->db_fc_entry);
	pr_debug("sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
		 qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
		 attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
		 qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
	return &qhp->ibqp;
err_free_ma_sync_key:
	kfree(ma_sync_key_mm);
err_free_rq_db_key:
	if (!attrs->srq)
		kfree(rq_db_key_mm);
err_free_sq_db_key:
	kfree(sq_db_key_mm);
err_free_rq_key:
	if (!attrs->srq)
		kfree(rq_key_mm);
err_free_sq_key:
	kfree(sq_key_mm);
err_remove_handle:
	xa_erase_irq(&rhp->qps, qhp->wq.sq.qid);
err_destroy_qp:
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq);
err_free_wr_wait:
	c4iw_put_wr_wait(qhp->wr_waitp);
err_free_qhp:
	kfree(qhp);
	return ERR_PTR(ret);
}

int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	enum c4iw_qp_attr_mask mask = 0;
	struct c4iw_qp_attributes attrs = {};

	pr_debug("ib_qp %p\n", ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	qhp = to_c4iw_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = c4iw_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
		(C4IW_QP_ATTR_ENABLE_RDMA_READ |
		 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
		 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	/*
	 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
	 * ringing the queue db when we're in DB_FULL mode.
	 * Only allow this on T4 devices.
	 */
	attrs.sq_db_inc = attr->sq_psn;
	attrs.rq_db_inc = attr->rq_psn;
	mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
	mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
	if (!is_t4(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
	    (mask & (C4IW_QP_ATTR_SQ_DB | C4IW_QP_ATTR_RQ_DB)))
		return -EINVAL;

	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}
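
/*
 * Illustrative sketch (an assumption, not code from this driver): in
 * DB_FULL mode a userspace library could ask the kernel to bump a queue's
 * doorbell index through the standard verbs call, overloading sq_psn as
 * described above:
 *
 *	struct ibv_qp_attr attr = { .sq_psn = idx_inc };
 *
 *	ibv_modify_qp(qp, &attr, IBV_QP_SQ_PSN);
 */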

struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
	pr_debug("ib_dev %p qpn 0x%x\n", dev, qpn);
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}

void c4iw_dispatch_srq_limit_reached_event(struct c4iw_srq *srq)
{
	struct ib_event event = {};

	event.device = &srq->rhp->ibdev;
	event.element.srq = &srq->ibsrq;
	event.event = IB_EVENT_SRQ_LIMIT_REACHED;
	ib_dispatch_event(&event);
}

int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
		    enum ib_srq_attr_mask srq_attr_mask,
		    struct ib_udata *udata)
{
	struct c4iw_srq *srq = to_c4iw_srq(ib_srq);
	int ret = 0;

	/*
	 * XXX 0 mask == a SW interrupt for srq_limit reached...
	 */
	if (udata && !srq_attr_mask) {
		c4iw_dispatch_srq_limit_reached_event(srq);
		goto out;
	}

	/* no support for this yet */
	if (srq_attr_mask & IB_SRQ_MAX_WR) {
		ret = -EINVAL;
		goto out;
	}

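	/*
	 * Kernel consumers arm the SRQ limit directly here; user-mode
	 * arming arrives via the zero-mask software-interrupt path above.
	 */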
	if (!udata && (srq_attr_mask & IB_SRQ_LIMIT)) {
		srq->armed = true;
		srq->srq_limit = attr->srq_limit;
	}
out:
	return ret;
}

int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);

	memset(attr, 0, sizeof(*attr));
	memset(init_attr, 0, sizeof(*init_attr));
	attr->qp_state = to_ib_qp_state(qhp->attr.state);
	attr->cur_qp_state = to_ib_qp_state(qhp->attr.state);
	init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
	init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
	init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
	init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
	init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
	init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
	return 0;
}

static void free_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
			   struct c4iw_wr_wait *wr_waitp)
{
	struct c4iw_rdev *rdev = &srq->rhp->rdev;
	struct sk_buff *skb = srq->destroy_skb;
	struct t4_srq *wq = &srq->wq;
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;

	wr_len = sizeof(*res_wr) + sizeof(*res);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) |
				      FW_RI_RES_WR_NRES_V(1) |
				      FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
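	/*
	 * The cookie is echoed back in the firmware completion and is how
	 * c4iw_ref_send_wait() pairs that completion with this waiter.
	 */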
	res_wr->cookie = (uintptr_t)wr_waitp;
	res = res_wr->res;
	res->u.srq.restype = FW_RI_RES_TYPE_SRQ;
	res->u.srq.op = FW_RI_RES_OP_RESET;
	res->u.srq.srqid = cpu_to_be32(srq->idx);
	res->u.srq.eqid = cpu_to_be32(wq->qid);

	c4iw_init_wr_wait(wr_waitp);
	c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);

	dma_free_coherent(&rdev->lldi.pdev->dev,
			  wq->memsize, wq->queue,
			  dma_unmap_addr(wq, mapping));
	c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
	kfree(wq->sw_rq);
	c4iw_put_qpid(rdev, wq->qid, uctx);
}

static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
			   struct c4iw_wr_wait *wr_waitp)
{
	struct c4iw_rdev *rdev = &srq->rhp->rdev;
	int user = (uctx != &rdev->uctx);
	struct t4_srq *wq = &srq->wq;
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	struct sk_buff *skb;
	int wr_len;
	int eqsize;
	int ret = -ENOMEM;

	wq->qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->qid)
		goto err;

	if (!user) {
		wq->sw_rq = kcalloc(wq->size, sizeof(*wq->sw_rq),
				    GFP_KERNEL);
		if (!wq->sw_rq)
			goto err_put_qpid;
		wq->pending_wrs = kcalloc(srq->wq.size,
					  sizeof(*srq->wq.pending_wrs),
					  GFP_KERNEL);
		if (!wq->pending_wrs)
			goto err_free_sw_rq;
	}

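	/*
	 * Reserve this SRQ's slice of the adapter-wide RQ table (RQT).
	 * rqt_abs_idx is the slice's absolute entry index within that
	 * table: the byte offset from the table base shifted down by the
	 * per-entry size.
	 */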
	wq->rqt_size = wq->size;
	wq->rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rqt_size);
	if (!wq->rqt_hwaddr)
		goto err_free_pending_wrs;
	wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >>
		T4_RQT_ENTRY_SHIFT;

	wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize,
				       &wq->dma_addr, GFP_KERNEL);
	if (!wq->queue)
		goto err_free_rqtpool;

	dma_unmap_addr_set(wq, mapping, wq->dma_addr);

	wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, CXGB4_BAR2_QTYPE_EGRESS,
				      &wq->bar2_qid,
				      user ? &wq->bar2_pa : NULL);

	/*
	 * User mode must have bar2 access.
	 */
	if (user && !wq->bar2_va) {
		pr_warn(MOD "%s: srqid %u not in BAR2 range.\n",
			pci_name(rdev->lldi.pdev), wq->qid);
		ret = -EINVAL;
		goto err_free_queue;
	}

	/* build fw_ri_res_wr */
	wr_len = sizeof(*res_wr) + sizeof(*res);

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb)
		goto err_free_queue;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) |
				      FW_RI_RES_WR_NRES_V(1) |
				      FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)wr_waitp;
	res = res_wr->res;
	res->u.srq.restype = FW_RI_RES_TYPE_SRQ;
	res->u.srq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->size * T4_RQ_NUM_SLOTS +
		 rdev->hw_queue.t4_eq_status_entries;
	res->u.srq.eqid = cpu_to_be32(wq->qid);
	res->u.srq.fetchszm_to_iqid =
		/* no host cidx updates */
		cpu_to_be32(FW_RI_RES_WR_HOSTFCMODE_V(0) |
			    FW_RI_RES_WR_CPRIO_V(0) |	/* don't keep in chip cache */
			    FW_RI_RES_WR_PCIECHN_V(0) |	/* set by uP at ri_init time */
			    FW_RI_RES_WR_FETCHRO_V(0));	/* relaxed_ordering */
	res->u.srq.dcaen_to_eqsize =
		cpu_to_be32(FW_RI_RES_WR_DCAEN_V(0) |
			    FW_RI_RES_WR_DCACPU_V(0) |
			    FW_RI_RES_WR_FBMIN_V(2) |
			    FW_RI_RES_WR_FBMAX_V(3) |
			    FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
			    FW_RI_RES_WR_CIDXFTHRESH_V(0) |
			    FW_RI_RES_WR_EQSIZE_V(eqsize));
	res->u.srq.eqaddr = cpu_to_be64(wq->dma_addr);
	res->u.srq.srqid = cpu_to_be32(srq->idx);
	res->u.srq.pdid = cpu_to_be32(srq->pdid);
	res->u.srq.hwsrqsize = cpu_to_be32(wq->rqt_size);
	res->u.srq.hwsrqaddr = cpu_to_be32(wq->rqt_hwaddr -
					   rdev->lldi.vr->rq.start);

	c4iw_init_wr_wait(wr_waitp);

	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->qid, __func__);
	if (ret)
		goto err_free_queue;

	pr_debug("%s srq %u eqid %u pdid %u queue va %p pa 0x%llx\n"
		 " bar2_addr %p rqt addr 0x%x size %d\n",
		 __func__, srq->idx, wq->qid, srq->pdid, wq->queue,
		 (u64)virt_to_phys(wq->queue), wq->bar2_va,
		 wq->rqt_hwaddr, wq->rqt_size);

	return 0;
err_free_queue:
	dma_free_coherent(&rdev->lldi.pdev->dev,
			  wq->memsize, wq->queue,
			  dma_unmap_addr(wq, mapping));
err_free_rqtpool:
	c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
err_free_pending_wrs:
	if (!user)
		kfree(wq->pending_wrs);
err_free_sw_rq:
	if (!user)
		kfree(wq->sw_rq);
err_put_qpid:
	c4iw_put_qpid(rdev, wq->qid, uctx);
err:
	return ret;
}

void c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16)
{
	u64 *src, *dst;

	src = (u64 *)wqe;
	dst = (u64 *)((u8 *)srq->queue + srq->wq_pidx * T4_EQ_ENTRY_SIZE);
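	/*
	 * len16 counts 16-byte units, so each iteration copies two u64
	 * words, wrapping dst back to the start of the ring whenever it
	 * runs past the last slot.
	 */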
	while (len16) {
		*dst++ = *src++;
		if (dst >= (u64 *)&srq->queue[srq->size])
			dst = (u64 *)srq->queue;
		*dst++ = *src++;
		if (dst >= (u64 *)&srq->queue[srq->size])
			dst = (u64 *)srq->queue;
		len16--;
	}
}

int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs,
		    struct ib_udata *udata)
{
	struct ib_pd *pd = ib_srq->pd;
	struct c4iw_dev *rhp;
	struct c4iw_srq *srq = to_c4iw_srq(ib_srq);
	struct c4iw_pd *php;
	struct c4iw_create_srq_resp uresp;
	struct c4iw_ucontext *ucontext;
	struct c4iw_mm_entry *srq_key_mm, *srq_db_key_mm;
	int rqsize;
	int ret;
	int wr_len;

	pr_debug("%s ib_pd %p\n", __func__, pd);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (!rhp->rdev.lldi.vr->srq.size)
		return -EINVAL;
	if (attrs->attr.max_wr > rhp->rdev.hw_queue.t4_max_rq_size)
		return -E2BIG;
	if (attrs->attr.max_sge > T4_MAX_RECV_SGE)
		return -E2BIG;

	/*
	 * SRQ RQT and RQ must be a power of 2 and at least 16 deep.
	 */
	rqsize = attrs->attr.max_wr + 1;
	rqsize = roundup_pow_of_two(max_t(u16, rqsize, 16));
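	/* e.g. max_wr = 100 gives rqsize = 101, rounded up to 128 */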

	ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
					     ibucontext);

	srq->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!srq->wr_waitp)
		return -ENOMEM;

	srq->idx = c4iw_alloc_srq_idx(&rhp->rdev);
	if (srq->idx < 0) {
		ret = -ENOMEM;
		goto err_free_wr_wait;
	}

	wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
	srq->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!srq->destroy_skb) {
		ret = -ENOMEM;
		goto err_free_srq_idx;
	}

	srq->rhp = rhp;
	srq->pdid = php->pdid;

	srq->wq.size = rqsize;
	srq->wq.memsize =
		(rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
		sizeof(*srq->wq.queue);
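	/*
	 * User-mode queues are handed to mmap() in their entirety, so
	 * round the buffer up to a whole number of pages.
	 */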
	if (ucontext)
		srq->wq.memsize = roundup(srq->wq.memsize, PAGE_SIZE);

	ret = alloc_srq_queue(srq, ucontext ? &ucontext->uctx :
			      &rhp->rdev.uctx, srq->wr_waitp);
	if (ret)
		goto err_free_skb;
	attrs->attr.max_wr = rqsize - 1;

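	/*
	 * Per-SRQ low-watermark (limit) events are only available on
	 * adapters newer than T6.
	 */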
	if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6)
		srq->flags = T4_SRQ_LIMIT_SUPPORT;

	if (udata) {
		srq_key_mm = kmalloc(sizeof(*srq_key_mm), GFP_KERNEL);
		if (!srq_key_mm) {
			ret = -ENOMEM;
			goto err_free_queue;
		}
		srq_db_key_mm = kmalloc(sizeof(*srq_db_key_mm), GFP_KERNEL);
		if (!srq_db_key_mm) {
			ret = -ENOMEM;
			goto err_free_srq_key_mm;
		}
		memset(&uresp, 0, sizeof(uresp));
		uresp.flags = srq->flags;
		uresp.qid_mask = rhp->rdev.qpmask;
		uresp.srqid = srq->wq.qid;
		uresp.srq_size = srq->wq.size;
		uresp.srq_memsize = srq->wq.memsize;
		uresp.rqt_abs_idx = srq->wq.rqt_abs_idx;
		spin_lock(&ucontext->mmap_lock);
		uresp.srq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.srq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (ret)
			goto err_free_srq_db_key_mm;
		srq_key_mm->key = uresp.srq_key;
		srq_key_mm->addr = virt_to_phys(srq->wq.queue);
		srq_key_mm->len = PAGE_ALIGN(srq->wq.memsize);
		insert_mmap(ucontext, srq_key_mm);
		srq_db_key_mm->key = uresp.srq_db_gts_key;
		srq_db_key_mm->addr = (u64)(unsigned long)srq->wq.bar2_pa;
		srq_db_key_mm->len = PAGE_SIZE;
		insert_mmap(ucontext, srq_db_key_mm);
	}

	pr_debug("%s srq qid %u idx %u size %u memsize %lu num_entries %u\n",
		 __func__, srq->wq.qid, srq->idx, srq->wq.size,
		 (unsigned long)srq->wq.memsize, attrs->attr.max_wr);

	spin_lock_init(&srq->lock);
	return 0;

err_free_srq_db_key_mm:
	kfree(srq_db_key_mm);
err_free_srq_key_mm:
	kfree(srq_key_mm);
err_free_queue:
	free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
		       srq->wr_waitp);
err_free_skb:
	kfree_skb(srq->destroy_skb);
err_free_srq_idx:
	c4iw_free_srq_idx(&rhp->rdev, srq->idx);
err_free_wr_wait:
	c4iw_put_wr_wait(srq->wr_waitp);
	return ret;
}
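
/*
 * Illustrative consumer-side sketch (an assumption, using the standard
 * verbs API rather than anything defined in this file): an application
 * reaches this path with something like
 *
 *	struct ibv_srq_init_attr init = {
 *		.attr = { .max_wr = 128, .max_sge = 1, .srq_limit = 16 },
 *	};
 *	struct ibv_srq *srq = ibv_create_srq(pd, &init);
 */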

int c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_srq *srq;
	struct c4iw_ucontext *ucontext;

	srq = to_c4iw_srq(ibsrq);
	rhp = srq->rhp;

	pr_debug("%s id %d\n", __func__, srq->wq.qid);
	ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
					     ibucontext);
	free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
		       srq->wr_waitp);
	c4iw_free_srq_idx(&rhp->rdev, srq->idx);
	c4iw_put_wr_wait(srq->wr_waitp);
	return 0;
}