/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/uverbs_ioctl.h>

#include "iw_cxgb4.h"

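/*
 * Issue a FW_RI_RES_WR RESET work request to tear down the hardware CQ,
 * wait for the firmware reply, then free the software queue, the DMA
 * queue memory, and finally the CQID itself.
 */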
static void destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		       struct c4iw_dev_ucontext *uctx, struct sk_buff *skb,
		       struct c4iw_wr_wait *wr_waitp)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;

	wr_len = sizeof(*res_wr) + sizeof(*res);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = __skb_put_zero(skb, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)wr_waitp;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_RESET;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);

	c4iw_init_wr_wait(wr_waitp);
	c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);

	kfree(cq->sw_queue);
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
	c4iw_put_cqid(rdev, cq->cqid, uctx);
}

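/*
 * Allocate a CQID and the queue memory, then issue a FW_RI_RES_WR WRITE
 * work request to create the hardware CQ. Kernel CQs also get a software
 * queue for flushed and reordered completions; user CQs instead require
 * a BAR2 doorbell mapping for the CQID.
 */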
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		     struct c4iw_dev_ucontext *uctx,
		     struct c4iw_wr_wait *wr_waitp)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	int ret;
	struct sk_buff *skb;
	struct c4iw_ucontext *ucontext = NULL;

	if (user)
		ucontext = container_of(uctx, struct c4iw_ucontext, uctx);

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}
	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
				       &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		ret = -ENOMEM;
		goto err3;
	}
	dma_unmap_addr_set(cq, mapping, cq->dma_addr);

	if (user && ucontext->is_32b_cqe) {
		cq->qp_errp = &((struct t4_status_page *)
				((u8 *)cq->queue + (cq->size - 1) *
				 (sizeof(*cq->queue) / 2)))->qp_err;
	} else {
		cq->qp_errp = &((struct t4_status_page *)
				((u8 *)cq->queue + (cq->size - 1) *
				 sizeof(*cq->queue)))->qp_err;
	}

	/* build fw_ri_res_wr */
	wr_len = sizeof(*res_wr) + sizeof(*res);

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err4;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = __skb_put_zero(skb, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)wr_waitp;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			FW_RI_RES_WR_IQANUS_V(0) |
			FW_RI_RES_WR_IQANUD_V(1) |
			FW_RI_RES_WR_IQANDST_F |
			FW_RI_RES_WR_IQANDSTINDEX_V(
				rdev->lldi.ciq_ids[cq->vector]));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			FW_RI_RES_WR_IQDROPRSS_F |
			FW_RI_RES_WR_IQPCIECH_V(2) |
			FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
			FW_RI_RES_WR_IQO_F |
			((user && ucontext->is_32b_cqe) ?
			 FW_RI_RES_WR_IQESIZE_V(1) :
			 FW_RI_RES_WR_IQESIZE_V(2)));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(wr_waitp);
	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = rdev->lldi.gts_reg;
	cq->rdev = rdev;

	cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, CXGB4_BAR2_QTYPE_INGRESS,
				      &cq->bar2_qid,
				      user ? &cq->bar2_pa : NULL);
	if (user && !cq->bar2_pa) {
		pr_warn("%s: cqid %u not in BAR2 range\n",
			pci_name(rdev->lldi.pdev), cq->cqid);
		ret = -EINVAL;
		goto err4;
	}
	return 0;
err4:
	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}

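/*
 * Synthesize a flushed RECV completion (T4_ERR_SWFLUSH) for the QP and
 * push it onto the software CQ. A non-zero srqidx records which SRQ
 * entry the flush applies to.
 */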
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq, u32 srqidx)
{
	struct t4_cqe cqe;

	pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
		 wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
				 CQE_OPCODE_V(FW_RI_SEND) |
				 CQE_TYPE_V(0) |
				 CQE_SWCQE_V(1) |
				 CQE_QPID_V(wq->sq.qid));
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	if (srqidx)
		cqe.u.srcqe.abs_rqe_idx = cpu_to_be32(srqidx);
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

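/*
 * Flush the RQ: insert one flushed RECV CQE per in-use RQ entry, skipping
 * the "count" entries already accounted for in the software CQ (as
 * computed by c4iw_count_rcqes()). Returns the number of CQEs inserted.
 */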
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
	int flushed = 0;
	int in_use = wq->rq.in_use - count;

	pr_debug("wq %p cq %p rq.in_use %u skip count %u\n",
		 wq, cq, wq->rq.in_use, count);
	while (in_use--) {
		insert_recv_cqe(wq, cq, 0);
		flushed++;
	}
	return flushed;
}

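/*
 * Synthesize a flushed SEND-side completion for the given software SQ
 * entry and push it onto the software CQ.
 */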
static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
			  struct t4_swsqe *swcqe)
{
	struct t4_cqe cqe;

	pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
		 wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
				 CQE_OPCODE_V(swcqe->opcode) |
				 CQE_TYPE_V(1) |
				 CQE_SWCQE_V(1) |
				 CQE_QPID_V(wq->sq.qid));
	CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

static void advance_oldest_read(struct t4_wq *wq);

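/*
 * Flush every SQ entry between flush_cidx and pidx into the software CQ,
 * advancing the oldest-read tracker past any flushed READ REQ. Returns
 * the number of entries flushed.
 */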
int c4iw_flush_sq(struct c4iw_qp *qhp)
{
	int flushed = 0;
	struct t4_wq *wq = &qhp->wq;
	struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
	struct t4_cq *cq = &chp->cq;
	int idx;
	struct t4_swsqe *swsqe;

	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	idx = wq->sq.flush_cidx;
	while (idx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[idx];
		swsqe->flushed = 1;
		insert_sq_cqe(wq, cq, swsqe);
		if (wq->sq.oldest_read == swsqe)
			advance_oldest_read(wq);
		flushed++;
		if (++idx == wq->sq.size)
			idx = 0;
	}
	wq->sq.flush_cidx += flushed;
	if (wq->sq.flush_cidx >= wq->sq.size)
		wq->sq.flush_cidx -= wq->sq.size;
	return flushed;
}

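/*
 * Walk the software SQ from flush_cidx and move signaled entries that
 * have already completed into the software CQ, skipping over unsignaled
 * entries. Stop at the first signaled entry that has not yet completed.
 */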
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_swsqe *swsqe;
	int cidx;

	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	cidx = wq->sq.flush_cidx;

	while (cidx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[cidx];
		if (!swsqe->signaled) {
			if (++cidx == wq->sq.size)
				cidx = 0;
		} else if (swsqe->complete) {

			/*
			 * Insert this completed cqe into the swcq.
			 */
			pr_debug("moving cqe into swcq sq idx %u cq idx %u\n",
				 cidx, cq->sw_pidx);
			swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
			t4_swcq_produce(cq);
			swsqe->flushed = 1;
			if (++cidx == wq->sq.size)
				cidx = 0;
			wq->sq.flush_cidx = cidx;
		} else
			break;
	}
}

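/*
 * Build a synthetic READ REQ completion for the oldest outstanding read,
 * since the hardware reports read completions against the RQ side (see
 * the comment above poll_cq() below).
 */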
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
				struct t4_cqe *read_cqe)
{
	read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
	read_cqe->len = htonl(wq->sq.oldest_read->read_len);
	read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) |
				 CQE_SWCQE_V(SW_CQE(hw_cqe)) |
				 CQE_OPCODE_V(FW_RI_READ_REQ) |
				 CQE_TYPE_V(1));
	read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}

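/*
 * Advance sq.oldest_read to the next READ REQ in the software SQ, or set
 * it to NULL if there are no more outstanding reads.
 */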
static void advance_oldest_read(struct t4_wq *wq)
{
	u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

	if (rptr == wq->sq.size)
		rptr = 0;
	while (rptr != wq->sq.pidx) {
		wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

		if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
			return;
		if (++rptr == wq->sq.size)
			rptr = 0;
	}
	wq->sq.oldest_read = NULL;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 * Deal with out-of-order completions and with completions that
 * complete prior unsignaled WRs.
 */
void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp)
{
	struct t4_cqe *hw_cqe, *swcqe, read_cqe;
	struct c4iw_qp *qhp;
	struct t4_swsqe *swsqe;
	int ret;

	pr_debug("cqid 0x%x\n", chp->cq.cqid);
	ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);

	/*
	 * This logic is similar to poll_cq(), but not quite the same
	 * unfortunately. Need to move pertinent HW CQEs to the SW CQ but
	 * also do any translation magic that poll_cq() normally does.
	 */
	while (!ret) {
		qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));

		/*
		 * drop CQEs with no associated QP
		 */
		if (qhp == NULL)
			goto next_cqe;

		if (flush_qhp != qhp) {
			spin_lock(&qhp->lock);

			if (qhp->wq.flushed == 1)
				goto next_cqe;
		}

		if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
			goto next_cqe;

		if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {

			/* If we have reached here because of an async
			 * event or other error, and have an egress error,
			 * then drop.
			 */
			if (CQE_TYPE(hw_cqe) == 1)
				goto next_cqe;

			/* drop peer2peer RTR reads.
			 */
			if (CQE_WRID_STAG(hw_cqe) == 1)
				goto next_cqe;

			/*
			 * Eat completions for unsignaled read WRs.
			 */
			if (!qhp->wq.sq.oldest_read->signaled) {
				advance_oldest_read(&qhp->wq);
				goto next_cqe;
			}

			/*
			 * Don't write to the HWCQ, create a new read req CQE
			 * in local memory and move it into the swcq.
			 */
			create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
			hw_cqe = &read_cqe;
			advance_oldest_read(&qhp->wq);
		}

		/* If it's an SQ completion, then do the magic to move all
		 * the unsignaled and now in-order completions into the swcq.
		 */
		if (SQ_TYPE(hw_cqe)) {
			swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
			swsqe->cqe = *hw_cqe;
			swsqe->complete = 1;
			flush_completed_wrs(&qhp->wq, &chp->cq);
		} else {
			swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
			*swcqe = *hw_cqe;
			swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1));
			t4_swcq_produce(&chp->cq);
		}
next_cqe:
		t4_hwcq_consume(&chp->cq);
		ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
		if (qhp && flush_qhp != qhp)
			spin_unlock(&qhp->lock);
	}
}

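/*
 * Decide whether a CQE corresponds to a user-visible work request.
 * Drain CQEs, TERMINATE CQEs, RQ-side write and SQ-side read artifacts,
 * and SENDs racing an already-empty RQ do not complete a WR; return 1
 * only when the CQE does.
 */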
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
	if (DRAIN_CQE(cqe)) {
		WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid);
		return 0;
	}

	if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
		return 0;

	if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
		return 0;
	return 1;
}

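/*
 * Count the software CQEs belonging to this QP's RQ that actually
 * complete a work request; used when flushing the RQ to know how many
 * entries are already accounted for.
 */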
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
	struct t4_cqe *cqe;
	u32 ptr;

	*count = 0;
	pr_debug("count zero %d\n", *count);
	ptr = cq->sw_cidx;
	while (ptr != cq->sw_pidx) {
		cqe = &cq->sw_queue[ptr];
		if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
		    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
			(*count)++;
		if (++ptr == cq->size)
			ptr = 0;
	}
	pr_debug("cq %p count %d\n", cq, *count);
}

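/*
 * Replay SRQ WRs that were queued while out-of-order completions were
 * outstanding: copy each pending WR into the SRQ, then ring the doorbell
 * once for the whole batch.
 */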
static void post_pending_srq_wrs(struct t4_srq *srq)
{
	struct t4_srq_pending_wr *pwr;
	u16 idx = 0;

	while (srq->pending_in_use) {
		pwr = &srq->pending_wrs[srq->pending_cidx];
		srq->sw_rq[srq->pidx].wr_id = pwr->wr_id;
		srq->sw_rq[srq->pidx].valid = 1;

		pr_debug("%s posting pending cidx %u pidx %u wq_pidx %u in_use %u rq_size %u wr_id %llx\n",
			 __func__,
			 srq->cidx, srq->pidx, srq->wq_pidx,
			 srq->in_use, srq->size,
			 (unsigned long long)pwr->wr_id);

		c4iw_copy_wr_to_srq(srq, &pwr->wqe, pwr->len16);
		t4_srq_consume_pending_wr(srq);
		t4_srq_produce(srq, pwr->len16);
		idx += DIV_ROUND_UP(pwr->len16 * 16, T4_EQ_ENTRY_SIZE);
	}

	if (idx) {
		t4_ring_srq_db(srq, idx, pwr->len16, &pwr->wqe);
		srq->queue[srq->size].status.host_wq_pidx =
			srq->wq_pidx;
	}
}

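/*
 * Consume the SRQ entry named by the CQE's absolute RQE index. An
 * in-order completion also retires any previously seen out-of-order
 * entries and replays pending WRs; an out-of-order completion is only
 * recorded. Returns the wr_id of the reaped entry.
 */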
static u64 reap_srq_cqe(struct t4_cqe *hw_cqe, struct t4_srq *srq)
{
	int rel_idx = CQE_ABS_RQE_IDX(hw_cqe) - srq->rqt_abs_idx;
	u64 wr_id;

	srq->sw_rq[rel_idx].valid = 0;
	wr_id = srq->sw_rq[rel_idx].wr_id;

	if (rel_idx == srq->cidx) {
		pr_debug("%s in order cqe rel_idx %u cidx %u pidx %u wq_pidx %u in_use %u rq_size %u wr_id %llx\n",
			 __func__, rel_idx, srq->cidx, srq->pidx,
			 srq->wq_pidx, srq->in_use, srq->size,
			 (unsigned long long)srq->sw_rq[rel_idx].wr_id);
		t4_srq_consume(srq);
		while (srq->ooo_count && !srq->sw_rq[srq->cidx].valid) {
			pr_debug("%s eat ooo cidx %u pidx %u wq_pidx %u in_use %u rq_size %u ooo_count %u wr_id %llx\n",
				 __func__, srq->cidx, srq->pidx,
				 srq->wq_pidx, srq->in_use,
				 srq->size, srq->ooo_count,
				 (unsigned long long)
				 srq->sw_rq[srq->cidx].wr_id);
			t4_srq_consume_ooo(srq);
		}
		if (srq->ooo_count == 0 && srq->pending_in_use)
			post_pending_srq_wrs(srq);
	} else {
		pr_debug("%s ooo cqe rel_idx %u cidx %u pidx %u wq_pidx %u in_use %u rq_size %u ooo_count %u wr_id %llx\n",
			 __func__, rel_idx, srq->cidx,
			 srq->pidx, srq->wq_pidx,
			 srq->in_use, srq->size,
			 srq->ooo_count,
			 (unsigned long long)srq->sw_rq[rel_idx].wr_id);
		t4_srq_produce_ooo(srq);
	}
	return wr_id;
}

/*
 * poll_cq
 *
 * Caller must:
 *	check the validity of the first CQE,
 *	supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *	0		CQE returned ok.
 *	-EAGAIN		CQE skipped, try again.
 *	-EOVERFLOW	CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
		   u8 *cqe_flushed, u64 *cookie, u32 *credit,
		   struct t4_srq *srq)
{
	int ret = 0;
	struct t4_cqe *hw_cqe, read_cqe;

	*cqe_flushed = 0;
	*credit = 0;
	ret = t4_next_cqe(cq, &hw_cqe);
	if (ret)
		return ret;

	pr_debug("CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
		 CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
		 CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
		 CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
		 CQE_WRID_LOW(hw_cqe));

	/*
	 * skip cqe's not affiliated with a QP.
	 */
	if (wq == NULL) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * skip hw cqe's if the wq is flushed.
	 */
	if (wq->flushed && !SW_CQE(hw_cqe)) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * skip TERMINATE cqes...
	 */
	if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * Special cqe for drain WR completions...
	 */
	if (DRAIN_CQE(hw_cqe)) {
		*cookie = CQE_DRAIN_COOKIE(hw_cqe);
		*cqe = *hw_cqe;
		goto skip_cqe;
	}

	/*
	 * Gotta tweak READ completions:
	 *	1) the cqe doesn't contain the sq_wptr from the wr.
	 *	2) opcode not reflected from the wr.
	 *	3) read_len not reflected from the wr.
	 *	4) cq_type is RQ_TYPE not SQ_TYPE.
	 */
	if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

		/* If we have reached here because of an async
		 * event or other error, and have an egress error,
		 * then drop.
		 */
		if (CQE_TYPE(hw_cqe) == 1) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq, 0);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/* If this is an unsolicited read response, then the read
		 * was generated by the kernel driver as part of peer-2-peer
		 * connection setup. So ignore the completion.
		 */
		if (CQE_WRID_STAG(hw_cqe) == 1) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq, 0);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Eat completions for unsignaled read WRs.
		 */
		if (!wq->sq.oldest_read->signaled) {
			advance_oldest_read(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Don't write to the HWCQ, so create a new read req CQE
		 * in local memory.
		 */
		create_read_req_cqe(wq, hw_cqe, &read_cqe);
		hw_cqe = &read_cqe;
		advance_oldest_read(wq);
	}

	if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
		*cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
		t4_set_wq_in_error(wq, 0);
	}

	/*
	 * RECV completion.
	 */
	if (RQ_TYPE(hw_cqe)) {

		/*
		 * HW only validates 4 bits of MSN. So we must validate that
		 * the MSN in the SEND is the next expected MSN. If it's not,
		 * then we complete this with T4_ERR_MSN and mark the wq in
		 * error.
		 */
		if (unlikely(!CQE_STATUS(hw_cqe) &&
			     CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
			t4_set_wq_in_error(wq, 0);
			hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN));
		}
		goto proc_cqe;
	}

	/*
	 * If we get here, it's a send completion.
	 *
	 * Handle out of order completion. These get stuffed
	 * in the SW SQ. Then the SW SQ is walked to move any
	 * now in-order completions into the SW CQ. This handles
	 * 2 cases:
	 *	1) reaping unsignaled WRs when the first subsequent
	 *	   signaled WR is completed.
	 *	2) out of order read completions.
	 */
	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
		struct t4_swsqe *swsqe;

		pr_debug("out of order completion going in sw_sq at idx %u\n",
			 CQE_WRID_SQ_IDX(hw_cqe));
		swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
		swsqe->cqe = *hw_cqe;
		swsqe->complete = 1;
		ret = -EAGAIN;
		goto flush_wq;
	}

proc_cqe:
	*cqe = *hw_cqe;

	/*
	 * Reap the associated WR(s) that are freed up with this
	 * completion.
	 */
	if (SQ_TYPE(hw_cqe)) {
		int idx = CQE_WRID_SQ_IDX(hw_cqe);

		/*
		 * Account for any unsignaled completions completed by
		 * this signaled completion. In this case, cidx points
		 * to the first unsignaled one, and idx points to the
		 * signaled one. So adjust in_use based on this delta.
		 * If this is not completing any unsignaled WRs, then the
		 * delta will be 0. Handle wrapping also!
		 */
		if (idx < wq->sq.cidx)
			wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
		else
			wq->sq.in_use -= idx - wq->sq.cidx;

		wq->sq.cidx = (uint16_t)idx;
		pr_debug("completing sq idx %u\n", wq->sq.cidx);
		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
		if (c4iw_wr_log)
			c4iw_log_wr_stats(wq, hw_cqe);
		t4_sq_consume(wq);
	} else {
		if (!srq) {
			pr_debug("completing rq idx %u\n", wq->rq.cidx);
			*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
			if (c4iw_wr_log)
				c4iw_log_wr_stats(wq, hw_cqe);
			t4_rq_consume(wq);
		} else {
			*cookie = reap_srq_cqe(hw_cqe, srq);
		}
		wq->rq.msn++;
		goto skip_cqe;
	}

flush_wq:
	/*
	 * Flush any completed cqes that are now in-order.
	 */
	flush_completed_wrs(wq, cq);

skip_cqe:
	if (SW_CQE(hw_cqe)) {
		pr_debug("cq %p cqid 0x%x skip sw cqe cidx %u\n",
			 cq, cq->cqid, cq->sw_cidx);
		t4_swcq_consume(cq);
	} else {
		pr_debug("cq %p cqid 0x%x skip hw cqe cidx %u\n",
			 cq, cq->cqid, cq->cidx);
		t4_hwcq_consume(cq);
	}
	return ret;
}

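/*
 * Poll one CQE (with the QP and SRQ locks already held by the caller)
 * and translate the raw t4_cqe into an ib_wc: map the firmware opcode
 * for RQ- and SQ-side completions and convert the T4 status into an
 * ib_wc status.
 */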
static int __c4iw_poll_cq_one(struct c4iw_cq *chp, struct c4iw_qp *qhp,
			      struct ib_wc *wc, struct c4iw_srq *srq)
{
	struct t4_cqe cqe;
	struct t4_wq *wq = qhp ? &qhp->wq : NULL;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie = 0;
	int ret;

	ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit,
		      srq ? &srq->wq : NULL);
	if (ret)
		goto out;

	wc->wr_id = cookie;
	wc->qp = qhp ? &qhp->ibqp : NULL;
	wc->vendor_err = CQE_STATUS(&cqe);
	wc->wc_flags = 0;

	/*
	 * Simulate a SRQ_LIMIT_REACHED HW notification if required.
	 */
	if (srq && !(srq->flags & T4_SRQ_LIMIT_SUPPORT) && srq->armed &&
	    srq->wq.in_use < srq->srq_limit)
		c4iw_dispatch_srq_limit_reached_event(srq);

	pr_debug("qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
		 CQE_QPID(&cqe),
		 CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
		 CQE_STATUS(&cqe), CQE_LEN(&cqe),
		 CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
		 (unsigned long long)cookie);

	if (CQE_TYPE(&cqe) == 0) {
		if (!CQE_STATUS(&cqe))
			wc->byte_len = CQE_LEN(&cqe);
		else
			wc->byte_len = 0;

		switch (CQE_OPCODE(&cqe)) {
		case FW_RI_SEND:
			wc->opcode = IB_WC_RECV;
			break;
		case FW_RI_SEND_WITH_INV:
		case FW_RI_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_RECV;
			wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
			break;
		case FW_RI_WRITE_IMMEDIATE:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->ex.imm_data = CQE_IMM_DATA(&cqe);
			wc->wc_flags |= IB_WC_WITH_IMM;
			break;
		default:
			pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
			goto out;
		}
	} else {
		switch (CQE_OPCODE(&cqe)) {
		case FW_RI_WRITE_IMMEDIATE:
		case FW_RI_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case FW_RI_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(&cqe);
			break;
		case FW_RI_SEND_WITH_INV:
		case FW_RI_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_SEND;
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			break;
		case FW_RI_SEND:
		case FW_RI_SEND_WITH_SE:
			wc->opcode = IB_WC_SEND;
			break;

		case FW_RI_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case FW_RI_FAST_REGISTER:
			wc->opcode = IB_WC_REG_MR;

			/* Invalidate the MR if the fastreg failed */
			if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS)
				c4iw_invalidate_mr(qhp->rhp,
						   CQE_WRID_FR_STAG(&cqe));
			break;
		default:
			pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {

		switch (CQE_STATUS(&cqe)) {
		case T4_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case T4_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case T4_ERR_QPID:
		case T4_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case T4_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case T4_ERR_INVALIDATE_SHARED_MR:
		case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case T4_ERR_CRC:
		case T4_ERR_MARKER:
		case T4_ERR_PDU_LEN_ERR:
		case T4_ERR_OUT_OF_RQE:
		case T4_ERR_DDP_VERSION:
		case T4_ERR_RDMA_VERSION:
		case T4_ERR_DDP_QUEUE_NUM:
		case T4_ERR_MSN:
		case T4_ERR_TBIT:
		case T4_ERR_MO:
		case T4_ERR_MSN_RANGE:
		case T4_ERR_IRD_OVERFLOW:
		case T4_ERR_OPCODE:
		case T4_ERR_INTERNAL_ERR:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case T4_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
			       CQE_STATUS(&cqe), CQE_QPID(&cqe));
			wc->status = IB_WC_FATAL_ERR;
		}
	}
out:
	return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *	0		cqe returned
 *	-ENODATA	CQ empty
 *	-EAGAIN		caller must try again
 *	any other -errno	fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
	struct c4iw_srq *srq = NULL;
	struct c4iw_qp *qhp = NULL;
	struct t4_cqe *rd_cqe;
	int ret;

	ret = t4_next_cqe(&chp->cq, &rd_cqe);

	if (ret)
		return ret;

	qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
	if (qhp) {
		spin_lock(&qhp->lock);
		srq = qhp->srq;
		if (srq)
			spin_lock(&srq->lock);
		ret = __c4iw_poll_cq_one(chp, qhp, wc, srq);
		spin_unlock(&qhp->lock);
		if (srq)
			spin_unlock(&srq->lock);
	} else {
		ret = __c4iw_poll_cq_one(chp, NULL, wc, NULL);
	}
	return ret;
}

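/*
 * Poll up to num_entries completions off the CQ under the CQ lock,
 * retrying internally whenever poll_cq asks to skip a CQE (-EAGAIN).
 */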
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct c4iw_cq *chp;
	unsigned long flags;
	int npolled;
	int err = 0;

	chp = to_c4iw_cq(ibcq);

	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		do {
			err = c4iw_poll_cq_one(chp, wc + npolled);
		} while (err == -EAGAIN);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&chp->lock, flags);
	return !err || err == -ENODATA ? npolled : err;
}

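/*
 * Drop the CQID from the lookup table, wait for all references to go
 * away, then tear down the hardware CQ and release its resources.
 */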
int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
	struct c4iw_cq *chp;
	struct c4iw_ucontext *ucontext;

	pr_debug("ib_cq %p\n", ib_cq);
	chp = to_c4iw_cq(ib_cq);

	xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
					     ibucontext);
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
		   chp->destroy_skb, chp->wr_waitp);
	c4iw_put_wr_wait(chp->wr_waitp);
	return 0;
}

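/*
 * Verbs entry point for CQ creation: validate the attributes, size the
 * hardware IQ (status page, full/empty slack, 16-entry alignment, 2x
 * depth, 64-entry minimum), create the HW CQ, and for user contexts hand
 * back mmap keys for the queue memory and the BAR2/GTS doorbell page.
 */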
int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		   struct ib_udata *udata)
{
	struct ib_device *ibdev = ibcq->device;
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct c4iw_dev *rhp = to_c4iw_dev(ibcq->device);
	struct c4iw_cq *chp = to_c4iw_cq(ibcq);
	struct c4iw_create_cq ucmd;
	struct c4iw_create_cq_resp uresp;
	int ret, wr_len;
	size_t memsize, hwentries;
	struct c4iw_mm_entry *mm, *mm2;
	struct c4iw_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct c4iw_ucontext, ibucontext);

	pr_debug("ib_dev %p entries %d\n", ibdev, entries);
	if (attr->flags)
		return -EINVAL;

	if (entries < 1 || entries > ibdev->attrs.max_cqe)
		return -EINVAL;

	if (vector >= rhp->rdev.lldi.nciq)
		return -EINVAL;

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			ucontext->is_32b_cqe = 1;
	}

	chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!chp->wr_waitp) {
		ret = -ENOMEM;
		goto err_free_chp;
	}
	c4iw_init_wr_wait(chp->wr_waitp);

	wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
	chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!chp->destroy_skb) {
		ret = -ENOMEM;
		goto err_free_wr_wait;
	}

	/* account for the status page. */
	entries++;

	/* IQ needs one extra entry to differentiate full vs empty. */
	entries++;

	/*
	 * entries must be multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);

	/*
	 * Make actual HW queue 2x to avoid cidx_inc overflows.
	 */
	hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);

	/*
	 * Make HW queue at least 64 entries so GTS updates aren't too
	 * frequent.
	 */
	if (hwentries < 64)
		hwentries = 64;

	memsize = hwentries * ((ucontext && ucontext->is_32b_cqe) ?
			(sizeof(*chp->cq.queue) / 2) : sizeof(*chp->cq.queue));

	/*
	 * memsize must be a multiple of the page size if it's a user CQ.
	 */
	if (udata)
		memsize = roundup(memsize, PAGE_SIZE);

	chp->cq.size = hwentries;
	chp->cq.memsize = memsize;
	chp->cq.vector = vector;

	ret = create_cq(&rhp->rdev, &chp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
			chp->wr_waitp);
	if (ret)
		goto err_free_skb;

	chp->rhp = rhp;
	chp->cq.size--;		/* status page */
	chp->ibcq.cqe = entries - 2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
	if (ret)
		goto err_destroy_cq;

	if (ucontext) {
		ret = -ENOMEM;
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (!mm)
			goto err_remove_handle;
		mm2 = kmalloc(sizeof(*mm2), GFP_KERNEL);
		if (!mm2)
			goto err_free_mm;

		memset(&uresp, 0, sizeof(uresp));
		uresp.qid_mask = rhp->rdev.cqmask;
		uresp.cqid = chp->cq.cqid;
		uresp.size = chp->cq.size;
		uresp.memsize = chp->cq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		/* communicate to the userspace that
		 * kernel driver supports 64B CQE
		 */
		uresp.flags |= C4IW_64B_CQE;

		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp,
				       ucontext->is_32b_cqe ?
				       sizeof(uresp) - sizeof(uresp.flags) :
				       sizeof(uresp));
		if (ret)
			goto err_free_mm2;

		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = chp->cq.memsize;
		insert_mmap(ucontext, mm);

		mm2->key = uresp.gts_key;
		mm2->addr = chp->cq.bar2_pa;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}

	pr_debug("cqid 0x%0x chp %p size %u memsize %zu, dma_addr %pad\n",
		 chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize,
		 &chp->cq.dma_addr);
	return 0;
err_free_mm2:
	kfree(mm2);
err_free_mm:
	kfree(mm);
err_remove_handle:
	xa_erase_irq(&rhp->cqs, chp->cq.cqid);
err_destroy_cq:
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
		   chp->destroy_skb, chp->wr_waitp);
err_free_skb:
	kfree_skb(chp->destroy_skb);
err_free_wr_wait:
	c4iw_put_wr_wait(chp->wr_waitp);
err_free_chp:
	return ret;
}

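/*
 * Arm the CQ for the next completion notification; with
 * IB_CQ_REPORT_MISSED_EVENTS, also report whether CQEs are already
 * pending.
 */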
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct c4iw_cq *chp;
	int ret = 0;
	unsigned long flag;

	chp = to_c4iw_cq(ibcq);
	spin_lock_irqsave(&chp->lock, flag);
	t4_arm_cq(&chp->cq,
		  (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
	if (flags & IB_CQ_REPORT_MISSED_EVENTS)
		ret = t4_cq_notempty(&chp->cq);
	spin_unlock_irqrestore(&chp->lock, flag);
	return ret;
}

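/*
 * Insert a flushed SRQ RECV CQE for srqidx on the QP's receive CQ,
 * taking the CQ lock before the QP lock.
 */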
void c4iw_flush_srqidx(struct c4iw_qp *qhp, u32 srqidx)
{
	struct c4iw_cq *rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
	unsigned long flag;

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, flag);
	spin_lock(&qhp->lock);

	/* create a SRQ RECV CQE for srqidx */
	insert_recv_cqe(&qhp->wq, &rchp->cq, srqidx);

	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, flag);
}