Lines Matching +full:supports +full:- +full:cqe (drivers/infiniband/hw/cxgb4/qp.c)

2  * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
14 * - Redistributions of source code must retain the above
18 * - Redistributions in binary form must reproduce the above
44 MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");
66 xa_lock_irq(&dev->qps); in alloc_ird()
67 if (ird <= dev->avail_ird) in alloc_ird()
68 dev->avail_ird -= ird; in alloc_ird()
70 ret = -ENOMEM; in alloc_ird()
71 xa_unlock_irq(&dev->qps); in alloc_ird()
74 dev_warn(&dev->rdev.lldi.pdev->dev, in alloc_ird()
82 xa_lock_irq(&dev->qps); in free_ird()
83 dev->avail_ird += ird; in free_ird()
84 xa_unlock_irq(&dev->qps); in free_ird()
90 spin_lock_irqsave(&qhp->lock, flag); in set_state()
91 qhp->attr.state = state; in set_state()
92 spin_unlock_irqrestore(&qhp->lock, flag); in set_state()
97 c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize); in dealloc_oc_sq()
102 dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue, in dealloc_host_sq()
116 if (!ocqp_support || !ocqp_supported(&rdev->lldi)) in alloc_oc_sq()
117 return -ENOSYS; in alloc_oc_sq()
118 sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize); in alloc_oc_sq()
119 if (!sq->dma_addr) in alloc_oc_sq()
120 return -ENOMEM; in alloc_oc_sq()
121 sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr - in alloc_oc_sq()
122 rdev->lldi.vr->ocq.start; in alloc_oc_sq()
123 sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr - in alloc_oc_sq()
124 rdev->lldi.vr->ocq.start); in alloc_oc_sq()
125 sq->flags |= T4_SQ_ONCHIP; in alloc_oc_sq()
131 sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize, in alloc_host_sq()
132 &(sq->dma_addr), GFP_KERNEL); in alloc_host_sq()
133 if (!sq->queue) in alloc_host_sq()
134 return -ENOMEM; in alloc_host_sq()
135 sq->phys_addr = virt_to_phys(sq->queue); in alloc_host_sq()
136 dma_unmap_addr_set(sq, mapping, sq->dma_addr); in alloc_host_sq()
142 int ret = -ENOSYS; in alloc_sq()
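
The ocqp_support parameter above, together with alloc_oc_sq() returning -ENOSYS when on-chip queues are unavailable and alloc_host_sq() using dma_alloc_coherent(), points at the allocation strategy behind this "ret = -ENOSYS" line: try the on-chip SQ pool first, then fall back to host DMA memory. Below is a minimal standalone sketch of that fallback shape, not the driver code; try_onchip_sq() and alloc_host_memory() are made-up stand-ins, and the "user QPs only" restriction is an assumption.

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for alloc_oc_sq() and alloc_host_sq(). */
static int try_onchip_sq(void)     { return -ENOSYS; }  /* e.g. ocqp_support == 0 */
static int alloc_host_memory(void) { return 0; }        /* dma_alloc_coherent() path */

static int alloc_sq_sketch(int user)
{
	int ret = -ENOSYS;

	if (user)                      /* assumption: only user QPs try the on-chip pool */
		ret = try_onchip_sq();
	if (ret)                       /* any failure falls back to host memory */
		ret = alloc_host_memory();
	return ret;
}

int main(void)
{
	printf("kernel qp: %d, user qp: %d\n", alloc_sq_sketch(0), alloc_sq_sketch(1));
	return 0;
}
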
157 dealloc_sq(rdev, &wq->sq); in destroy_qp()
158 kfree(wq->sq.sw_sq); in destroy_qp()
159 c4iw_put_qpid(rdev, wq->sq.qid, uctx); in destroy_qp()
162 dma_free_coherent(&rdev->lldi.pdev->dev, in destroy_qp()
163 wq->rq.memsize, wq->rq.queue, in destroy_qp()
164 dma_unmap_addr(&wq->rq, mapping)); in destroy_qp()
165 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); in destroy_qp()
166 kfree(wq->rq.sw_rq); in destroy_qp()
167 c4iw_put_qpid(rdev, wq->rq.qid, uctx); in destroy_qp()
174 * then this is a user mapping so compute the page-aligned physical address
184 ret = cxgb4_bar2_sge_qregs(rdev->lldi.ports[0], qid, qtype, in c4iw_bar2_addrs()
191 *pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK; in c4iw_bar2_addrs()
193 if (is_t4(rdev->lldi.adapter_type)) in c4iw_bar2_addrs()
196 return rdev->bar2_kva + bar2_qoffset; in c4iw_bar2_addrs()
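
The comment fragment at line 174 says a user mapping needs the page-aligned physical address, which is what the "& PAGE_MASK" line computes before the kernel virtual address is returned. Below is a small userspace illustration of that arithmetic only; the BAR2 base, queue offset and 4 KiB page size are made-up example values.

#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE 4096UL
#define EX_PAGE_MASK (~(EX_PAGE_SIZE - 1))

int main(void)
{
	uint64_t bar2_pa = 0xfb000000UL;  /* hypothetical BAR2 base physical address */
	uint64_t qoffset = 0x12a40UL;     /* hypothetical per-queue register offset */

	uint64_t map_pa  = (bar2_pa + qoffset) & EX_PAGE_MASK;       /* page handed to mmap() */
	uint64_t in_page = (bar2_pa + qoffset) & (EX_PAGE_SIZE - 1); /* offset inside that page */

	printf("page-aligned pa 0x%llx, offset in page 0x%llx\n",
	       (unsigned long long)map_pa, (unsigned long long)in_page);
	return 0;
}
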
205 int user = (uctx != &rdev->uctx); in create_qp()
213 wq->sq.qid = c4iw_get_qpid(rdev, uctx); in create_qp()
214 if (!wq->sq.qid) in create_qp()
215 return -ENOMEM; in create_qp()
218 wq->rq.qid = c4iw_get_qpid(rdev, uctx); in create_qp()
219 if (!wq->rq.qid) { in create_qp()
220 ret = -ENOMEM; in create_qp()
226 wq->sq.sw_sq = kcalloc(wq->sq.size, sizeof(*wq->sq.sw_sq), in create_qp()
228 if (!wq->sq.sw_sq) { in create_qp()
229 ret = -ENOMEM; in create_qp()
234 wq->rq.sw_rq = kcalloc(wq->rq.size, in create_qp()
235 sizeof(*wq->rq.sw_rq), in create_qp()
237 if (!wq->rq.sw_rq) { in create_qp()
238 ret = -ENOMEM; in create_qp()
248 wq->rq.rqt_size = in create_qp()
249 roundup_pow_of_two(max_t(u16, wq->rq.size, 16)); in create_qp()
250 wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size); in create_qp()
251 if (!wq->rq.rqt_hwaddr) { in create_qp()
252 ret = -ENOMEM; in create_qp()
257 ret = alloc_sq(rdev, &wq->sq, user); in create_qp()
260 memset(wq->sq.queue, 0, wq->sq.memsize); in create_qp()
261 dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr); in create_qp()
264 wq->rq.queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, in create_qp()
265 wq->rq.memsize, in create_qp()
266 &wq->rq.dma_addr, in create_qp()
268 if (!wq->rq.queue) { in create_qp()
269 ret = -ENOMEM; in create_qp()
273 wq->sq.queue, in create_qp()
274 (unsigned long long)virt_to_phys(wq->sq.queue), in create_qp()
275 wq->rq.queue, in create_qp()
276 (unsigned long long)virt_to_phys(wq->rq.queue)); in create_qp()
277 dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr); in create_qp()
280 wq->db = rdev->lldi.db_reg; in create_qp()
282 wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid, in create_qp()
284 &wq->sq.bar2_qid, in create_qp()
285 user ? &wq->sq.bar2_pa : NULL); in create_qp()
287 wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid, in create_qp()
289 &wq->rq.bar2_qid, in create_qp()
290 user ? &wq->rq.bar2_pa : NULL); in create_qp()
295 if (user && (!wq->sq.bar2_pa || (need_rq && !wq->rq.bar2_pa))) { in create_qp()
297 pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid); in create_qp()
298 ret = -EINVAL; in create_qp()
302 wq->rdev = rdev; in create_qp()
303 wq->rq.msn = 1; in create_qp()
311 ret = -ENOMEM; in create_qp()
317 res_wr->op_nres = cpu_to_be32( in create_qp()
321 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); in create_qp()
322 res_wr->cookie = (uintptr_t)wr_waitp; in create_qp()
323 res = res_wr->res; in create_qp()
324 res->u.sqrq.restype = FW_RI_RES_TYPE_SQ; in create_qp()
325 res->u.sqrq.op = FW_RI_RES_OP_WRITE; in create_qp()
330 eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + in create_qp()
331 rdev->hw_queue.t4_eq_status_entries; in create_qp()
333 res->u.sqrq.fetchszm_to_iqid = cpu_to_be32( in create_qp()
337 (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) | in create_qp()
338 FW_RI_RES_WR_IQID_V(scq->cqid)); in create_qp()
339 res->u.sqrq.dcaen_to_eqsize = cpu_to_be32( in create_qp()
343 (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) : in create_qp()
348 res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid); in create_qp()
349 res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr); in create_qp()
353 res->u.sqrq.restype = FW_RI_RES_TYPE_RQ; in create_qp()
354 res->u.sqrq.op = FW_RI_RES_OP_WRITE; in create_qp()
359 eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + in create_qp()
360 rdev->hw_queue.t4_eq_status_entries; in create_qp()
361 res->u.sqrq.fetchszm_to_iqid = in create_qp()
368 FW_RI_RES_WR_IQID_V(rcq->cqid)); in create_qp()
369 res->u.sqrq.dcaen_to_eqsize = in create_qp()
377 res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid); in create_qp()
378 res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr); in create_qp()
382 ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->sq.qid, __func__); in create_qp()
387 wq->sq.qid, wq->rq.qid, wq->db, in create_qp()
388 wq->sq.bar2_va, wq->rq.bar2_va); in create_qp()
393 dma_free_coherent(&rdev->lldi.pdev->dev, in create_qp()
394 wq->rq.memsize, wq->rq.queue, in create_qp()
395 dma_unmap_addr(&wq->rq, mapping)); in create_qp()
397 dealloc_sq(rdev, &wq->sq); in create_qp()
400 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); in create_qp()
403 kfree(wq->rq.sw_rq); in create_qp()
405 kfree(wq->sq.sw_sq); in create_qp()
408 c4iw_put_qpid(rdev, wq->rq.qid, uctx); in create_qp()
410 c4iw_put_qpid(rdev, wq->sq.qid, uctx); in create_qp()
422 dstp = (u8 *)immdp->data; in build_immd()
423 for (i = 0; i < wr->num_sge; i++) { in build_immd()
424 if ((plen + wr->sg_list[i].length) > max) in build_immd()
425 return -EMSGSIZE; in build_immd()
426 srcp = (u8 *)(unsigned long)wr->sg_list[i].addr; in build_immd()
427 plen += wr->sg_list[i].length; in build_immd()
428 rem = wr->sg_list[i].length; in build_immd()
430 if (dstp == (u8 *)&sq->queue[sq->size]) in build_immd()
431 dstp = (u8 *)sq->queue; in build_immd()
432 if (rem <= (u8 *)&sq->queue[sq->size] - dstp) in build_immd()
435 len = (u8 *)&sq->queue[sq->size] - dstp; in build_immd()
439 rem -= len; in build_immd()
442 len = roundup(plen + sizeof(*immdp), 16) - (plen + sizeof(*immdp)); in build_immd()
445 immdp->op = FW_RI_DATA_IMMD; in build_immd()
446 immdp->r1 = 0; in build_immd()
447 immdp->r2 = 0; in build_immd()
448 immdp->immdlen = cpu_to_be32(plen); in build_immd()
465 flitp = (__be64 *)isglp->sge; in build_isgl()
469 return -EMSGSIZE; in build_isgl()
480 isglp->op = FW_RI_DATA_ISGL; in build_isgl()
481 isglp->r1 = 0; in build_isgl()
482 isglp->nsge = cpu_to_be16(num_sge); in build_isgl()
483 isglp->r2 = 0; in build_isgl()
496 if (wr->num_sge > T4_MAX_SEND_SGE) in build_rdma_send()
497 return -EINVAL; in build_rdma_send()
498 switch (wr->opcode) { in build_rdma_send()
500 if (wr->send_flags & IB_SEND_SOLICITED) in build_rdma_send()
501 wqe->send.sendop_pkd = cpu_to_be32( in build_rdma_send()
504 wqe->send.sendop_pkd = cpu_to_be32( in build_rdma_send()
506 wqe->send.stag_inv = 0; in build_rdma_send()
509 if (wr->send_flags & IB_SEND_SOLICITED) in build_rdma_send()
510 wqe->send.sendop_pkd = cpu_to_be32( in build_rdma_send()
513 wqe->send.sendop_pkd = cpu_to_be32( in build_rdma_send()
515 wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); in build_rdma_send()
519 return -EINVAL; in build_rdma_send()
521 wqe->send.r3 = 0; in build_rdma_send()
522 wqe->send.r4 = 0; in build_rdma_send()
525 if (wr->num_sge) { in build_rdma_send()
526 if (wr->send_flags & IB_SEND_INLINE) { in build_rdma_send()
527 ret = build_immd(sq, wqe->send.u.immd_src, wr, in build_rdma_send()
531 size = sizeof(wqe->send) + sizeof(struct fw_ri_immd) + in build_rdma_send()
534 ret = build_isgl((__be64 *)sq->queue, in build_rdma_send()
535 (__be64 *)&sq->queue[sq->size], in build_rdma_send()
536 wqe->send.u.isgl_src, in build_rdma_send()
537 wr->sg_list, wr->num_sge, &plen); in build_rdma_send()
540 size = sizeof(wqe->send) + sizeof(struct fw_ri_isgl) + in build_rdma_send()
541 wr->num_sge * sizeof(struct fw_ri_sge); in build_rdma_send()
544 wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD; in build_rdma_send()
545 wqe->send.u.immd_src[0].r1 = 0; in build_rdma_send()
546 wqe->send.u.immd_src[0].r2 = 0; in build_rdma_send()
547 wqe->send.u.immd_src[0].immdlen = 0; in build_rdma_send()
548 size = sizeof(wqe->send) + sizeof(struct fw_ri_immd); in build_rdma_send()
552 wqe->send.plen = cpu_to_be32(plen); in build_rdma_send()
563 if (wr->num_sge > T4_MAX_SEND_SGE) in build_rdma_write()
564 return -EINVAL; in build_rdma_write()
567 * iWARP protocol supports 64 bit immediate data but rdma api in build_rdma_write()
570 if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) in build_rdma_write()
571 wqe->write.iw_imm_data.ib_imm_data.imm_data32 = wr->ex.imm_data; in build_rdma_write()
573 wqe->write.iw_imm_data.ib_imm_data.imm_data32 = 0; in build_rdma_write()
574 wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey); in build_rdma_write()
575 wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr); in build_rdma_write()
576 if (wr->num_sge) { in build_rdma_write()
577 if (wr->send_flags & IB_SEND_INLINE) { in build_rdma_write()
578 ret = build_immd(sq, wqe->write.u.immd_src, wr, in build_rdma_write()
582 size = sizeof(wqe->write) + sizeof(struct fw_ri_immd) + in build_rdma_write()
585 ret = build_isgl((__be64 *)sq->queue, in build_rdma_write()
586 (__be64 *)&sq->queue[sq->size], in build_rdma_write()
587 wqe->write.u.isgl_src, in build_rdma_write()
588 wr->sg_list, wr->num_sge, &plen); in build_rdma_write()
591 size = sizeof(wqe->write) + sizeof(struct fw_ri_isgl) + in build_rdma_write()
592 wr->num_sge * sizeof(struct fw_ri_sge); in build_rdma_write()
595 wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD; in build_rdma_write()
596 wqe->write.u.immd_src[0].r1 = 0; in build_rdma_write()
597 wqe->write.u.immd_src[0].r2 = 0; in build_rdma_write()
598 wqe->write.u.immd_src[0].immdlen = 0; in build_rdma_write()
599 size = sizeof(wqe->write) + sizeof(struct fw_ri_immd); in build_rdma_write()
603 wqe->write.plen = cpu_to_be32(plen); in build_rdma_write()
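
The truncated comment at line 567 is making the point that the iWARP wire format leaves room for 64 bits of immediate data while the RDMA API (ib_send_wr ex.imm_data) only carries 32 bits, which is why only an imm_data32 sub-field is assigned above. Below is a compilable toy model of that narrowing; the union name and layout are illustrative only, not the fw_ri_* wire layout.

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the WR's immediate-data container. */
union toy_iw_imm_data {
	uint64_t imm_data64;            /* what the iWARP wire format could carry */
	struct {
		uint32_t imm_data32;    /* what the verbs API actually provides */
		uint32_t unused;
	} ib_imm_data;
};

int main(void)
{
	union toy_iw_imm_data d = { 0 };
	uint32_t api_imm = 0x12345678;  /* pretend ib_send_wr ex.imm_data */

	d.ib_imm_data.imm_data32 = api_imm;  /* only 32 of the 64 bits get used */
	printf("imm_data32 = 0x%x\n", d.ib_imm_data.imm_data32);
	return 0;
}
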
610 memcpy((u8 *)immdp->data, (u8 *)(uintptr_t)wr->sg_list->addr, 16); in build_immd_cmpl()
611 memset(immdp->r1, 0, 6); in build_immd_cmpl()
612 immdp->op = FW_RI_DATA_IMMD; in build_immd_cmpl()
613 immdp->immdlen = 16; in build_immd_cmpl()
634 wcwr->stag_sink = cpu_to_be32(rdma_wr(wr)->rkey); in build_rdma_write_cmpl()
635 wcwr->to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr); in build_rdma_write_cmpl()
636 if (wr->next->opcode == IB_WR_SEND) in build_rdma_write_cmpl()
637 wcwr->stag_inv = 0; in build_rdma_write_cmpl()
639 wcwr->stag_inv = cpu_to_be32(wr->next->ex.invalidate_rkey); in build_rdma_write_cmpl()
640 wcwr->r2 = 0; in build_rdma_write_cmpl()
641 wcwr->r3 = 0; in build_rdma_write_cmpl()
644 if (wr->next->send_flags & IB_SEND_INLINE) in build_rdma_write_cmpl()
645 build_immd_cmpl(sq, &wcwr->u_cmpl.immd_src, wr->next); in build_rdma_write_cmpl()
647 build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size], in build_rdma_write_cmpl()
648 &wcwr->u_cmpl.isgl_src, wr->next->sg_list, 1, NULL); in build_rdma_write_cmpl()
651 build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size], in build_rdma_write_cmpl()
652 wcwr->u.isgl_src, wr->sg_list, wr->num_sge, &plen); in build_rdma_write_cmpl()
655 wr->num_sge * sizeof(struct fw_ri_sge); in build_rdma_write_cmpl()
656 wcwr->plen = cpu_to_be32(plen); in build_rdma_write_cmpl()
663 if (wr->num_sge > 1) in build_rdma_read()
664 return -EINVAL; in build_rdma_read()
665 if (wr->num_sge && wr->sg_list[0].length) { in build_rdma_read()
666 wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey); in build_rdma_read()
667 wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr in build_rdma_read()
669 wqe->read.to_src_lo = cpu_to_be32((u32)rdma_wr(wr)->remote_addr); in build_rdma_read()
670 wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey); in build_rdma_read()
671 wqe->read.plen = cpu_to_be32(wr->sg_list[0].length); in build_rdma_read()
672 wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr in build_rdma_read()
674 wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr)); in build_rdma_read()
676 wqe->read.stag_src = cpu_to_be32(2); in build_rdma_read()
677 wqe->read.to_src_hi = 0; in build_rdma_read()
678 wqe->read.to_src_lo = 0; in build_rdma_read()
679 wqe->read.stag_sink = cpu_to_be32(2); in build_rdma_read()
680 wqe->read.plen = 0; in build_rdma_read()
681 wqe->read.to_sink_hi = 0; in build_rdma_read()
682 wqe->read.to_sink_lo = 0; in build_rdma_read()
684 wqe->read.r2 = 0; in build_rdma_read()
685 wqe->read.r5 = 0; in build_rdma_read()
686 *len16 = DIV_ROUND_UP(sizeof(wqe->read), 16); in build_rdma_read()
692 bool send_signaled = (wr->next->send_flags & IB_SEND_SIGNALED) || in post_write_cmpl()
693 qhp->sq_sig_all; in post_write_cmpl()
694 bool write_signaled = (wr->send_flags & IB_SEND_SIGNALED) || in post_write_cmpl()
695 qhp->sq_sig_all; in post_write_cmpl()
704 * 2 slots. The FW WR, however, will be a single uber-WR. in post_write_cmpl()
706 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue + in post_write_cmpl()
707 qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE); in post_write_cmpl()
708 build_rdma_write_cmpl(&qhp->wq.sq, &wqe->write_cmpl, wr, &len16); in post_write_cmpl()
711 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx]; in post_write_cmpl()
712 swsqe->opcode = FW_RI_RDMA_WRITE; in post_write_cmpl()
713 swsqe->idx = qhp->wq.sq.pidx; in post_write_cmpl()
714 swsqe->complete = 0; in post_write_cmpl()
715 swsqe->signaled = write_signaled; in post_write_cmpl()
716 swsqe->flushed = 0; in post_write_cmpl()
717 swsqe->wr_id = wr->wr_id; in post_write_cmpl()
719 swsqe->sge_ts = in post_write_cmpl()
720 cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]); in post_write_cmpl()
721 swsqe->host_time = ktime_get(); in post_write_cmpl()
724 write_wrid = qhp->wq.sq.pidx; in post_write_cmpl()
727 qhp->wq.sq.in_use++; in post_write_cmpl()
728 if (++qhp->wq.sq.pidx == qhp->wq.sq.size) in post_write_cmpl()
729 qhp->wq.sq.pidx = 0; in post_write_cmpl()
732 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx]; in post_write_cmpl()
733 if (wr->next->opcode == IB_WR_SEND) in post_write_cmpl()
734 swsqe->opcode = FW_RI_SEND; in post_write_cmpl()
736 swsqe->opcode = FW_RI_SEND_WITH_INV; in post_write_cmpl()
737 swsqe->idx = qhp->wq.sq.pidx; in post_write_cmpl()
738 swsqe->complete = 0; in post_write_cmpl()
739 swsqe->signaled = send_signaled; in post_write_cmpl()
740 swsqe->flushed = 0; in post_write_cmpl()
741 swsqe->wr_id = wr->next->wr_id; in post_write_cmpl()
743 swsqe->sge_ts = in post_write_cmpl()
744 cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]); in post_write_cmpl()
745 swsqe->host_time = ktime_get(); in post_write_cmpl()
748 wqe->write_cmpl.flags_send = send_signaled ? FW_RI_COMPLETION_FLAG : 0; in post_write_cmpl()
749 wqe->write_cmpl.wrid_send = qhp->wq.sq.pidx; in post_write_cmpl()
753 t4_sq_produce(&qhp->wq, len16); in post_write_cmpl()
756 t4_ring_sq_db(&qhp->wq, idx, wqe); in post_write_cmpl()
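
The "2 slots ... single uber-WR" comment at line 704 describes the trick in post_write_cmpl(): two software SQ entries are consumed, one tracking the WRITE and one tracking the SEND, so each application WR can complete individually, while only one combined write_cmpl firmware WR is built and rung. Below is a toy model of that bookkeeping; the types and ring size are stand-ins, not the t4_* structures.

#include <stdint.h>
#include <stdio.h>

struct toy_swsqe { uint64_t wr_id; };

struct toy_sq {
	struct toy_swsqe sw[16];
	uint16_t pidx, size, in_use;
	int fw_wrs_written;
};

/* Consume two SW slots but emit a single combined firmware WR. */
static void post_write_cmpl_toy(struct toy_sq *sq, uint64_t write_id, uint64_t send_id)
{
	sq->sw[sq->pidx].wr_id = write_id;        /* slot for the RDMA WRITE half */
	sq->in_use++;
	if (++sq->pidx == sq->size)
		sq->pidx = 0;

	sq->sw[sq->pidx].wr_id = send_id;         /* slot for the SEND half */
	sq->in_use++;
	if (++sq->pidx == sq->size)
		sq->pidx = 0;

	sq->fw_wrs_written++;                     /* one "uber" WR for the hardware */
}

int main(void)
{
	struct toy_sq sq = { .size = 16 };

	post_write_cmpl_toy(&sq, 100, 101);
	printf("sw slots used=%u, fw wrs=%d\n", sq.in_use, sq.fw_wrs_written);
	return 0;
}
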
764 ret = build_isgl((__be64 *)qhp->wq.rq.queue, in build_rdma_recv()
765 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size], in build_rdma_recv()
766 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL); in build_rdma_recv()
770 sizeof(wqe->recv) + wr->num_sge * sizeof(struct fw_ri_sge), 16); in build_rdma_recv()
780 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL); in build_srq_recv()
783 *len16 = DIV_ROUND_UP(sizeof(wqe->recv) + in build_srq_recv()
784 wr->num_sge * sizeof(struct fw_ri_sge), 16); in build_srq_recv()
792 __be64 *p = (__be64 *)fr->pbl; in build_tpte_memreg()
794 fr->r2 = cpu_to_be32(0); in build_tpte_memreg()
795 fr->stag = cpu_to_be32(mhp->ibmr.rkey); in build_tpte_memreg()
797 fr->tpte.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F | in build_tpte_memreg()
798 FW_RI_TPTE_STAGKEY_V((mhp->ibmr.rkey & FW_RI_TPTE_STAGKEY_M)) | in build_tpte_memreg()
801 FW_RI_TPTE_PDID_V(mhp->attr.pdid)); in build_tpte_memreg()
802 fr->tpte.locread_to_qpid = cpu_to_be32( in build_tpte_memreg()
803 FW_RI_TPTE_PERM_V(c4iw_ib_to_tpt_access(wr->access)) | in build_tpte_memreg()
805 FW_RI_TPTE_PS_V(ilog2(wr->mr->page_size) - 12)); in build_tpte_memreg()
806 fr->tpte.nosnoop_pbladdr = cpu_to_be32(FW_RI_TPTE_PBLADDR_V( in build_tpte_memreg()
807 PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr)>>3)); in build_tpte_memreg()
808 fr->tpte.dca_mwbcnt_pstag = cpu_to_be32(0); in build_tpte_memreg()
809 fr->tpte.len_hi = cpu_to_be32(0); in build_tpte_memreg()
810 fr->tpte.len_lo = cpu_to_be32(mhp->ibmr.length); in build_tpte_memreg()
811 fr->tpte.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32); in build_tpte_memreg()
812 fr->tpte.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff); in build_tpte_memreg()
814 p[0] = cpu_to_be64((u64)mhp->mpl[0]); in build_tpte_memreg()
815 p[1] = cpu_to_be64((u64)mhp->mpl[1]); in build_tpte_memreg()
827 int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32); in build_memreg()
830 if (mhp->mpl_len > t4_max_fr_depth(dsgl_supported && use_dsgl)) in build_memreg()
831 return -EINVAL; in build_memreg()
833 wqe->fr.qpbinde_to_dcacpu = 0; in build_memreg()
834 wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12; in build_memreg()
835 wqe->fr.addr_type = FW_RI_VA_BASED_TO; in build_memreg()
836 wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access); in build_memreg()
837 wqe->fr.len_hi = 0; in build_memreg()
838 wqe->fr.len_lo = cpu_to_be32(mhp->ibmr.length); in build_memreg()
839 wqe->fr.stag = cpu_to_be32(wr->key); in build_memreg()
840 wqe->fr.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32); in build_memreg()
841 wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & in build_memreg()
847 for (i = 0; i < mhp->mpl_len; i++) in build_memreg()
848 mhp->mpl[i] = (__force u64)cpu_to_be64((u64)mhp->mpl[i]); in build_memreg()
850 sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1); in build_memreg()
851 sglp->op = FW_RI_DATA_DSGL; in build_memreg()
852 sglp->r1 = 0; in build_memreg()
853 sglp->nsge = cpu_to_be16(1); in build_memreg()
854 sglp->addr0 = cpu_to_be64(mhp->mpl_addr); in build_memreg()
855 sglp->len0 = cpu_to_be32(pbllen); in build_memreg()
857 *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16); in build_memreg()
859 imdp = (struct fw_ri_immd *)(&wqe->fr + 1); in build_memreg()
860 imdp->op = FW_RI_DATA_IMMD; in build_memreg()
861 imdp->r1 = 0; in build_memreg()
862 imdp->r2 = 0; in build_memreg()
863 imdp->immdlen = cpu_to_be32(pbllen); in build_memreg()
866 for (i = 0; i < mhp->mpl_len; i++) { in build_memreg()
867 *p = cpu_to_be64((u64)mhp->mpl[i]); in build_memreg()
868 rem -= sizeof(*p); in build_memreg()
869 if (++p == (__be64 *)&sq->queue[sq->size]) in build_memreg()
870 p = (__be64 *)sq->queue; in build_memreg()
874 rem -= sizeof(*p); in build_memreg()
875 if (++p == (__be64 *)&sq->queue[sq->size]) in build_memreg()
876 p = (__be64 *)sq->queue; in build_memreg()
878 *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp) in build_memreg()
887 wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); in build_inv_stag()
888 wqe->inv.r2 = 0; in build_inv_stag()
889 *len16 = DIV_ROUND_UP(sizeof(wqe->inv), 16); in build_inv_stag()
896 refcount_inc(&to_c4iw_qp(qp)->qp_refcnt); in c4iw_qp_add_ref()
902 if (refcount_dec_and_test(&to_c4iw_qp(qp)->qp_refcnt)) in c4iw_qp_rem_ref()
903 complete(&to_c4iw_qp(qp)->qp_rel_comp); in c4iw_qp_rem_ref()
916 xa_lock_irqsave(&qhp->rhp->qps, flags); in ring_kernel_sq_db()
917 spin_lock(&qhp->lock); in ring_kernel_sq_db()
918 if (qhp->rhp->db_state == NORMAL) in ring_kernel_sq_db()
919 t4_ring_sq_db(&qhp->wq, inc, NULL); in ring_kernel_sq_db()
921 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); in ring_kernel_sq_db()
922 qhp->wq.sq.wq_pidx_inc += inc; in ring_kernel_sq_db()
924 spin_unlock(&qhp->lock); in ring_kernel_sq_db()
925 xa_unlock_irqrestore(&qhp->rhp->qps, flags); in ring_kernel_sq_db()
933 xa_lock_irqsave(&qhp->rhp->qps, flags); in ring_kernel_rq_db()
934 spin_lock(&qhp->lock); in ring_kernel_rq_db()
935 if (qhp->rhp->db_state == NORMAL) in ring_kernel_rq_db()
936 t4_ring_rq_db(&qhp->wq, inc, NULL); in ring_kernel_rq_db()
938 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); in ring_kernel_rq_db()
939 qhp->wq.rq.wq_pidx_inc += inc; in ring_kernel_rq_db()
941 spin_unlock(&qhp->lock); in ring_kernel_rq_db()
942 xa_unlock_irqrestore(&qhp->rhp->qps, flags); in ring_kernel_rq_db()
974 opcode = -EINVAL; in ib_to_fw_opcode()
982 struct t4_cqe cqe = {}; in complete_sq_drain_wr() local
988 schp = to_c4iw_cq(qhp->ibqp.send_cq); in complete_sq_drain_wr()
989 cq = &schp->cq; in complete_sq_drain_wr()
991 opcode = ib_to_fw_opcode(wr->opcode); in complete_sq_drain_wr()
995 cqe.u.drain_cookie = wr->wr_id; in complete_sq_drain_wr()
996 cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | in complete_sq_drain_wr()
1001 CQE_QPID_V(qhp->wq.sq.qid)); in complete_sq_drain_wr()
1003 spin_lock_irqsave(&schp->lock, flag); in complete_sq_drain_wr()
1004 cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); in complete_sq_drain_wr()
1005 cq->sw_queue[cq->sw_pidx] = cqe; in complete_sq_drain_wr()
1007 spin_unlock_irqrestore(&schp->lock, flag); in complete_sq_drain_wr()
1009 if (t4_clear_cq_armed(&schp->cq)) { in complete_sq_drain_wr()
1010 spin_lock_irqsave(&schp->comp_handler_lock, flag); in complete_sq_drain_wr()
1011 (*schp->ibcq.comp_handler)(&schp->ibcq, in complete_sq_drain_wr()
1012 schp->ibcq.cq_context); in complete_sq_drain_wr()
1013 spin_unlock_irqrestore(&schp->comp_handler_lock, flag); in complete_sq_drain_wr()
1030 wr = wr->next; in complete_sq_drain_wrs()
1038 struct t4_cqe cqe = {}; in complete_rq_drain_wr() local
1043 rchp = to_c4iw_cq(qhp->ibqp.recv_cq); in complete_rq_drain_wr()
1044 cq = &rchp->cq; in complete_rq_drain_wr()
1046 cqe.u.drain_cookie = wr->wr_id; in complete_rq_drain_wr()
1047 cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | in complete_rq_drain_wr()
1052 CQE_QPID_V(qhp->wq.sq.qid)); in complete_rq_drain_wr()
1054 spin_lock_irqsave(&rchp->lock, flag); in complete_rq_drain_wr()
1055 cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); in complete_rq_drain_wr()
1056 cq->sw_queue[cq->sw_pidx] = cqe; in complete_rq_drain_wr()
1058 spin_unlock_irqrestore(&rchp->lock, flag); in complete_rq_drain_wr()
1060 if (t4_clear_cq_armed(&rchp->cq)) { in complete_rq_drain_wr()
1061 spin_lock_irqsave(&rchp->comp_handler_lock, flag); in complete_rq_drain_wr()
1062 (*rchp->ibcq.comp_handler)(&rchp->ibcq, in complete_rq_drain_wr()
1063 rchp->ibcq.cq_context); in complete_rq_drain_wr()
1064 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); in complete_rq_drain_wr()
1073 wr = wr->next; in complete_rq_drain_wrs()
1093 rhp = qhp->rhp; in c4iw_post_send()
1094 spin_lock_irqsave(&qhp->lock, flag); in c4iw_post_send()
1098 * drain cqe. in c4iw_post_send()
1100 if (qhp->wq.flushed) { in c4iw_post_send()
1101 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_send()
1105 num_wrs = t4_sq_avail(&qhp->wq); in c4iw_post_send()
1107 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_send()
1109 return -ENOMEM; in c4iw_post_send()
1113 * Fastpath for NVMe-oF target WRITE + SEND_WITH_INV wr chain which is in c4iw_post_send()
1114 * the response for small NVMe-oF READ requests. If the chain is in c4iw_post_send()
1115 * exactly a WRITE->SEND_WITH_INV or a WRITE->SEND and the sgl depths in c4iw_post_send()
1121 if (qhp->rhp->rdev.lldi.write_cmpl_support && in c4iw_post_send()
1122 CHELSIO_CHIP_VERSION(qhp->rhp->rdev.lldi.adapter_type) >= in c4iw_post_send()
1124 wr && wr->next && !wr->next->next && in c4iw_post_send()
1125 wr->opcode == IB_WR_RDMA_WRITE && in c4iw_post_send()
1126 wr->sg_list[0].length && wr->num_sge <= T4_WRITE_CMPL_MAX_SGL && in c4iw_post_send()
1127 (wr->next->opcode == IB_WR_SEND || in c4iw_post_send()
1128 wr->next->opcode == IB_WR_SEND_WITH_INV) && in c4iw_post_send()
1129 wr->next->sg_list[0].length == T4_WRITE_CMPL_MAX_CQE && in c4iw_post_send()
1130 wr->next->num_sge == 1 && num_wrs >= 2) { in c4iw_post_send()
1132 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_send()
1138 err = -ENOMEM; in c4iw_post_send()
1142 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue + in c4iw_post_send()
1143 qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE); in c4iw_post_send()
1146 if (wr->send_flags & IB_SEND_SOLICITED) in c4iw_post_send()
1148 if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all) in c4iw_post_send()
1150 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx]; in c4iw_post_send()
1151 switch (wr->opcode) { in c4iw_post_send()
1154 if (wr->send_flags & IB_SEND_FENCE) in c4iw_post_send()
1157 if (wr->opcode == IB_WR_SEND) in c4iw_post_send()
1158 swsqe->opcode = FW_RI_SEND; in c4iw_post_send()
1160 swsqe->opcode = FW_RI_SEND_WITH_INV; in c4iw_post_send()
1161 err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16); in c4iw_post_send()
1164 if (unlikely(!rhp->rdev.lldi.write_w_imm_support)) { in c4iw_post_send()
1165 err = -EINVAL; in c4iw_post_send()
1172 swsqe->opcode = FW_RI_RDMA_WRITE; in c4iw_post_send()
1173 err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16); in c4iw_post_send()
1178 swsqe->opcode = FW_RI_READ_REQ; in c4iw_post_send()
1179 if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) { in c4iw_post_send()
1180 c4iw_invalidate_mr(rhp, wr->sg_list[0].lkey); in c4iw_post_send()
1188 swsqe->read_len = wr->sg_list[0].length; in c4iw_post_send()
1189 if (!qhp->wq.sq.oldest_read) in c4iw_post_send()
1190 qhp->wq.sq.oldest_read = swsqe; in c4iw_post_send()
1193 struct c4iw_mr *mhp = to_c4iw_mr(reg_wr(wr)->mr); in c4iw_post_send()
1195 swsqe->opcode = FW_RI_FAST_REGISTER; in c4iw_post_send()
1196 if (rhp->rdev.lldi.fr_nsmr_tpte_wr_support && in c4iw_post_send()
1197 !mhp->attr.state && mhp->mpl_len <= 2) { in c4iw_post_send()
1199 build_tpte_memreg(&wqe->fr_tpte, reg_wr(wr), in c4iw_post_send()
1203 err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr), in c4iw_post_send()
1205 rhp->rdev.lldi.ulptx_memwrite_dsgl); in c4iw_post_send()
1209 mhp->attr.state = 1; in c4iw_post_send()
1213 if (wr->send_flags & IB_SEND_FENCE) in c4iw_post_send()
1216 swsqe->opcode = FW_RI_LOCAL_INV; in c4iw_post_send()
1218 c4iw_invalidate_mr(rhp, wr->ex.invalidate_rkey); in c4iw_post_send()
1222 wr->opcode); in c4iw_post_send()
1223 err = -EINVAL; in c4iw_post_send()
1229 swsqe->idx = qhp->wq.sq.pidx; in c4iw_post_send()
1230 swsqe->complete = 0; in c4iw_post_send()
1231 swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) || in c4iw_post_send()
1232 qhp->sq_sig_all; in c4iw_post_send()
1233 swsqe->flushed = 0; in c4iw_post_send()
1234 swsqe->wr_id = wr->wr_id; in c4iw_post_send()
1236 swsqe->sge_ts = cxgb4_read_sge_timestamp( in c4iw_post_send()
1237 rhp->rdev.lldi.ports[0]); in c4iw_post_send()
1238 swsqe->host_time = ktime_get(); in c4iw_post_send()
1241 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16); in c4iw_post_send()
1244 (unsigned long long)wr->wr_id, qhp->wq.sq.pidx, in c4iw_post_send()
1245 swsqe->opcode, swsqe->read_len); in c4iw_post_send()
1246 wr = wr->next; in c4iw_post_send()
1247 num_wrs--; in c4iw_post_send()
1248 t4_sq_produce(&qhp->wq, len16); in c4iw_post_send()
1251 if (!rhp->rdev.status_page->db_off) { in c4iw_post_send()
1252 t4_ring_sq_db(&qhp->wq, idx, wqe); in c4iw_post_send()
1253 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_send()
1255 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_send()
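
The NVMe-oF fastpath comment at lines 1113-1115 matches a two-WR chain handed in by the upper layer: an RDMA WRITE immediately followed by a SEND or SEND_WITH_INV. For orientation, below is roughly how a userspace ULP would post such a chain through libibverbs. It is a sketch of the chain shape only; it does not reproduce the exact SGE-count and length limits the fastpath checks, and the QP, SGEs and rkeys are assumed to already exist.

#include <infiniband/verbs.h>

static int post_write_then_send_inv(struct ibv_qp *qp,
				    struct ibv_sge *write_sge,
				    struct ibv_sge *send_sge,
				    uint64_t remote_addr, uint32_t rkey,
				    uint32_t inv_rkey)
{
	struct ibv_send_wr write_wr = { 0 };
	struct ibv_send_wr send_wr = { 0 };
	struct ibv_send_wr *bad_wr = NULL;

	write_wr.wr_id = 1;
	write_wr.opcode = IBV_WR_RDMA_WRITE;
	write_wr.sg_list = write_sge;
	write_wr.num_sge = 1;
	write_wr.wr.rdma.remote_addr = remote_addr;
	write_wr.wr.rdma.rkey = rkey;
	write_wr.next = &send_wr;                /* chain: WRITE -> SEND_WITH_INV */

	send_wr.wr_id = 2;
	send_wr.opcode = IBV_WR_SEND_WITH_INV;
	send_wr.invalidate_rkey = inv_rkey;      /* remote MR to invalidate */
	send_wr.sg_list = send_sge;
	send_wr.num_sge = 1;
	send_wr.send_flags = IBV_SEND_SIGNALED;

	return ibv_post_send(qp, &write_wr, &bad_wr);
}
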
1273 spin_lock_irqsave(&qhp->lock, flag); in c4iw_post_receive()
1277 * drain cqe. in c4iw_post_receive()
1279 if (qhp->wq.flushed) { in c4iw_post_receive()
1280 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_receive()
1284 num_wrs = t4_rq_avail(&qhp->wq); in c4iw_post_receive()
1286 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_receive()
1288 return -ENOMEM; in c4iw_post_receive()
1291 if (wr->num_sge > T4_MAX_RECV_SGE) { in c4iw_post_receive()
1292 err = -EINVAL; in c4iw_post_receive()
1296 wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue + in c4iw_post_receive()
1297 qhp->wq.rq.wq_pidx * in c4iw_post_receive()
1302 err = -ENOMEM; in c4iw_post_receive()
1308 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id; in c4iw_post_receive()
1310 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts = in c4iw_post_receive()
1312 qhp->rhp->rdev.lldi.ports[0]); in c4iw_post_receive()
1313 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_time = in c4iw_post_receive()
1317 wqe->recv.opcode = FW_RI_RECV_WR; in c4iw_post_receive()
1318 wqe->recv.r1 = 0; in c4iw_post_receive()
1319 wqe->recv.wrid = qhp->wq.rq.pidx; in c4iw_post_receive()
1320 wqe->recv.r2[0] = 0; in c4iw_post_receive()
1321 wqe->recv.r2[1] = 0; in c4iw_post_receive()
1322 wqe->recv.r2[2] = 0; in c4iw_post_receive()
1323 wqe->recv.len16 = len16; in c4iw_post_receive()
1325 (unsigned long long)wr->wr_id, qhp->wq.rq.pidx); in c4iw_post_receive()
1326 t4_rq_produce(&qhp->wq, len16); in c4iw_post_receive()
1328 wr = wr->next; in c4iw_post_receive()
1329 num_wrs--; in c4iw_post_receive()
1331 if (!qhp->rhp->rdev.status_page->db_off) { in c4iw_post_receive()
1332 t4_ring_rq_db(&qhp->wq, idx, wqe); in c4iw_post_receive()
1333 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_receive()
1335 spin_unlock_irqrestore(&qhp->lock, flag); in c4iw_post_receive()
1344 struct t4_srq_pending_wr *pwr = &srq->pending_wrs[srq->pending_pidx]; in defer_srq_wr()
1347 __func__, srq->cidx, srq->pidx, srq->wq_pidx, in defer_srq_wr()
1348 srq->in_use, srq->ooo_count, in defer_srq_wr()
1349 (unsigned long long)wr_id, srq->pending_cidx, in defer_srq_wr()
1350 srq->pending_pidx, srq->pending_in_use); in defer_srq_wr()
1351 pwr->wr_id = wr_id; in defer_srq_wr()
1352 pwr->len16 = len16; in defer_srq_wr()
1353 memcpy(&pwr->wqe, wqe, len16 * 16); in defer_srq_wr()
1369 spin_lock_irqsave(&srq->lock, flag); in c4iw_post_srq_recv()
1370 num_wrs = t4_srq_avail(&srq->wq); in c4iw_post_srq_recv()
1372 spin_unlock_irqrestore(&srq->lock, flag); in c4iw_post_srq_recv()
1373 return -ENOMEM; in c4iw_post_srq_recv()
1376 if (wr->num_sge > T4_MAX_RECV_SGE) { in c4iw_post_srq_recv()
1377 err = -EINVAL; in c4iw_post_srq_recv()
1385 err = -ENOMEM; in c4iw_post_srq_recv()
1391 wqe->recv.opcode = FW_RI_RECV_WR; in c4iw_post_srq_recv()
1392 wqe->recv.r1 = 0; in c4iw_post_srq_recv()
1393 wqe->recv.wrid = srq->wq.pidx; in c4iw_post_srq_recv()
1394 wqe->recv.r2[0] = 0; in c4iw_post_srq_recv()
1395 wqe->recv.r2[1] = 0; in c4iw_post_srq_recv()
1396 wqe->recv.r2[2] = 0; in c4iw_post_srq_recv()
1397 wqe->recv.len16 = len16; in c4iw_post_srq_recv()
1399 if (srq->wq.ooo_count || in c4iw_post_srq_recv()
1400 srq->wq.pending_in_use || in c4iw_post_srq_recv()
1401 srq->wq.sw_rq[srq->wq.pidx].valid) { in c4iw_post_srq_recv()
1402 defer_srq_wr(&srq->wq, wqe, wr->wr_id, len16); in c4iw_post_srq_recv()
1404 srq->wq.sw_rq[srq->wq.pidx].wr_id = wr->wr_id; in c4iw_post_srq_recv()
1405 srq->wq.sw_rq[srq->wq.pidx].valid = 1; in c4iw_post_srq_recv()
1406 c4iw_copy_wr_to_srq(&srq->wq, wqe, len16); in c4iw_post_srq_recv()
1408 __func__, srq->wq.cidx, in c4iw_post_srq_recv()
1409 srq->wq.pidx, srq->wq.wq_pidx, in c4iw_post_srq_recv()
1410 srq->wq.in_use, in c4iw_post_srq_recv()
1411 (unsigned long long)wr->wr_id); in c4iw_post_srq_recv()
1412 t4_srq_produce(&srq->wq, len16); in c4iw_post_srq_recv()
1415 wr = wr->next; in c4iw_post_srq_recv()
1416 num_wrs--; in c4iw_post_srq_recv()
1419 t4_ring_srq_db(&srq->wq, idx, len16, wqe); in c4iw_post_srq_recv()
1420 spin_unlock_irqrestore(&srq->lock, flag); in c4iw_post_srq_recv()
1567 pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, in post_terminate()
1568 qhp->ep->hwtid); in post_terminate()
1570 skb = skb_dequeue(&qhp->ep->com.ep_skb_list); in post_terminate()
1574 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); in post_terminate()
1577 wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR)); in post_terminate()
1578 wqe->flowid_len16 = cpu_to_be32( in post_terminate()
1579 FW_WR_FLOWID_V(qhp->ep->hwtid) | in post_terminate()
1582 wqe->u.terminate.type = FW_RI_TYPE_TERMINATE; in post_terminate()
1583 wqe->u.terminate.immdlen = cpu_to_be32(sizeof(*term)); in post_terminate()
1584 term = (struct terminate_message *)wqe->u.terminate.termmsg; in post_terminate()
1585 if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) { in post_terminate()
1586 term->layer_etype = qhp->attr.layer_etype; in post_terminate()
1587 term->ecode = qhp->attr.ecode; in post_terminate()
1589 build_term_codes(err_cqe, &term->layer_etype, &term->ecode); in post_terminate()
1590 c4iw_ofld_send(&qhp->rhp->rdev, skb); in post_terminate()
1606 spin_lock_irqsave(&rchp->lock, flag); in __flush_qp()
1608 spin_lock(&schp->lock); in __flush_qp()
1609 spin_lock(&qhp->lock); in __flush_qp()
1611 if (qhp->wq.flushed) { in __flush_qp()
1612 spin_unlock(&qhp->lock); in __flush_qp()
1614 spin_unlock(&schp->lock); in __flush_qp()
1615 spin_unlock_irqrestore(&rchp->lock, flag); in __flush_qp()
1618 qhp->wq.flushed = 1; in __flush_qp()
1619 t4_set_wq_in_error(&qhp->wq, 0); in __flush_qp()
1622 if (!qhp->srq) { in __flush_qp()
1623 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); in __flush_qp()
1624 rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); in __flush_qp()
1631 spin_unlock(&qhp->lock); in __flush_qp()
1633 spin_unlock(&schp->lock); in __flush_qp()
1634 spin_unlock_irqrestore(&rchp->lock, flag); in __flush_qp()
1638 t4_clear_cq_armed(&rchp->cq)) { in __flush_qp()
1639 spin_lock_irqsave(&rchp->comp_handler_lock, flag); in __flush_qp()
1640 (*rchp->ibcq.comp_handler)(&rchp->ibcq, in __flush_qp()
1641 rchp->ibcq.cq_context); in __flush_qp()
1642 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); in __flush_qp()
1645 if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) { in __flush_qp()
1646 spin_lock_irqsave(&rchp->comp_handler_lock, flag); in __flush_qp()
1647 (*rchp->ibcq.comp_handler)(&rchp->ibcq, in __flush_qp()
1648 rchp->ibcq.cq_context); in __flush_qp()
1649 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); in __flush_qp()
1651 if (sq_flushed && t4_clear_cq_armed(&schp->cq)) { in __flush_qp()
1652 spin_lock_irqsave(&schp->comp_handler_lock, flag); in __flush_qp()
1653 (*schp->ibcq.comp_handler)(&schp->ibcq, in __flush_qp()
1654 schp->ibcq.cq_context); in __flush_qp()
1655 spin_unlock_irqrestore(&schp->comp_handler_lock, flag); in __flush_qp()
1665 rchp = to_c4iw_cq(qhp->ibqp.recv_cq); in flush_qp()
1666 schp = to_c4iw_cq(qhp->ibqp.send_cq); in flush_qp()
1668 if (qhp->ibqp.uobject) { in flush_qp()
1670 /* for user qps, qhp->wq.flushed is protected by qhp->mutex */ in flush_qp()
1671 if (qhp->wq.flushed) in flush_qp()
1674 qhp->wq.flushed = 1; in flush_qp()
1675 t4_set_wq_in_error(&qhp->wq, 0); in flush_qp()
1676 t4_set_cq_in_error(&rchp->cq); in flush_qp()
1677 spin_lock_irqsave(&rchp->comp_handler_lock, flag); in flush_qp()
1678 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); in flush_qp()
1679 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); in flush_qp()
1681 t4_set_cq_in_error(&schp->cq); in flush_qp()
1682 spin_lock_irqsave(&schp->comp_handler_lock, flag); in flush_qp()
1683 (*schp->ibcq.comp_handler)(&schp->ibcq, in flush_qp()
1684 schp->ibcq.cq_context); in flush_qp()
1685 spin_unlock_irqrestore(&schp->comp_handler_lock, flag); in flush_qp()
1699 pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, ep->hwtid); in rdma_fini()
1701 skb = skb_dequeue(&ep->com.ep_skb_list); in rdma_fini()
1703 return -ENOMEM; in rdma_fini()
1705 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); in rdma_fini()
1708 wqe->op_compl = cpu_to_be32( in rdma_fini()
1711 wqe->flowid_len16 = cpu_to_be32( in rdma_fini()
1712 FW_WR_FLOWID_V(ep->hwtid) | in rdma_fini()
1714 wqe->cookie = (uintptr_t)ep->com.wr_waitp; in rdma_fini()
1716 wqe->u.fini.type = FW_RI_TYPE_FINI; in rdma_fini()
1718 ret = c4iw_ref_send_wait(&rhp->rdev, skb, ep->com.wr_waitp, in rdma_fini()
1719 qhp->ep->hwtid, qhp->wq.sq.qid, __func__); in rdma_fini()
1728 memset(&init->u, 0, sizeof(init->u)); in build_rtr_msg()
1731 init->u.write.opcode = FW_RI_RDMA_WRITE_WR; in build_rtr_msg()
1732 init->u.write.stag_sink = cpu_to_be32(1); in build_rtr_msg()
1733 init->u.write.to_sink = cpu_to_be64(1); in build_rtr_msg()
1734 init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD; in build_rtr_msg()
1735 init->u.write.len16 = DIV_ROUND_UP( in build_rtr_msg()
1736 sizeof(init->u.write) + sizeof(struct fw_ri_immd), 16); in build_rtr_msg()
1739 init->u.write.opcode = FW_RI_RDMA_READ_WR; in build_rtr_msg()
1740 init->u.read.stag_src = cpu_to_be32(1); in build_rtr_msg()
1741 init->u.read.to_src_lo = cpu_to_be32(1); in build_rtr_msg()
1742 init->u.read.stag_sink = cpu_to_be32(1); in build_rtr_msg()
1743 init->u.read.to_sink_lo = cpu_to_be32(1); in build_rtr_msg()
1744 init->u.read.len16 = DIV_ROUND_UP(sizeof(init->u.read), 16); in build_rtr_msg()
1756 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord); in rdma_init()
1760 ret = -ENOMEM; in rdma_init()
1763 ret = alloc_ird(rhp, qhp->attr.max_ird); in rdma_init()
1765 qhp->attr.max_ird = 0; in rdma_init()
1769 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); in rdma_init()
1772 wqe->op_compl = cpu_to_be32( in rdma_init()
1775 wqe->flowid_len16 = cpu_to_be32( in rdma_init()
1776 FW_WR_FLOWID_V(qhp->ep->hwtid) | in rdma_init()
1779 wqe->cookie = (uintptr_t)qhp->ep->com.wr_waitp; in rdma_init()
1781 wqe->u.init.type = FW_RI_TYPE_INIT; in rdma_init()
1782 wqe->u.init.mpareqbit_p2ptype = in rdma_init()
1783 FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) | in rdma_init()
1784 FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type); in rdma_init()
1785 wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE; in rdma_init()
1786 if (qhp->attr.mpa_attr.recv_marker_enabled) in rdma_init()
1787 wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE; in rdma_init()
1788 if (qhp->attr.mpa_attr.xmit_marker_enabled) in rdma_init()
1789 wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE; in rdma_init()
1790 if (qhp->attr.mpa_attr.crc_enabled) in rdma_init()
1791 wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE; in rdma_init()
1793 wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE | in rdma_init()
1796 if (!qhp->ibqp.uobject) in rdma_init()
1797 wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE | in rdma_init()
1799 wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq)); in rdma_init()
1800 wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd); in rdma_init()
1801 wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid); in rdma_init()
1802 wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid); in rdma_init()
1803 if (qhp->srq) { in rdma_init()
1804 wqe->u.init.rq_eqid = cpu_to_be32(FW_RI_INIT_RQEQID_SRQ | in rdma_init()
1805 qhp->srq->idx); in rdma_init()
1807 wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid); in rdma_init()
1808 wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size); in rdma_init()
1809 wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr - in rdma_init()
1810 rhp->rdev.lldi.vr->rq.start); in rdma_init()
1812 wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq); in rdma_init()
1813 wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq); in rdma_init()
1814 wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord); in rdma_init()
1815 wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird); in rdma_init()
1816 wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq); in rdma_init()
1817 wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq); in rdma_init()
1818 if (qhp->attr.mpa_attr.initiator) in rdma_init()
1819 build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init); in rdma_init()
1821 ret = c4iw_ref_send_wait(&rhp->rdev, skb, qhp->ep->com.wr_waitp, in rdma_init()
1822 qhp->ep->hwtid, qhp->wq.sq.qid, __func__); in rdma_init()
1826 free_ird(rhp, qhp->attr.max_ird); in rdma_init()
1838 struct c4iw_qp_attributes newattr = qhp->attr; in c4iw_modify_qp()
1845 pr_debug("qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", in c4iw_modify_qp()
1846 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state, in c4iw_modify_qp()
1847 (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1); in c4iw_modify_qp()
1849 mutex_lock(&qhp->mutex); in c4iw_modify_qp()
1853 if (qhp->attr.state != C4IW_QP_STATE_IDLE) { in c4iw_modify_qp()
1854 ret = -EIO; in c4iw_modify_qp()
1858 newattr.enable_rdma_read = attrs->enable_rdma_read; in c4iw_modify_qp()
1860 newattr.enable_rdma_write = attrs->enable_rdma_write; in c4iw_modify_qp()
1862 newattr.enable_bind = attrs->enable_bind; in c4iw_modify_qp()
1864 if (attrs->max_ord > c4iw_max_read_depth) { in c4iw_modify_qp()
1865 ret = -EINVAL; in c4iw_modify_qp()
1868 newattr.max_ord = attrs->max_ord; in c4iw_modify_qp()
1871 if (attrs->max_ird > cur_max_read_depth(rhp)) { in c4iw_modify_qp()
1872 ret = -EINVAL; in c4iw_modify_qp()
1875 newattr.max_ird = attrs->max_ird; in c4iw_modify_qp()
1877 qhp->attr = newattr; in c4iw_modify_qp()
1881 ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc); in c4iw_modify_qp()
1885 ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc); in c4iw_modify_qp()
1891 if (qhp->attr.state == attrs->next_state) in c4iw_modify_qp()
1894 switch (qhp->attr.state) { in c4iw_modify_qp()
1896 switch (attrs->next_state) { in c4iw_modify_qp()
1899 ret = -EINVAL; in c4iw_modify_qp()
1903 ret = -EINVAL; in c4iw_modify_qp()
1906 qhp->attr.mpa_attr = attrs->mpa_attr; in c4iw_modify_qp()
1907 qhp->attr.llp_stream_handle = attrs->llp_stream_handle; in c4iw_modify_qp()
1908 qhp->ep = qhp->attr.llp_stream_handle; in c4iw_modify_qp()
1914 * happens in CLOSING->IDLE transition or *->ERROR in c4iw_modify_qp()
1917 c4iw_get_ep(&qhp->ep->com); in c4iw_modify_qp()
1927 ret = -EINVAL; in c4iw_modify_qp()
1932 switch (attrs->next_state) { in c4iw_modify_qp()
1934 t4_set_wq_in_error(&qhp->wq, 0); in c4iw_modify_qp()
1936 ep = qhp->ep; in c4iw_modify_qp()
1940 c4iw_get_ep(&qhp->ep->com); in c4iw_modify_qp()
1947 t4_set_wq_in_error(&qhp->wq, 0); in c4iw_modify_qp()
1949 qhp->attr.layer_etype = attrs->layer_etype; in c4iw_modify_qp()
1950 qhp->attr.ecode = attrs->ecode; in c4iw_modify_qp()
1951 ep = qhp->ep; in c4iw_modify_qp()
1953 c4iw_get_ep(&ep->com); in c4iw_modify_qp()
1957 terminate = qhp->attr.send_term; in c4iw_modify_qp()
1964 t4_set_wq_in_error(&qhp->wq, 0); in c4iw_modify_qp()
1969 ep = qhp->ep; in c4iw_modify_qp()
1970 c4iw_get_ep(&qhp->ep->com); in c4iw_modify_qp()
1975 ret = -EINVAL; in c4iw_modify_qp()
1984 if (!internal && (qhp->ibqp.uobject || attrs->next_state != in c4iw_modify_qp()
1986 ret = -EINVAL; in c4iw_modify_qp()
1989 switch (attrs->next_state) { in c4iw_modify_qp()
1993 qhp->attr.llp_stream_handle = NULL; in c4iw_modify_qp()
1994 c4iw_put_ep(&qhp->ep->com); in c4iw_modify_qp()
1995 qhp->ep = NULL; in c4iw_modify_qp()
1996 wake_up(&qhp->wait); in c4iw_modify_qp()
2001 ret = -EINVAL; in c4iw_modify_qp()
2006 if (attrs->next_state != C4IW_QP_STATE_IDLE) { in c4iw_modify_qp()
2007 ret = -EINVAL; in c4iw_modify_qp()
2010 if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) { in c4iw_modify_qp()
2011 ret = -EINVAL; in c4iw_modify_qp()
2018 ret = -EINVAL; in c4iw_modify_qp()
2024 pr_err("%s in a bad state %d\n", __func__, qhp->attr.state); in c4iw_modify_qp()
2025 ret = -EINVAL; in c4iw_modify_qp()
2031 pr_debug("disassociating ep %p qpid 0x%x\n", qhp->ep, in c4iw_modify_qp()
2032 qhp->wq.sq.qid); in c4iw_modify_qp()
2035 qhp->attr.llp_stream_handle = NULL; in c4iw_modify_qp()
2037 ep = qhp->ep; in c4iw_modify_qp()
2038 qhp->ep = NULL; in c4iw_modify_qp()
2043 wake_up(&qhp->wait); in c4iw_modify_qp()
2045 mutex_unlock(&qhp->mutex); in c4iw_modify_qp()
2052 * on the EP. This can be a normal close (RTS->CLOSING) or in c4iw_modify_qp()
2053 * an abnormal close (RTS/CLOSING->ERROR). in c4iw_modify_qp()
2058 c4iw_put_ep(&ep->com); in c4iw_modify_qp()
2066 c4iw_put_ep(&ep->com); in c4iw_modify_qp()
2067 pr_debug("exit state %d\n", qhp->attr.state); in c4iw_modify_qp()
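
The comments in this function name the transitions the state machine cares about: a normal close is RTS -> CLOSING, an abnormal close is RTS or CLOSING -> ERROR, and the endpoint reference is dropped on the CLOSING -> IDLE or any-state -> ERROR transitions. Below is a compilable toy of those named transitions plus the IDLE -> RTS setup implied by the rdma_init() path; it is deliberately incomplete and is not the full c4iw_modify_qp() logic.

#include <stdbool.h>
#include <stdio.h>

enum toy_qp_state { TOY_IDLE, TOY_RTS, TOY_CLOSING, TOY_ERROR };

/* Only the transitions named above; everything else is rejected. */
static bool toy_transition_ok(enum toy_qp_state cur, enum toy_qp_state next)
{
	if (next == TOY_ERROR)                       /* "*->ERROR": any state can be failed */
		return true;
	if (cur == TOY_IDLE && next == TOY_RTS)      /* connection established (rdma_init) */
		return true;
	if (cur == TOY_RTS && next == TOY_CLOSING)   /* normal close */
		return true;
	if (cur == TOY_CLOSING && next == TOY_IDLE)  /* graceful close completed */
		return true;
	return false;
}

int main(void)
{
	printf("RTS->CLOSING ok=%d, IDLE->CLOSING ok=%d\n",
	       toy_transition_ok(TOY_RTS, TOY_CLOSING),
	       toy_transition_ok(TOY_IDLE, TOY_CLOSING));
	return 0;
}
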
2079 rhp = qhp->rhp; in c4iw_destroy_qp()
2080 ucontext = qhp->ucontext; in c4iw_destroy_qp()
2083 if (qhp->attr.state == C4IW_QP_STATE_TERMINATE) in c4iw_destroy_qp()
2087 wait_event(qhp->wait, !qhp->ep); in c4iw_destroy_qp()
2089 xa_lock_irq(&rhp->qps); in c4iw_destroy_qp()
2090 __xa_erase(&rhp->qps, qhp->wq.sq.qid); in c4iw_destroy_qp()
2091 if (!list_empty(&qhp->db_fc_entry)) in c4iw_destroy_qp()
2092 list_del_init(&qhp->db_fc_entry); in c4iw_destroy_qp()
2093 xa_unlock_irq(&rhp->qps); in c4iw_destroy_qp()
2094 free_ird(rhp, qhp->attr.max_ird); in c4iw_destroy_qp()
2098 wait_for_completion(&qhp->qp_rel_comp); in c4iw_destroy_qp()
2100 pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid); in c4iw_destroy_qp()
2103 destroy_qp(&rhp->rdev, &qhp->wq, in c4iw_destroy_qp()
2104 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq); in c4iw_destroy_qp()
2106 c4iw_put_wr_wait(qhp->wr_waitp); in c4iw_destroy_qp()
2130 if (attrs->qp_type != IB_QPT_RC) in c4iw_create_qp()
2131 return ERR_PTR(-EOPNOTSUPP); in c4iw_create_qp()
2134 rhp = php->rhp; in c4iw_create_qp()
2135 schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid); in c4iw_create_qp()
2136 rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid); in c4iw_create_qp()
2138 return ERR_PTR(-EINVAL); in c4iw_create_qp()
2140 if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE) in c4iw_create_qp()
2141 return ERR_PTR(-EINVAL); in c4iw_create_qp()
2143 if (!attrs->srq) { in c4iw_create_qp()
2144 if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size) in c4iw_create_qp()
2145 return ERR_PTR(-E2BIG); in c4iw_create_qp()
2146 rqsize = attrs->cap.max_recv_wr + 1; in c4iw_create_qp()
2151 if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size) in c4iw_create_qp()
2152 return ERR_PTR(-E2BIG); in c4iw_create_qp()
2153 sqsize = attrs->cap.max_send_wr + 1; in c4iw_create_qp()
2159 return ERR_PTR(-ENOMEM); in c4iw_create_qp()
2161 qhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL); in c4iw_create_qp()
2162 if (!qhp->wr_waitp) { in c4iw_create_qp()
2163 ret = -ENOMEM; in c4iw_create_qp()
2167 qhp->wq.sq.size = sqsize; in c4iw_create_qp()
2168 qhp->wq.sq.memsize = in c4iw_create_qp()
2169 (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) * in c4iw_create_qp()
2170 sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64); in c4iw_create_qp()
2171 qhp->wq.sq.flush_cidx = -1; in c4iw_create_qp()
2172 if (!attrs->srq) { in c4iw_create_qp()
2173 qhp->wq.rq.size = rqsize; in c4iw_create_qp()
2174 qhp->wq.rq.memsize = in c4iw_create_qp()
2175 (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) * in c4iw_create_qp()
2176 sizeof(*qhp->wq.rq.queue); in c4iw_create_qp()
2180 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE); in c4iw_create_qp()
2181 if (!attrs->srq) in c4iw_create_qp()
2182 qhp->wq.rq.memsize = in c4iw_create_qp()
2183 roundup(qhp->wq.rq.memsize, PAGE_SIZE); in c4iw_create_qp()
2186 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq, in c4iw_create_qp()
2187 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, in c4iw_create_qp()
2188 qhp->wr_waitp, !attrs->srq); in c4iw_create_qp()
2192 attrs->cap.max_recv_wr = rqsize - 1; in c4iw_create_qp()
2193 attrs->cap.max_send_wr = sqsize - 1; in c4iw_create_qp()
2194 attrs->cap.max_inline_data = T4_MAX_SEND_INLINE; in c4iw_create_qp()
2196 qhp->rhp = rhp; in c4iw_create_qp()
2197 qhp->attr.pd = php->pdid; in c4iw_create_qp()
2198 qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid; in c4iw_create_qp()
2199 qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid; in c4iw_create_qp()
2200 qhp->attr.sq_num_entries = attrs->cap.max_send_wr; in c4iw_create_qp()
2201 qhp->attr.sq_max_sges = attrs->cap.max_send_sge; in c4iw_create_qp()
2202 qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge; in c4iw_create_qp()
2203 if (!attrs->srq) { in c4iw_create_qp()
2204 qhp->attr.rq_num_entries = attrs->cap.max_recv_wr; in c4iw_create_qp()
2205 qhp->attr.rq_max_sges = attrs->cap.max_recv_sge; in c4iw_create_qp()
2207 qhp->attr.state = C4IW_QP_STATE_IDLE; in c4iw_create_qp()
2208 qhp->attr.next_state = C4IW_QP_STATE_IDLE; in c4iw_create_qp()
2209 qhp->attr.enable_rdma_read = 1; in c4iw_create_qp()
2210 qhp->attr.enable_rdma_write = 1; in c4iw_create_qp()
2211 qhp->attr.enable_bind = 1; in c4iw_create_qp()
2212 qhp->attr.max_ord = 0; in c4iw_create_qp()
2213 qhp->attr.max_ird = 0; in c4iw_create_qp()
2214 qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR; in c4iw_create_qp()
2215 spin_lock_init(&qhp->lock); in c4iw_create_qp()
2216 mutex_init(&qhp->mutex); in c4iw_create_qp()
2217 init_waitqueue_head(&qhp->wait); in c4iw_create_qp()
2218 init_completion(&qhp->qp_rel_comp); in c4iw_create_qp()
2219 refcount_set(&qhp->qp_refcnt, 1); in c4iw_create_qp()
2221 ret = xa_insert_irq(&rhp->qps, qhp->wq.sq.qid, qhp, GFP_KERNEL); in c4iw_create_qp()
2228 ret = -ENOMEM; in c4iw_create_qp()
2231 if (!attrs->srq) { in c4iw_create_qp()
2234 ret = -ENOMEM; in c4iw_create_qp()
2240 ret = -ENOMEM; in c4iw_create_qp()
2243 if (!attrs->srq) { in c4iw_create_qp()
2247 ret = -ENOMEM; in c4iw_create_qp()
2252 if (t4_sq_onchip(&qhp->wq.sq)) { in c4iw_create_qp()
2256 ret = -ENOMEM; in c4iw_create_qp()
2261 if (rhp->rdev.lldi.write_w_imm_support) in c4iw_create_qp()
2263 uresp.qid_mask = rhp->rdev.qpmask; in c4iw_create_qp()
2264 uresp.sqid = qhp->wq.sq.qid; in c4iw_create_qp()
2265 uresp.sq_size = qhp->wq.sq.size; in c4iw_create_qp()
2266 uresp.sq_memsize = qhp->wq.sq.memsize; in c4iw_create_qp()
2267 if (!attrs->srq) { in c4iw_create_qp()
2268 uresp.rqid = qhp->wq.rq.qid; in c4iw_create_qp()
2269 uresp.rq_size = qhp->wq.rq.size; in c4iw_create_qp()
2270 uresp.rq_memsize = qhp->wq.rq.memsize; in c4iw_create_qp()
2272 spin_lock(&ucontext->mmap_lock); in c4iw_create_qp()
2274 uresp.ma_sync_key = ucontext->key; in c4iw_create_qp()
2275 ucontext->key += PAGE_SIZE; in c4iw_create_qp()
2277 uresp.sq_key = ucontext->key; in c4iw_create_qp()
2278 ucontext->key += PAGE_SIZE; in c4iw_create_qp()
2279 if (!attrs->srq) { in c4iw_create_qp()
2280 uresp.rq_key = ucontext->key; in c4iw_create_qp()
2281 ucontext->key += PAGE_SIZE; in c4iw_create_qp()
2283 uresp.sq_db_gts_key = ucontext->key; in c4iw_create_qp()
2284 ucontext->key += PAGE_SIZE; in c4iw_create_qp()
2285 if (!attrs->srq) { in c4iw_create_qp()
2286 uresp.rq_db_gts_key = ucontext->key; in c4iw_create_qp()
2287 ucontext->key += PAGE_SIZE; in c4iw_create_qp()
2289 spin_unlock(&ucontext->mmap_lock); in c4iw_create_qp()
2293 sq_key_mm->key = uresp.sq_key; in c4iw_create_qp()
2294 sq_key_mm->addr = qhp->wq.sq.phys_addr; in c4iw_create_qp()
2295 sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize); in c4iw_create_qp()
2297 if (!attrs->srq) { in c4iw_create_qp()
2298 rq_key_mm->key = uresp.rq_key; in c4iw_create_qp()
2299 rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue); in c4iw_create_qp()
2300 rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize); in c4iw_create_qp()
2303 sq_db_key_mm->key = uresp.sq_db_gts_key; in c4iw_create_qp()
2304 sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa; in c4iw_create_qp()
2305 sq_db_key_mm->len = PAGE_SIZE; in c4iw_create_qp()
2307 if (!attrs->srq) { in c4iw_create_qp()
2308 rq_db_key_mm->key = uresp.rq_db_gts_key; in c4iw_create_qp()
2309 rq_db_key_mm->addr = in c4iw_create_qp()
2310 (u64)(unsigned long)qhp->wq.rq.bar2_pa; in c4iw_create_qp()
2311 rq_db_key_mm->len = PAGE_SIZE; in c4iw_create_qp()
2315 ma_sync_key_mm->key = uresp.ma_sync_key; in c4iw_create_qp()
2316 ma_sync_key_mm->addr = in c4iw_create_qp()
2317 (pci_resource_start(rhp->rdev.lldi.pdev, 0) + in c4iw_create_qp()
2319 ma_sync_key_mm->len = PAGE_SIZE; in c4iw_create_qp()
2323 qhp->ucontext = ucontext; in c4iw_create_qp()
2325 if (!attrs->srq) { in c4iw_create_qp()
2326 qhp->wq.qp_errp = in c4iw_create_qp()
2327 &qhp->wq.rq.queue[qhp->wq.rq.size].status.qp_err; in c4iw_create_qp()
2329 qhp->wq.qp_errp = in c4iw_create_qp()
2330 &qhp->wq.sq.queue[qhp->wq.sq.size].status.qp_err; in c4iw_create_qp()
2331 qhp->wq.srqidxp = in c4iw_create_qp()
2332 &qhp->wq.sq.queue[qhp->wq.sq.size].status.srqidx; in c4iw_create_qp()
2335 qhp->ibqp.qp_num = qhp->wq.sq.qid; in c4iw_create_qp()
2336 if (attrs->srq) in c4iw_create_qp()
2337 qhp->srq = to_c4iw_srq(attrs->srq); in c4iw_create_qp()
2338 INIT_LIST_HEAD(&qhp->db_fc_entry); in c4iw_create_qp()
2340 qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize, in c4iw_create_qp()
2341 attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size, in c4iw_create_qp()
2342 qhp->wq.rq.memsize, attrs->cap.max_recv_wr); in c4iw_create_qp()
2343 return &qhp->ibqp; in c4iw_create_qp()
2347 if (!attrs->srq) in c4iw_create_qp()
2352 if (!attrs->srq) in c4iw_create_qp()
2357 xa_erase_irq(&rhp->qps, qhp->wq.sq.qid); in c4iw_create_qp()
2359 destroy_qp(&rhp->rdev, &qhp->wq, in c4iw_create_qp()
2360 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq); in c4iw_create_qp()
2362 c4iw_put_wr_wait(qhp->wr_waitp); in c4iw_create_qp()
2379 if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR)) in c4iw_ib_modify_qp()
2387 rhp = qhp->rhp; in c4iw_ib_modify_qp()
2389 attrs.next_state = c4iw_convert_state(attr->qp_state); in c4iw_ib_modify_qp()
2390 attrs.enable_rdma_read = (attr->qp_access_flags & in c4iw_ib_modify_qp()
2392 attrs.enable_rdma_write = (attr->qp_access_flags & in c4iw_ib_modify_qp()
2394 attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0; in c4iw_ib_modify_qp()
2408 attrs.sq_db_inc = attr->sq_psn; in c4iw_ib_modify_qp()
2409 attrs.rq_db_inc = attr->rq_psn; in c4iw_ib_modify_qp()
2412 if (!is_t4(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) && in c4iw_ib_modify_qp()
2414 return -EINVAL; in c4iw_ib_modify_qp()
2429 event.device = &srq->rhp->ibdev; in c4iw_dispatch_srq_limit_reached_event()
2430 event.element.srq = &srq->ibsrq; in c4iw_dispatch_srq_limit_reached_event()
2452 ret = -EINVAL; in c4iw_modify_srq()
2457 srq->armed = true; in c4iw_modify_srq()
2458 srq->srq_limit = attr->srq_limit; in c4iw_modify_srq()
2471 attr->qp_state = to_ib_qp_state(qhp->attr.state); in c4iw_ib_query_qp()
2472 attr->cur_qp_state = to_ib_qp_state(qhp->attr.state); in c4iw_ib_query_qp()
2473 init_attr->cap.max_send_wr = qhp->attr.sq_num_entries; in c4iw_ib_query_qp()
2474 init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries; in c4iw_ib_query_qp()
2475 init_attr->cap.max_send_sge = qhp->attr.sq_max_sges; in c4iw_ib_query_qp()
2476 init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges; in c4iw_ib_query_qp()
2477 init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE; in c4iw_ib_query_qp()
2478 init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0; in c4iw_ib_query_qp()
2485 struct c4iw_rdev *rdev = &srq->rhp->rdev; in free_srq_queue()
2486 struct sk_buff *skb = srq->destroy_skb; in free_srq_queue()
2487 struct t4_srq *wq = &srq->wq; in free_srq_queue()
2497 res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) | in free_srq_queue()
2500 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); in free_srq_queue()
2501 res_wr->cookie = (uintptr_t)wr_waitp; in free_srq_queue()
2502 res = res_wr->res; in free_srq_queue()
2503 res->u.srq.restype = FW_RI_RES_TYPE_SRQ; in free_srq_queue()
2504 res->u.srq.op = FW_RI_RES_OP_RESET; in free_srq_queue()
2505 res->u.srq.srqid = cpu_to_be32(srq->idx); in free_srq_queue()
2506 res->u.srq.eqid = cpu_to_be32(wq->qid); in free_srq_queue()
2511 dma_free_coherent(&rdev->lldi.pdev->dev, in free_srq_queue()
2512 wq->memsize, wq->queue, in free_srq_queue()
2514 c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size); in free_srq_queue()
2515 kfree(wq->sw_rq); in free_srq_queue()
2516 c4iw_put_qpid(rdev, wq->qid, uctx); in free_srq_queue()
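/*
 * Editor's note: SRQ teardown mirrors allocation. A FW_RI_RES_WR with
 * FW_RI_RES_OP_RESET (carrying the SRQ index and eq qid) asks firmware to
 * release the hardware SRQ context; the host side then frees the DMA queue
 * memory, returns the RQT range, frees the software ring and releases the qid.
 */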
2522 struct c4iw_rdev *rdev = &srq->rhp->rdev; in alloc_srq_queue()
2523 int user = (uctx != &rdev->uctx); in alloc_srq_queue()
2524 struct t4_srq *wq = &srq->wq; in alloc_srq_queue()
2530 int ret = -ENOMEM; in alloc_srq_queue()
2532 wq->qid = c4iw_get_qpid(rdev, uctx); in alloc_srq_queue()
2533 if (!wq->qid) in alloc_srq_queue()
2537 wq->sw_rq = kcalloc(wq->size, sizeof(*wq->sw_rq), in alloc_srq_queue()
2539 if (!wq->sw_rq) in alloc_srq_queue()
2541 wq->pending_wrs = kcalloc(srq->wq.size, in alloc_srq_queue()
2542 sizeof(*srq->wq.pending_wrs), in alloc_srq_queue()
2544 if (!wq->pending_wrs) in alloc_srq_queue()
2548 wq->rqt_size = wq->size; in alloc_srq_queue()
2549 wq->rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rqt_size); in alloc_srq_queue()
2550 if (!wq->rqt_hwaddr) in alloc_srq_queue()
2552 wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >> in alloc_srq_queue()
2555 wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize, in alloc_srq_queue()
2556 &wq->dma_addr, GFP_KERNEL); in alloc_srq_queue()
2557 if (!wq->queue) in alloc_srq_queue()
2560 dma_unmap_addr_set(wq, mapping, wq->dma_addr); in alloc_srq_queue()
2562 wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, CXGB4_BAR2_QTYPE_EGRESS, in alloc_srq_queue()
2563 &wq->bar2_qid, in alloc_srq_queue()
2564 user ? &wq->bar2_pa : NULL); in alloc_srq_queue()
2570 if (user && !wq->bar2_va) { in alloc_srq_queue()
2572 pci_name(rdev->lldi.pdev), wq->qid); in alloc_srq_queue()
2573 ret = -EINVAL; in alloc_srq_queue()
2587 res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) | in alloc_srq_queue()
2590 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); in alloc_srq_queue()
2591 res_wr->cookie = (uintptr_t)wr_waitp; in alloc_srq_queue()
2592 res = res_wr->res; in alloc_srq_queue()
2593 res->u.srq.restype = FW_RI_RES_TYPE_SRQ; in alloc_srq_queue()
2594 res->u.srq.op = FW_RI_RES_OP_WRITE; in alloc_srq_queue()
2599 eqsize = wq->size * T4_RQ_NUM_SLOTS + in alloc_srq_queue()
2600 rdev->hw_queue.t4_eq_status_entries; in alloc_srq_queue()
2601 res->u.srq.eqid = cpu_to_be32(wq->qid); in alloc_srq_queue()
2602 res->u.srq.fetchszm_to_iqid = in alloc_srq_queue()
2608 res->u.srq.dcaen_to_eqsize = in alloc_srq_queue()
2616 res->u.srq.eqaddr = cpu_to_be64(wq->dma_addr); in alloc_srq_queue()
2617 res->u.srq.srqid = cpu_to_be32(srq->idx); in alloc_srq_queue()
2618 res->u.srq.pdid = cpu_to_be32(srq->pdid); in alloc_srq_queue()
2619 res->u.srq.hwsrqsize = cpu_to_be32(wq->rqt_size); in alloc_srq_queue()
2620 res->u.srq.hwsrqaddr = cpu_to_be32(wq->rqt_hwaddr - in alloc_srq_queue()
2621 rdev->lldi.vr->rq.start); in alloc_srq_queue()
2625 ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->qid, __func__); in alloc_srq_queue()
2631 __func__, srq->idx, wq->qid, srq->pdid, wq->queue, in alloc_srq_queue()
2632 (u64)virt_to_phys(wq->queue), wq->bar2_va, in alloc_srq_queue()
2633 wq->rqt_hwaddr, wq->rqt_size); in alloc_srq_queue()
2637 dma_free_coherent(&rdev->lldi.pdev->dev, in alloc_srq_queue()
2638 wq->memsize, wq->queue, in alloc_srq_queue()
2641 c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size); in alloc_srq_queue()
2644 kfree(wq->pending_wrs); in alloc_srq_queue()
2647 kfree(wq->sw_rq); in alloc_srq_queue()
2649 c4iw_put_qpid(rdev, wq->qid, uctx); in alloc_srq_queue()
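/*
 * Editor's note: allocation proceeds qid -> sw_rq ring -> pending_wrs ->
 * RQT range -> DMA queue memory -> BAR2 mapping, after which a FW_RI_RES_WR
 * with FW_RI_RES_OP_WRITE programs the egress queue and SRQ context
 * (eqsize includes the status-page entries, hwsrqaddr is the RQT offset
 * relative to the start of the RQ memory region). A user-mode SRQ with no
 * BAR2 doorbell address fails with -EINVAL, and each error label above
 * unwinds exactly the steps taken so far, in reverse.
 */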
2659 dst = (u64 *)((u8 *)srq->queue + srq->wq_pidx * T4_EQ_ENTRY_SIZE); in c4iw_copy_wr_to_srq()
2662 if (dst >= (u64 *)&srq->queue[srq->size]) in c4iw_copy_wr_to_srq()
2663 dst = (u64 *)srq->queue; in c4iw_copy_wr_to_srq()
2665 if (dst >= (u64 *)&srq->queue[srq->size]) in c4iw_copy_wr_to_srq()
2666 dst = (u64 *)srq->queue; in c4iw_copy_wr_to_srq()
2667 len16--; in c4iw_copy_wr_to_srq()
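/*
 * Editor's note: c4iw_copy_wr_to_srq() copies a receive work request into the
 * SRQ ring 16 bytes at a time (two u64 stores per len16 unit), starting at the
 * producer index and wrapping dst back to the start of srq->queue whenever it
 * runs past the last slot, as the bounds checks above show. Below is a minimal
 * self-contained sketch of the same wrap-around copy idiom with generic types;
 * the names ring_copy_words/ring/ring_words/start are hypothetical and chosen
 * for illustration only, this is not driver code.
 */
#include <stddef.h>
#include <stdint.h>

static void ring_copy_words(uint64_t *ring, size_t ring_words,
			    const uint64_t *src, size_t nwords, size_t start)
{
	size_t i, idx = start;

	for (i = 0; i < nwords; i++) {
		ring[idx++] = src[i];	/* copy one 64-bit word */
		if (idx == ring_words)	/* ran past the last slot: wrap */
			idx = 0;
	}
}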
2674 struct ib_pd *pd = ib_srq->pd; in c4iw_create_srq()
2688 rhp = php->rhp; in c4iw_create_srq()
2690 if (!rhp->rdev.lldi.vr->srq.size) in c4iw_create_srq()
2691 return -EINVAL; in c4iw_create_srq()
2692 if (attrs->attr.max_wr > rhp->rdev.hw_queue.t4_max_rq_size) in c4iw_create_srq()
2693 return -E2BIG; in c4iw_create_srq()
2694 if (attrs->attr.max_sge > T4_MAX_RECV_SGE) in c4iw_create_srq()
2695 return -E2BIG; in c4iw_create_srq()
2700 rqsize = attrs->attr.max_wr + 1; in c4iw_create_srq()
2706 srq->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL); in c4iw_create_srq()
2707 if (!srq->wr_waitp) in c4iw_create_srq()
2708 return -ENOMEM; in c4iw_create_srq()
2710 srq->idx = c4iw_alloc_srq_idx(&rhp->rdev); in c4iw_create_srq()
2711 if (srq->idx < 0) { in c4iw_create_srq()
2712 ret = -ENOMEM; in c4iw_create_srq()
2717 srq->destroy_skb = alloc_skb(wr_len, GFP_KERNEL); in c4iw_create_srq()
2718 if (!srq->destroy_skb) { in c4iw_create_srq()
2719 ret = -ENOMEM; in c4iw_create_srq()
2723 srq->rhp = rhp; in c4iw_create_srq()
2724 srq->pdid = php->pdid; in c4iw_create_srq()
2726 srq->wq.size = rqsize; in c4iw_create_srq()
2727 srq->wq.memsize = in c4iw_create_srq()
2728 (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) * in c4iw_create_srq()
2729 sizeof(*srq->wq.queue); in c4iw_create_srq()
2731 srq->wq.memsize = roundup(srq->wq.memsize, PAGE_SIZE); in c4iw_create_srq()
2733 ret = alloc_srq_queue(srq, ucontext ? &ucontext->uctx : in c4iw_create_srq()
2734 &rhp->rdev.uctx, srq->wr_waitp); in c4iw_create_srq()
2737 attrs->attr.max_wr = rqsize - 1; in c4iw_create_srq()
2739 if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6) in c4iw_create_srq()
2740 srq->flags = T4_SRQ_LIMIT_SUPPORT; in c4iw_create_srq()
2745 ret = -ENOMEM; in c4iw_create_srq()
2750 ret = -ENOMEM; in c4iw_create_srq()
2754 uresp.flags = srq->flags; in c4iw_create_srq()
2755 uresp.qid_mask = rhp->rdev.qpmask; in c4iw_create_srq()
2756 uresp.srqid = srq->wq.qid; in c4iw_create_srq()
2757 uresp.srq_size = srq->wq.size; in c4iw_create_srq()
2758 uresp.srq_memsize = srq->wq.memsize; in c4iw_create_srq()
2759 uresp.rqt_abs_idx = srq->wq.rqt_abs_idx; in c4iw_create_srq()
2760 spin_lock(&ucontext->mmap_lock); in c4iw_create_srq()
2761 uresp.srq_key = ucontext->key; in c4iw_create_srq()
2762 ucontext->key += PAGE_SIZE; in c4iw_create_srq()
2763 uresp.srq_db_gts_key = ucontext->key; in c4iw_create_srq()
2764 ucontext->key += PAGE_SIZE; in c4iw_create_srq()
2765 spin_unlock(&ucontext->mmap_lock); in c4iw_create_srq()
2769 srq_key_mm->key = uresp.srq_key; in c4iw_create_srq()
2770 srq_key_mm->addr = virt_to_phys(srq->wq.queue); in c4iw_create_srq()
2771 srq_key_mm->len = PAGE_ALIGN(srq->wq.memsize); in c4iw_create_srq()
2773 srq_db_key_mm->key = uresp.srq_db_gts_key; in c4iw_create_srq()
2774 srq_db_key_mm->addr = (u64)(unsigned long)srq->wq.bar2_pa; in c4iw_create_srq()
2775 srq_db_key_mm->len = PAGE_SIZE; in c4iw_create_srq()
2780 __func__, srq->wq.qid, srq->idx, srq->wq.size, in c4iw_create_srq()
2781 (unsigned long)srq->wq.memsize, attrs->attr.max_wr); in c4iw_create_srq()
2783 spin_lock_init(&srq->lock); in c4iw_create_srq()
2791 free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx, in c4iw_create_srq()
2792 srq->wr_waitp); in c4iw_create_srq()
2794 kfree_skb(srq->destroy_skb); in c4iw_create_srq()
2796 c4iw_free_srq_idx(&rhp->rdev, srq->idx); in c4iw_create_srq()
2798 c4iw_put_wr_wait(srq->wr_waitp); in c4iw_create_srq()
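/*
 * Editor's note: c4iw_create_srq() validates that the adapter exposes SRQ
 * memory and that max_wr/max_sge fit the hardware limits, sizes the queue
 * from max_wr + 1 plus the status-page entries (rounded up to a page), and
 * pre-allocates destroy_skb so teardown never has to allocate memory.
 * T4_SRQ_LIMIT_SUPPORT is only set on chips newer than T6. For user-mode
 * SRQs, two mmap keys are handed back in uresp, one for the queue memory and
 * one for the BAR2 doorbell/GTS page, each advancing ucontext->key by
 * PAGE_SIZE under mmap_lock; the error labels undo the steps in reverse.
 */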
2809 rhp = srq->rhp; in c4iw_destroy_srq()
2811 pr_debug("%s id %d\n", __func__, srq->wq.qid); in c4iw_destroy_srq()
2814 free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx, in c4iw_destroy_srq()
2815 srq->wr_waitp); in c4iw_destroy_srq()
2816 c4iw_free_srq_idx(&rhp->rdev, srq->idx); in c4iw_destroy_srq()
2817 c4iw_put_wr_wait(srq->wr_waitp); in c4iw_destroy_srq()
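/*
 * Editor's note: destroy is the straight inverse of create: free the queue
 * and firmware context via free_srq_queue(), return the SRQ index to the
 * pool, and drop the wr_wait reference.
 */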