Lines matching refs: srq (identifier cross-reference listing). Each entry gives the source line number, the matching code fragment, and the enclosing function; the fragments appear to come from the Chelsio cxgb4 iWARP driver's QP/SRQ code.

1341 static void defer_srq_wr(struct t4_srq *srq, union t4_recv_wr *wqe,  in defer_srq_wr()  argument
1344 struct t4_srq_pending_wr *pwr = &srq->pending_wrs[srq->pending_pidx]; in defer_srq_wr()
1347 __func__, srq->cidx, srq->pidx, srq->wq_pidx, in defer_srq_wr()
1348 srq->in_use, srq->ooo_count, in defer_srq_wr()
1349 (unsigned long long)wr_id, srq->pending_cidx, in defer_srq_wr()
1350 srq->pending_pidx, srq->pending_in_use); in defer_srq_wr()
1354 t4_srq_produce_pending_wr(srq); in defer_srq_wr()
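The defer_srq_wr() fragments above show the driver parking a receive WR in a software "pending" ring (pending_wrs[pending_pidx]) and bumping the producer side with t4_srq_produce_pending_wr() instead of writing the WR to hardware. A condensed sketch of that helper, reconstructed from the fragments (the copy into the pending slot and the pending-WR field names are assumptions), looks roughly like this:

static void defer_srq_wr(struct t4_srq *srq, union t4_recv_wr *wqe,
			 u64 wr_id, u8 len16)
{
	/* next free slot in the software pending ring */
	struct t4_srq_pending_wr *pwr = &srq->pending_wrs[srq->pending_pidx];

	pr_debug("%s cidx %u pidx %u wq_pidx %u in_use %u ooo_count %u wr_id 0x%llx pending_cidx %u pending_pidx %u pending_in_use %u\n",
		 __func__, srq->cidx, srq->pidx, srq->wq_pidx,
		 srq->in_use, srq->ooo_count,
		 (unsigned long long)wr_id, srq->pending_cidx,
		 srq->pending_pidx, srq->pending_in_use);

	/* stash the wr_id and the raw WR so it can be replayed in order later;
	 * the exact fields and the memcpy of len16 * 16 bytes are assumed */
	pwr->wr_id = wr_id;
	pwr->len16 = len16;
	memcpy(&pwr->wqe, wqe, len16 * 16);

	/* advance the pending producer index and in-use count */
	t4_srq_produce_pending_wr(srq);
}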
1361 struct c4iw_srq *srq; in c4iw_post_srq_recv() local
1368 srq = to_c4iw_srq(ibsrq); in c4iw_post_srq_recv()
1369 spin_lock_irqsave(&srq->lock, flag); in c4iw_post_srq_recv()
1370 num_wrs = t4_srq_avail(&srq->wq); in c4iw_post_srq_recv()
1372 spin_unlock_irqrestore(&srq->lock, flag); in c4iw_post_srq_recv()
1393 wqe->recv.wrid = srq->wq.pidx; in c4iw_post_srq_recv()
1399 if (srq->wq.ooo_count || in c4iw_post_srq_recv()
1400 srq->wq.pending_in_use || in c4iw_post_srq_recv()
1401 srq->wq.sw_rq[srq->wq.pidx].valid) { in c4iw_post_srq_recv()
1402 defer_srq_wr(&srq->wq, wqe, wr->wr_id, len16); in c4iw_post_srq_recv()
1404 srq->wq.sw_rq[srq->wq.pidx].wr_id = wr->wr_id; in c4iw_post_srq_recv()
1405 srq->wq.sw_rq[srq->wq.pidx].valid = 1; in c4iw_post_srq_recv()
1406 c4iw_copy_wr_to_srq(&srq->wq, wqe, len16); in c4iw_post_srq_recv()
1408 __func__, srq->wq.cidx, in c4iw_post_srq_recv()
1409 srq->wq.pidx, srq->wq.wq_pidx, in c4iw_post_srq_recv()
1410 srq->wq.in_use, in c4iw_post_srq_recv()
1412 t4_srq_produce(&srq->wq, len16); in c4iw_post_srq_recv()
1419 t4_ring_srq_db(&srq->wq, idx, len16, wqe); in c4iw_post_srq_recv()
1420 spin_unlock_irqrestore(&srq->lock, flag); in c4iw_post_srq_recv()
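Read together, the c4iw_post_srq_recv() fragments describe the post-receive path: take the SRQ lock, check for free slots with t4_srq_avail(), then either hand the WR to defer_srq_wr() (when out-of-order completions or already-deferred WRs are outstanding) or write it straight into the hardware ring and ring the doorbell. A condensed sketch of that control flow, with the WR building, the multi-WR loop and most error handling trimmed (a paraphrase of the fragments, not the driver source), is:

int c4iw_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
		       const struct ib_recv_wr **bad_wr)
{
	struct c4iw_srq *srq = to_c4iw_srq(ibsrq);
	union t4_recv_wr lwqe, *wqe = &lwqe;
	unsigned long flag;
	u16 idx = 0;
	u8 len16 = 0;

	spin_lock_irqsave(&srq->lock, flag);
	if (!t4_srq_avail(&srq->wq)) {		/* no free slots */
		spin_unlock_irqrestore(&srq->lock, flag);
		*bad_wr = wr;
		return -ENOMEM;
	}

	/* ... build the hardware recv WR into *wqe, compute len16, and tag it
	 * with the current producer index: wqe->recv.wrid = srq->wq.pidx ... */

	if (srq->wq.ooo_count || srq->wq.pending_in_use ||
	    srq->wq.sw_rq[srq->wq.pidx].valid) {
		/* completions are being reaped out of order, or earlier WRs
		 * are still parked: defer this one to keep ordering intact */
		defer_srq_wr(&srq->wq, wqe, wr->wr_id, len16);
	} else {
		/* fast path: remember the wr_id in the software ring, copy the
		 * WR into the hardware queue and advance the producer */
		srq->wq.sw_rq[srq->wq.pidx].wr_id = wr->wr_id;
		srq->wq.sw_rq[srq->wq.pidx].valid = 1;
		c4iw_copy_wr_to_srq(&srq->wq, wqe, len16);
		t4_srq_produce(&srq->wq, len16);
		idx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);	/* doorbell accounting assumed */
	}

	if (idx)
		t4_ring_srq_db(&srq->wq, idx, len16, wqe);
	spin_unlock_irqrestore(&srq->lock, flag);
	return 0;
}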
1622 if (!qhp->srq) { in __flush_qp()
1803 if (qhp->srq) { in rdma_init()
1805 qhp->srq->idx); in rdma_init()
2104 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq); in c4iw_destroy_qp()
2143 if (!attrs->srq) { in c4iw_create_qp()
2172 if (!attrs->srq) { in c4iw_create_qp()
2181 if (!attrs->srq) in c4iw_create_qp()
2188 qhp->wr_waitp, !attrs->srq); in c4iw_create_qp()
2203 if (!attrs->srq) { in c4iw_create_qp()
2231 if (!attrs->srq) { in c4iw_create_qp()
2243 if (!attrs->srq) { in c4iw_create_qp()
2267 if (!attrs->srq) { in c4iw_create_qp()
2279 if (!attrs->srq) { in c4iw_create_qp()
2285 if (!attrs->srq) { in c4iw_create_qp()
2297 if (!attrs->srq) { in c4iw_create_qp()
2307 if (!attrs->srq) { in c4iw_create_qp()
2325 if (!attrs->srq) { in c4iw_create_qp()
2336 if (attrs->srq) in c4iw_create_qp()
2337 qhp->srq = to_c4iw_srq(attrs->srq); in c4iw_create_qp()
2347 if (!attrs->srq) in c4iw_create_qp()
2352 if (!attrs->srq) in c4iw_create_qp()
2360 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq); in c4iw_create_qp()
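The long run of "if (!attrs->srq)" guards in c4iw_create_qp() reflects a single pattern: a QP created on top of an SRQ never allocates or maps its own receive queue and simply records the SRQ it shares. Stripped of the surrounding allocation code, the shape (paraphrased from the fragments) is:

	if (!attrs->srq) {
		/* size, allocate and map the QP's private receive queue;
		 * every RQ-only step in c4iw_create_qp() sits behind the
		 * same guard and is skipped when an SRQ is attached */
	}

	if (attrs->srq)
		qhp->srq = to_c4iw_srq(attrs->srq);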
2425 void c4iw_dispatch_srq_limit_reached_event(struct c4iw_srq *srq) in c4iw_dispatch_srq_limit_reached_event() argument
2429 event.device = &srq->rhp->ibdev; in c4iw_dispatch_srq_limit_reached_event()
2430 event.element.srq = &srq->ibsrq; in c4iw_dispatch_srq_limit_reached_event()
2439 struct c4iw_srq *srq = to_c4iw_srq(ib_srq); in c4iw_modify_srq() local
2446 c4iw_dispatch_srq_limit_reached_event(srq); in c4iw_modify_srq()
2457 srq->armed = true; in c4iw_modify_srq()
2458 srq->srq_limit = attr->srq_limit; in c4iw_modify_srq()
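c4iw_dispatch_srq_limit_reached_event() and c4iw_modify_srq() together implement SRQ limit arming: modify stores the limit and arms the SRQ, and the dispatch helper raises an ib_event against the SRQ once the limit is crossed (the listing also shows modify itself invoking the helper, apparently the path by which a user-mode consumer reports that the limit was reached). A sketch reconstructed from the fragments; the event type constant, the ib_dispatch_event() call and the mask check are assumptions, not shown in the listing:

void c4iw_dispatch_srq_limit_reached_event(struct c4iw_srq *srq)
{
	struct ib_event event = {};

	event.device = &srq->rhp->ibdev;
	event.element.srq = &srq->ibsrq;
	event.event = IB_EVENT_SRQ_LIMIT_REACHED;	/* assumed */
	ib_dispatch_event(&event);			/* assumed */
}

int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
		    enum ib_srq_attr_mask srq_attr_mask,
		    struct ib_udata *udata)
{
	struct c4iw_srq *srq = to_c4iw_srq(ib_srq);

	/* other mask handling trimmed */
	if (srq_attr_mask & IB_SRQ_LIMIT) {		/* assumed mask check */
		srq->armed = true;
		srq->srq_limit = attr->srq_limit;
	}
	return 0;
}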
2482 static void free_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx, in free_srq_queue() argument
2485 struct c4iw_rdev *rdev = &srq->rhp->rdev; in free_srq_queue()
2486 struct sk_buff *skb = srq->destroy_skb; in free_srq_queue()
2487 struct t4_srq *wq = &srq->wq; in free_srq_queue()
2503 res->u.srq.restype = FW_RI_RES_TYPE_SRQ; in free_srq_queue()
2504 res->u.srq.op = FW_RI_RES_OP_RESET; in free_srq_queue()
2505 res->u.srq.srqid = cpu_to_be32(srq->idx); in free_srq_queue()
2506 res->u.srq.eqid = cpu_to_be32(wq->qid); in free_srq_queue()
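free_srq_queue() tears the SRQ down by sending the firmware a FW_RI_RES work request with the RESET opcode, carried in the skb that was set aside at create time (srq->destroy_skb), presumably so the destroy path does not depend on a fresh allocation succeeding. The fields visible in the fragments are:

	/* payload of the FW_RI_RES work request carried in srq->destroy_skb */
	res->u.srq.restype = FW_RI_RES_TYPE_SRQ;
	res->u.srq.op = FW_RI_RES_OP_RESET;		/* destroy the SRQ */
	res->u.srq.srqid = cpu_to_be32(srq->idx);
	res->u.srq.eqid = cpu_to_be32(wq->qid);
	/* the WR is then sent to the firmware and the caller waits on
	 * wr_waitp before freeing the queue memory and its resources
	 * (the send/wait and free steps are assumed, not in the listing) */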
2519 static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx, in alloc_srq_queue() argument
2522 struct c4iw_rdev *rdev = &srq->rhp->rdev; in alloc_srq_queue()
2524 struct t4_srq *wq = &srq->wq; in alloc_srq_queue()
2541 wq->pending_wrs = kcalloc(srq->wq.size, in alloc_srq_queue()
2542 sizeof(*srq->wq.pending_wrs), in alloc_srq_queue()
2593 res->u.srq.restype = FW_RI_RES_TYPE_SRQ; in alloc_srq_queue()
2594 res->u.srq.op = FW_RI_RES_OP_WRITE; in alloc_srq_queue()
2601 res->u.srq.eqid = cpu_to_be32(wq->qid); in alloc_srq_queue()
2602 res->u.srq.fetchszm_to_iqid = in alloc_srq_queue()
2608 res->u.srq.dcaen_to_eqsize = in alloc_srq_queue()
2616 res->u.srq.eqaddr = cpu_to_be64(wq->dma_addr); in alloc_srq_queue()
2617 res->u.srq.srqid = cpu_to_be32(srq->idx); in alloc_srq_queue()
2618 res->u.srq.pdid = cpu_to_be32(srq->pdid); in alloc_srq_queue()
2619 res->u.srq.hwsrqsize = cpu_to_be32(wq->rqt_size); in alloc_srq_queue()
2620 res->u.srq.hwsrqaddr = cpu_to_be32(wq->rqt_hwaddr - in alloc_srq_queue()
2631 __func__, srq->idx, wq->qid, srq->pdid, wq->queue, in alloc_srq_queue()
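alloc_srq_queue() is the mirror image: it allocates the software pending-WR array and the DMA queue memory, then issues a FW_RI_RES work request with the WRITE opcode that hands the firmware the queue's DMA address, the RQT size and address, and the owning PD. A condensed view of the fields that appear in the fragments (the GFP flag and the bitfield macros for the fetchszm/dcaen words are omitted or assumed):

	/* software ring for deferred WRs, sized like the SRQ itself */
	wq->pending_wrs = kcalloc(srq->wq.size,
				  sizeof(*srq->wq.pending_wrs), GFP_KERNEL);

	/* FW_RI_RES work request describing the new SRQ to the firmware */
	res->u.srq.restype = FW_RI_RES_TYPE_SRQ;
	res->u.srq.op = FW_RI_RES_OP_WRITE;
	res->u.srq.eqid = cpu_to_be32(wq->qid);
	/* fetchszm_to_iqid / dcaen_to_eqsize pack the CIDX-increment, PCI
	 * channel, associated IQ and queue-size fields (macros omitted) */
	res->u.srq.eqaddr = cpu_to_be64(wq->dma_addr);	/* DMA address of the ring */
	res->u.srq.srqid = cpu_to_be32(srq->idx);
	res->u.srq.pdid = cpu_to_be32(srq->pdid);
	res->u.srq.hwsrqsize = cpu_to_be32(wq->rqt_size);
	/* hwsrqaddr: wq->rqt_hwaddr relative to a base address whose
	 * expression is truncated in the listing above */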
2654 void c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16) in c4iw_copy_wr_to_srq() argument
2659 dst = (u64 *)((u8 *)srq->queue + srq->wq_pidx * T4_EQ_ENTRY_SIZE); in c4iw_copy_wr_to_srq()
2662 if (dst >= (u64 *)&srq->queue[srq->size]) in c4iw_copy_wr_to_srq()
2663 dst = (u64 *)srq->queue; in c4iw_copy_wr_to_srq()
2665 if (dst >= (u64 *)&srq->queue[srq->size]) in c4iw_copy_wr_to_srq()
2666 dst = (u64 *)srq->queue; in c4iw_copy_wr_to_srq()
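c4iw_copy_wr_to_srq() copies the built WR into the hardware ring 64 bits at a time, starting at the current producer offset (wq_pidx * T4_EQ_ENTRY_SIZE) and wrapping back to the start of the queue whenever the destination runs past the end. The fragments give most of the function; reconstructed (the loop structure is inferred), it reads:

void c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16)
{
	u64 *src, *dst;

	src = (u64 *)wqe;
	dst = (u64 *)((u8 *)srq->queue + srq->wq_pidx * T4_EQ_ENTRY_SIZE);
	while (len16) {
		*dst++ = *src++;
		if (dst >= (u64 *)&srq->queue[srq->size])
			dst = (u64 *)srq->queue;	/* wrap to the start */
		*dst++ = *src++;
		if (dst >= (u64 *)&srq->queue[srq->size])
			dst = (u64 *)srq->queue;
		len16--;				/* two u64s per 16-byte unit */
	}
}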
2676 struct c4iw_srq *srq = to_c4iw_srq(ib_srq); in c4iw_create_srq() local
2690 if (!rhp->rdev.lldi.vr->srq.size) in c4iw_create_srq()
2706 srq->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL); in c4iw_create_srq()
2707 if (!srq->wr_waitp) in c4iw_create_srq()
2710 srq->idx = c4iw_alloc_srq_idx(&rhp->rdev); in c4iw_create_srq()
2711 if (srq->idx < 0) { in c4iw_create_srq()
2717 srq->destroy_skb = alloc_skb(wr_len, GFP_KERNEL); in c4iw_create_srq()
2718 if (!srq->destroy_skb) { in c4iw_create_srq()
2723 srq->rhp = rhp; in c4iw_create_srq()
2724 srq->pdid = php->pdid; in c4iw_create_srq()
2726 srq->wq.size = rqsize; in c4iw_create_srq()
2727 srq->wq.memsize = in c4iw_create_srq()
2729 sizeof(*srq->wq.queue); in c4iw_create_srq()
2731 srq->wq.memsize = roundup(srq->wq.memsize, PAGE_SIZE); in c4iw_create_srq()
2733 ret = alloc_srq_queue(srq, ucontext ? &ucontext->uctx : in c4iw_create_srq()
2734 &rhp->rdev.uctx, srq->wr_waitp); in c4iw_create_srq()
2740 srq->flags = T4_SRQ_LIMIT_SUPPORT; in c4iw_create_srq()
2754 uresp.flags = srq->flags; in c4iw_create_srq()
2756 uresp.srqid = srq->wq.qid; in c4iw_create_srq()
2757 uresp.srq_size = srq->wq.size; in c4iw_create_srq()
2758 uresp.srq_memsize = srq->wq.memsize; in c4iw_create_srq()
2759 uresp.rqt_abs_idx = srq->wq.rqt_abs_idx; in c4iw_create_srq()
2770 srq_key_mm->addr = virt_to_phys(srq->wq.queue); in c4iw_create_srq()
2771 srq_key_mm->len = PAGE_ALIGN(srq->wq.memsize); in c4iw_create_srq()
2774 srq_db_key_mm->addr = (u64)(unsigned long)srq->wq.bar2_pa; in c4iw_create_srq()
2780 __func__, srq->wq.qid, srq->idx, srq->wq.size, in c4iw_create_srq()
2781 (unsigned long)srq->wq.memsize, attrs->attr.max_wr); in c4iw_create_srq()
2783 spin_lock_init(&srq->lock); in c4iw_create_srq()
2791 free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx, in c4iw_create_srq()
2792 srq->wr_waitp); in c4iw_create_srq()
2794 kfree_skb(srq->destroy_skb); in c4iw_create_srq()
2796 c4iw_free_srq_idx(&rhp->rdev, srq->idx); in c4iw_create_srq()
2798 c4iw_put_wr_wait(srq->wr_waitp); in c4iw_create_srq()
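The c4iw_create_srq() fragments outline the creation sequence and its unwind order: refuse if the firmware exposes no SRQ region (lldi.vr->srq.size == 0), allocate the wait object, SRQ index and destroy skb up front, size and allocate the queue, then fill the user response (queue id, size, memsize, RQT index, flags) and the mmap entries for the queue memory and the bar2 doorbell page. A heavily trimmed skeleton paraphrasing the fragments; locals such as rhp, php, ucontext, rqsize and wr_len are derived earlier in the real function and omitted here:

int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs,
		    struct ib_udata *udata)
{
	struct c4iw_srq *srq = to_c4iw_srq(ib_srq);
	int ret;

	if (!rhp->rdev.lldi.vr->srq.size)	/* no SRQ region in the FW config */
		return -EINVAL;

	srq->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!srq->wr_waitp)
		return -ENOMEM;

	srq->idx = c4iw_alloc_srq_idx(&rhp->rdev);
	if (srq->idx < 0) {
		ret = -ENOMEM;
		goto err_free_wr_wait;
	}

	/* pre-allocate the skb used later by free_srq_queue() */
	srq->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!srq->destroy_skb) {
		ret = -ENOMEM;
		goto err_free_srq_idx;
	}

	srq->rhp = rhp;
	srq->pdid = php->pdid;
	srq->wq.size = rqsize;
	srq->wq.memsize = rqsize * sizeof(*srq->wq.queue);	/* simplified */
	srq->wq.memsize = roundup(srq->wq.memsize, PAGE_SIZE);

	ret = alloc_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
			      srq->wr_waitp);
	if (ret)
		goto err_free_skb;
	srq->flags = T4_SRQ_LIMIT_SUPPORT;

	/* ... fill uresp (srqid, srq_size, srq_memsize, rqt_abs_idx, flags),
	 * copy it to user space, and insert the two mmap entries for
	 * srq->wq.queue and srq->wq.bar2_pa; failures there unwind through
	 * err_free_queue below ... */

	spin_lock_init(&srq->lock);
	return 0;

err_free_queue:	/* reached only from the trimmed uresp/mmap error paths */
	free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
		       srq->wr_waitp);
err_free_skb:
	kfree_skb(srq->destroy_skb);
err_free_srq_idx:
	c4iw_free_srq_idx(&rhp->rdev, srq->idx);
err_free_wr_wait:
	c4iw_put_wr_wait(srq->wr_waitp);
	return ret;
}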
2805 struct c4iw_srq *srq; in c4iw_destroy_srq() local
2808 srq = to_c4iw_srq(ibsrq); in c4iw_destroy_srq()
2809 rhp = srq->rhp; in c4iw_destroy_srq()
2811 pr_debug("%s id %d\n", __func__, srq->wq.qid); in c4iw_destroy_srq()
2814 free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx, in c4iw_destroy_srq()
2815 srq->wr_waitp); in c4iw_destroy_srq()
2816 c4iw_free_srq_idx(&rhp->rdev, srq->idx); in c4iw_destroy_srq()
2817 c4iw_put_wr_wait(srq->wr_waitp); in c4iw_destroy_srq()
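c4iw_destroy_srq() releases the same resources in the same order as the create-path unwind: the firmware queue via free_srq_queue(), then the SRQ index, and finally the wait object. Condensed from the fragments; the return type and the ucontext lookup via rdma_udata_to_drv_context() are assumptions here:

int c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct c4iw_srq *srq = to_c4iw_srq(ibsrq);
	struct c4iw_dev *rhp = srq->rhp;
	struct c4iw_ucontext *ucontext =
		rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
					  ibucontext);	/* assumed helper */

	pr_debug("%s id %d\n", __func__, srq->wq.qid);

	free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
		       srq->wr_waitp);
	c4iw_free_srq_idx(&rhp->rdev, srq->idx);
	c4iw_put_wr_wait(srq->wr_waitp);
	return 0;
}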