Lines matching refs: rhp
720 cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]); in post_write_cmpl()
744 cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]); in post_write_cmpl()
807 PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr)>>3)); in build_tpte_memreg()
916 xa_lock_irqsave(&qhp->rhp->qps, flags); in ring_kernel_sq_db()
918 if (qhp->rhp->db_state == NORMAL) in ring_kernel_sq_db()
921 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); in ring_kernel_sq_db()
925 xa_unlock_irqrestore(&qhp->rhp->qps, flags); in ring_kernel_sq_db()
933 xa_lock_irqsave(&qhp->rhp->qps, flags); in ring_kernel_rq_db()
935 if (qhp->rhp->db_state == NORMAL) in ring_kernel_rq_db()
938 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); in ring_kernel_rq_db()
942 xa_unlock_irqrestore(&qhp->rhp->qps, flags); in ring_kernel_rq_db()
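The eight hits above (lines 916-942) outline the driver's doorbell flow-control scheme: the rhp->qps xarray lock serializes against db_state changes, and when the device is not in the NORMAL state the QP is parked on rhp->db_fc_list instead of ringing the doorbell. A sketch of the SQ variant, with the lines between the hits filled in from context (the qhp->lock nesting and the wq_pidx_inc bookkeeping are assumptions based on the surrounding driver code):

	static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
	{
		unsigned long flags;

		xa_lock_irqsave(&qhp->rhp->qps, flags);		/* line 916 */
		spin_lock(&qhp->lock);
		if (qhp->rhp->db_state == NORMAL)		/* line 918 */
			t4_ring_sq_db(&qhp->wq, inc, NULL);
		else {
			/* doorbell overflow: defer the ring until db_state recovers */
			add_to_fc_list(&qhp->rhp->db_fc_list,
				       &qhp->db_fc_entry);	/* line 921 */
			qhp->wq.sq.wq_pidx_inc += inc;
		}
		spin_unlock(&qhp->lock);
		xa_unlock_irqrestore(&qhp->rhp->qps, flags);	/* line 925 */
		return 0;
	}

ring_kernel_rq_db() (lines 933-942) follows the same shape, ringing the RQ doorbell and bumping qhp->wq.rq.wq_pidx_inc instead.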
1085 struct c4iw_dev *rhp; in c4iw_post_send() local
1093 rhp = qhp->rhp; in c4iw_post_send()
1121 if (qhp->rhp->rdev.lldi.write_cmpl_support && in c4iw_post_send()
1122 CHELSIO_CHIP_VERSION(qhp->rhp->rdev.lldi.adapter_type) >= in c4iw_post_send()
1164 if (unlikely(!rhp->rdev.lldi.write_w_imm_support)) { in c4iw_post_send()
1180 c4iw_invalidate_mr(rhp, wr->sg_list[0].lkey); in c4iw_post_send()
1196 if (rhp->rdev.lldi.fr_nsmr_tpte_wr_support && in c4iw_post_send()
1205 rhp->rdev.lldi.ulptx_memwrite_dsgl); in c4iw_post_send()
1218 c4iw_invalidate_mr(rhp, wr->ex.invalidate_rkey); in c4iw_post_send()
1237 rhp->rdev.lldi.ports[0]); in c4iw_post_send()
1251 if (!rhp->rdev.status_page->db_off) { in c4iw_post_send()
1312 qhp->rhp->rdev.lldi.ports[0]); in c4iw_post_receive()
1331 if (!qhp->rhp->rdev.status_page->db_off) { in c4iw_post_receive()
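Lines 1251 and 1331 are the same decision made in the two post paths: if the shared status page has not turned doorbells off, ring the user doorbell directly while still holding the QP lock; otherwise drop the lock and take the flow-controlled kernel path sketched above. The tail of c4iw_post_send(), for illustration (idx, wqe, flag, and err are locals assumed from context):

	if (!rhp->rdev.status_page->db_off) {		/* line 1251 */
		t4_ring_sq_db(&qhp->wq, idx, wqe);	/* fast path: user doorbell */
		spin_unlock_irqrestore(&qhp->lock, flag);
	} else {
		spin_unlock_irqrestore(&qhp->lock, flag);
		ring_kernel_sq_db(qhp, idx);		/* flow-controlled path */
	}
	return err;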
1590 c4iw_ofld_send(&qhp->rhp->rdev, skb); in post_terminate()
1692 static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, in rdma_fini() argument
1718 ret = c4iw_ref_send_wait(&rhp->rdev, skb, ep->com.wr_waitp, in rdma_fini()
1749 static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) in rdma_init() argument
1763 ret = alloc_ird(rhp, qhp->attr.max_ird); in rdma_init()
1810 rhp->rdev.lldi.vr->rq.start); in rdma_init()
1821 ret = c4iw_ref_send_wait(&rhp->rdev, skb, qhp->ep->com.wr_waitp, in rdma_init()
1826 free_ird(rhp, qhp->attr.max_ird); in rdma_init()
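In rdma_init() (lines 1749-1826), the IRD reservation taken up front is released again if the firmware handshake fails: line 1826 is the error-path counterpart of line 1763. A condensed sketch, with the FW_RI_WR message construction elided:

	static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
	{
		struct sk_buff *skb;
		int ret;

		ret = alloc_ird(rhp, qhp->attr.max_ird);	/* line 1763 */
		if (ret)
			return ret;
		/* ... allocate skb and build the FW_RI_WR init message ... */
		ret = c4iw_ref_send_wait(&rhp->rdev, skb, qhp->ep->com.wr_waitp,
					 qhp->ep->hwtid, qhp->wq.sq.qid,
					 __func__);		/* line 1821 */
		if (ret)
			free_ird(rhp, qhp->attr.max_ird);	/* line 1826 */
		return ret;
	}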
1832 int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, in c4iw_modify_qp() argument
1871 if (attrs->max_ird > cur_max_read_depth(rhp)) { in c4iw_modify_qp()
1918 ret = rdma_init(rhp, qhp); in c4iw_modify_qp()
1942 ret = rdma_fini(rhp, qhp, ep); in c4iw_modify_qp()
1958 ret = rdma_fini(rhp, qhp, ep); in c4iw_modify_qp()
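Within c4iw_modify_qp(), line 1918 brings the firmware QP up when the state machine moves toward RTS, while the two rdma_fini() call sites belong to the RTS state handling: line 1942 is the graceful-close transition and line 1958 the internally driven terminate. Schematically (a condensed sketch; the bookkeeping around each transition is elided and the exact case structure is an assumption based on the upstream driver):

	case C4IW_QP_STATE_RTS:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_CLOSING:
			ret = rdma_fini(rhp, qhp, ep);		/* line 1942 */
			break;
		case C4IW_QP_STATE_TERMINATE:
			if (internal)
				ret = rdma_fini(rhp, qhp, ep);	/* line 1958 */
			break;
		/* ... */
		}
		break;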
2073 struct c4iw_dev *rhp; in c4iw_destroy_qp() local
2079 rhp = qhp->rhp; in c4iw_destroy_qp()
2084 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); in c4iw_destroy_qp()
2086 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); in c4iw_destroy_qp()
2089 xa_lock_irq(&rhp->qps); in c4iw_destroy_qp()
2090 __xa_erase(&rhp->qps, qhp->wq.sq.qid); in c4iw_destroy_qp()
2093 xa_unlock_irq(&rhp->qps); in c4iw_destroy_qp()
2094 free_ird(rhp, qhp->attr.max_ird); in c4iw_destroy_qp()
2103 destroy_qp(&rhp->rdev, &qhp->wq, in c4iw_destroy_qp()
2104 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq); in c4iw_destroy_qp()
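c4iw_destroy_qp() (lines 2073-2104) tears down in a fixed order: force the QP to ERROR so it flushes (lines 2084/2086, the TERMINATE case taking the "internal" variant of the modify call), unhook the qid from the rhp->qps xarray under its lock (lines 2089-2093), return the IRD budget (line 2094), and only then free the hardware queues (lines 2103-2104). Condensed, with the reference-drop and wait steps elided:

	attrs.next_state = C4IW_QP_STATE_ERROR;
	if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	else
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);

	xa_lock_irq(&rhp->qps);
	__xa_erase(&rhp->qps, qhp->wq.sq.qid);
	xa_unlock_irq(&rhp->qps);
	free_ird(rhp, qhp->attr.max_ird);

	/* ... drop QP references and wait for the last one ... */
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);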
2115 struct c4iw_dev *rhp; in c4iw_create_qp() local
2134 rhp = php->rhp; in c4iw_create_qp()
2135 schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid); in c4iw_create_qp()
2136 rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid); in c4iw_create_qp()
2144 if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size) in c4iw_create_qp()
2151 if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size) in c4iw_create_qp()
2169 (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) * in c4iw_create_qp()
2175 (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) * in c4iw_create_qp()
2186 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq, in c4iw_create_qp()
2187 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, in c4iw_create_qp()
2196 qhp->rhp = rhp; in c4iw_create_qp()
2221 ret = xa_insert_irq(&rhp->qps, qhp->wq.sq.qid, qhp, GFP_KERNEL); in c4iw_create_qp()
2261 if (rhp->rdev.lldi.write_w_imm_support) in c4iw_create_qp()
2263 uresp.qid_mask = rhp->rdev.qpmask; in c4iw_create_qp()
2317 (pci_resource_start(rhp->rdev.lldi.pdev, 0) + in c4iw_create_qp()
2357 xa_erase_irq(&rhp->qps, qhp->wq.sq.qid); in c4iw_create_qp()
2359 destroy_qp(&rhp->rdev, &qhp->wq, in c4iw_create_qp()
2360 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq); in c4iw_create_qp()
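The error unwind at the bottom of c4iw_create_qp() (lines 2357-2360) is the mirror image of the setup: the qid inserted at line 2221 is erased before the queues created at line 2186 are destroyed. Roughly (the label names here are illustrative, not taken from the source):

	err_remove_handle:
		xa_erase_irq(&rhp->qps, qhp->wq.sq.qid);	/* undoes line 2221 */
	err_destroy_qp:
		destroy_qp(&rhp->rdev, &qhp->wq,		/* undoes line 2186 */
			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
			   !attrs->srq);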
2371 struct c4iw_dev *rhp; in c4iw_ib_modify_qp() local
2387 rhp = qhp->rhp; in c4iw_ib_modify_qp()
2412 if (!is_t4(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) && in c4iw_ib_modify_qp()
2416 return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0); in c4iw_ib_modify_qp()
2429 event.device = &srq->rhp->ibdev; in c4iw_dispatch_srq_limit_reached_event()
2485 struct c4iw_rdev *rdev = &srq->rhp->rdev; in free_srq_queue()
2522 struct c4iw_rdev *rdev = &srq->rhp->rdev; in alloc_srq_queue()
2675 struct c4iw_dev *rhp; in c4iw_create_srq() local
2688 rhp = php->rhp; in c4iw_create_srq()
2690 if (!rhp->rdev.lldi.vr->srq.size) in c4iw_create_srq()
2692 if (attrs->attr.max_wr > rhp->rdev.hw_queue.t4_max_rq_size) in c4iw_create_srq()
2710 srq->idx = c4iw_alloc_srq_idx(&rhp->rdev); in c4iw_create_srq()
2723 srq->rhp = rhp; in c4iw_create_srq()
2728 (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) * in c4iw_create_srq()
2734 &rhp->rdev.uctx, srq->wr_waitp); in c4iw_create_srq()
2739 if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6) in c4iw_create_srq()
2755 uresp.qid_mask = rhp->rdev.qpmask; in c4iw_create_srq()
2791 free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx, in c4iw_create_srq()
2796 c4iw_free_srq_idx(&rhp->rdev, srq->idx); in c4iw_create_srq()
2804 struct c4iw_dev *rhp; in c4iw_destroy_srq() local
2809 rhp = srq->rhp; in c4iw_destroy_srq()
2814 free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx, in c4iw_destroy_srq()
2816 c4iw_free_srq_idx(&rhp->rdev, srq->idx); in c4iw_destroy_srq()
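c4iw_destroy_srq() (lines 2804-2816) likewise undoes c4iw_create_srq() in reverse: free the queue memory against the correct user context (line 2814, pairing with the queue allocation around line 2734), then return the SRQ index to the rdev allocator (line 2816, pairing with line 2710). A condensed sketch (the return type and trailing cleanup vary by kernel version):

	void c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
	{
		struct c4iw_srq *srq = to_c4iw_srq(ibsrq);
		struct c4iw_dev *rhp = srq->rhp;		/* line 2809 */
		struct c4iw_ucontext *ucontext =
			rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
						  ibucontext);

		free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
			       srq->wr_waitp);			/* line 2814 */
		c4iw_free_srq_idx(&rhp->rdev, srq->idx);	/* line 2816 */
	}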