Searched refs:hr_qp (Results 1 – 5 of 5) sorted by relevance
Directory: /OK3568_Linux_fs/kernel/drivers/infiniband/hw/hns/
hns_roce_qp.c

     48  struct hns_roce_qp *hr_qp = container_of(flush_work,                   in flush_work_handle() local
     58  if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {     in flush_work_handle()
     59  ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);        in flush_work_handle()
     69  if (atomic_dec_and_test(&hr_qp->refcount))                             in flush_work_handle()
     70  complete(&hr_qp->free);                                                in flush_work_handle()
     73  void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)   in init_flush_work() argument
     75  struct hns_roce_work *flush_work = &hr_qp->flush_work;                 in init_flush_work()
     79  atomic_inc(&hr_qp->refcount);                                          in init_flush_work()
    114  static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,            in hns_roce_ib_qp_event() argument
    118  struct ib_qp *ibqp = &hr_qp->ibqp;                                     in hns_roce_ib_qp_event()
    [all …]
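The matches above show the driver's deferred flush pattern: init_flush_work() takes a reference on the QP and queues work, and flush_work_handle() moves the QP to the error state at most once (via test_and_clear_bit on HNS_ROCE_FLUSH_FLAG) before dropping the reference, with the last drop completing &hr_qp->free so the destroy path can proceed. Below is a minimal user-space sketch of that refcount-plus-flag shape; everything suffixed _sketch is a hypothetical stand-in, not the driver's API, and the work runs inline instead of on a workqueue.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define FLUSH_FLAG_SKETCH 0x1ul    /* stands in for HNS_ROCE_FLUSH_FLAG */

    struct qp_sketch {
        atomic_int refcount;           /* last dropper signals the free path   */
        atomic_ulong flush_flag;       /* bit 0 set: a flush is pending        */
        bool freed;                    /* stands in for complete(&hr_qp->free) */
    };

    /* Stands in for hns_roce_modify_qp(..., IB_QPS_ERR, ...). */
    static void modify_qp_to_error_sketch(struct qp_sketch *qp)
    {
        (void)qp;
        printf("QP moved to the error state\n");
    }

    /* Mirrors flush_work_handle(): run the flush at most once, then
     * drop the reference taken when the work was queued. */
    static void flush_work_handle_sketch(struct qp_sketch *qp)
    {
        unsigned long old = atomic_fetch_and(&qp->flush_flag, ~FLUSH_FLAG_SKETCH);

        if (old & FLUSH_FLAG_SKETCH)                   /* test_and_clear_bit() */
            modify_qp_to_error_sketch(qp);

        if (atomic_fetch_sub(&qp->refcount, 1) == 1)   /* atomic_dec_and_test() */
            qp->freed = true;
    }

    /* Mirrors init_flush_work(): pin the QP, mark a flush pending, and
     * "queue" the work (called inline here instead of queue_work()). */
    static void init_flush_work_sketch(struct qp_sketch *qp)
    {
        atomic_fetch_add(&qp->refcount, 1);
        atomic_fetch_or(&qp->flush_flag, FLUSH_FLAG_SKETCH);
        flush_work_handle_sketch(qp);
    }

    int main(void)
    {
        struct qp_sketch qp = { .refcount = 1, .flush_flag = 0, .freed = false };

        init_flush_work_sketch(&qp);    /* flush runs once, work ref dropped */

        /* Destroy path: drop the creation reference and wait for zero. */
        if (atomic_fetch_sub(&qp.refcount, 1) == 1)
            qp.freed = true;
        printf("freed: %s\n", qp.freed ? "yes" : "no");
        return 0;
    }

The reference held across the queued work is what keeps the QP alive until the flush has run, even if the destroy path races with it.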
hns_roce_hw_v1.c

    348  struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);                    in hns_roce_v1_post_recv() local
    360  spin_lock_irqsave(&hr_qp->rq.lock, flags);                     in hns_roce_v1_post_recv()
    363  if (hns_roce_wq_overflow(&hr_qp->rq, nreq,                     in hns_roce_v1_post_recv()
    364  hr_qp->ibqp.recv_cq)) {                                        in hns_roce_v1_post_recv()
    370  wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);   in hns_roce_v1_post_recv()
    372  if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {                in hns_roce_v1_post_recv()
    374  wr->num_sge, hr_qp->rq.max_gs);                                in hns_roce_v1_post_recv()
    380  ctrl = hns_roce_get_recv_wqe(hr_qp, wqe_idx);                  in hns_roce_v1_post_recv()
    392  hr_qp->rq.wrid[wqe_idx] = wr->wr_id;                           in hns_roce_v1_post_recv()
    397  hr_qp->rq.head += nreq;                                        in hns_roce_v1_post_recv()
    [all …]
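Line 370 is the core of the ring arithmetic: rq.head is a free-running counter, and the slot index is recovered with a mask, which only works when rq.wqe_cnt is a power of two. A small standalone illustration of that wrap-around (the constants are made up for the demo):

    #include <assert.h>
    #include <stdio.h>

    #define WQE_CNT 8u    /* illustrative ring size; must be a power of two */

    int main(void)
    {
        unsigned int head = 6;    /* free-running counter, as in hr_qp->rq.head */
        unsigned int nreq;

        /* The mask trick silently breaks for non-power-of-two sizes. */
        assert((WQE_CNT & (WQE_CNT - 1)) == 0);

        /* Post four work requests: slots wrap 6, 7, 0, 1. In the driver,
         * hns_roce_wq_overflow() first rejects the post if the ring is full. */
        for (nreq = 0; nreq < 4; nreq++) {
            unsigned int wqe_idx = (head + nreq) & (WQE_CNT - 1);
            printf("wr %u -> slot %u\n", nreq, wqe_idx);
        }

        head += 4;    /* hr_qp->rq.head += nreq: the counter never wraps back */
        printf("new head counter: %u\n", head);
        return 0;
    }

Keeping head free-running rather than wrapping it lets the full/empty distinction fall out of comparing head and tail directly, with the mask applied only at the point of indexing.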
hns_roce_hw_v2.c

    351  struct hns_roce_qp *hr_qp)                                     in check_send_valid() argument
    354  struct ib_qp *ibqp = &hr_qp->ibqp;                             in check_send_valid()
    362  } else if (unlikely(hr_qp->state == IB_QPS_RESET ||            in check_send_valid()
    363  hr_qp->state == IB_QPS_INIT ||                                 in check_send_valid()
    364  hr_qp->state == IB_QPS_RTR)) {                                 in check_send_valid()
    366  hr_qp->state);                                                 in check_send_valid()
    708  struct hns_roce_qp *hr_qp)                                     in check_recv_valid() argument
    712  else if (hr_qp->state == IB_QPS_RESET)                         in check_recv_valid()
    723  struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);                    in hns_roce_v2_post_recv() local
    734  spin_lock_irqsave(&hr_qp->rq.lock, flags);                     in hns_roce_v2_post_recv()
    [all …]
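check_send_valid() and check_recv_valid() gate posting on the IB QP state machine: sends are rejected while the QP is still in RESET, INIT, or RTR (the send queue is only usable once the QP reaches RTS), while receives only require that the QP is out of RESET. A compilable sketch of that gating, with an illustrative enum standing in for the ib_qp_state values:

    #include <errno.h>
    #include <stdio.h>

    /* Mirrors the order of the IB verbs QP states. */
    enum qp_state { QPS_RESET, QPS_INIT, QPS_RTR, QPS_RTS, QPS_SQD, QPS_ERR };

    static int check_send_valid_sketch(enum qp_state state)
    {
        /* Send WQEs are invalid until the QP has reached RTS. */
        if (state == QPS_RESET || state == QPS_INIT || state == QPS_RTR)
            return -EINVAL;
        return 0;
    }

    static int check_recv_valid_sketch(enum qp_state state)
    {
        /* Receive WQEs may be posted from INIT onward, but not in RESET. */
        if (state == QPS_RESET)
            return -EINVAL;
        return 0;
    }

    int main(void)
    {
        printf("send in RTR:  %d\n", check_send_valid_sketch(QPS_RTR));
        printf("send in RTS:  %d\n", check_send_valid_sketch(QPS_RTS));
        printf("recv in INIT: %d\n", check_recv_valid_sketch(QPS_INIT));
        return 0;
    }

The asymmetry is deliberate: receive buffers must be in place before the QP can accept incoming traffic in RTR, so receives are allowed earlier in the state sequence than sends.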
hns_roce_main.c

    823  struct hns_roce_qp *hr_qp;                                     in hns_roce_handle_device_err() local
    832  list_for_each_entry(hr_qp, &hr_dev->qp_list, node) {           in hns_roce_handle_device_err()
    833  spin_lock_irqsave(&hr_qp->sq.lock, flags_qp);                  in hns_roce_handle_device_err()
    834  if (hr_qp->sq.tail != hr_qp->sq.head)                          in hns_roce_handle_device_err()
    835  check_and_get_armed_cq(&cq_list, hr_qp->ibqp.send_cq);         in hns_roce_handle_device_err()
    836  spin_unlock_irqrestore(&hr_qp->sq.lock, flags_qp);             in hns_roce_handle_device_err()
    838  spin_lock_irqsave(&hr_qp->rq.lock, flags_qp);                  in hns_roce_handle_device_err()
    839  if ((!hr_qp->ibqp.srq) && (hr_qp->rq.tail != hr_qp->rq.head))  in hns_roce_handle_device_err()
    840  check_and_get_armed_cq(&cq_list, hr_qp->ibqp.recv_cq);         in hns_roce_handle_device_err()
    841  spin_unlock_irqrestore(&hr_qp->rq.lock, flags_qp);             in hns_roce_handle_device_err()
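hns_roce_handle_device_err() sweeps every QP on hr_dev->qp_list and, for any work queue that still has unconsumed entries (tail != head), records the attached CQ so it can be flushed afterwards; QPs attached to an SRQ are skipped on the receive side because they have no private RQ. The sketch below models the sweep without the spinlocks and kernel list machinery; all the types and the collect helper are illustrative stand-ins:

    #include <stdbool.h>
    #include <stdio.h>

    struct wq_sketch { unsigned int head, tail; };

    struct qp_sketch {
        struct wq_sketch sq, rq;
        bool has_srq;               /* receives go through a shared queue */
        int send_cq, recv_cq;       /* CQ ids, standing in for ib_cq *    */
    };

    static void collect_cq_sketch(int cq)
    {
        /* check_and_get_armed_cq() in the driver also de-dups the list. */
        printf("flush CQ %d\n", cq);
    }

    int main(void)
    {
        struct qp_sketch qps[2] = {
            { .sq = { 3, 1 }, .rq = { 0, 0 }, .has_srq = false,
              .send_cq = 10, .recv_cq = 11 },
            { .sq = { 2, 2 }, .rq = { 5, 4 }, .has_srq = true,
              .send_cq = 12, .recv_cq = 13 },
        };

        /* In the driver each queue is inspected under its own spinlock. */
        for (int i = 0; i < 2; i++) {
            if (qps[i].sq.tail != qps[i].sq.head)
                collect_cq_sketch(qps[i].send_cq);
            /* QPs attached to an SRQ have no private receive queue. */
            if (!qps[i].has_srq && qps[i].rq.tail != qps[i].rq.head)
                collect_cq_sketch(qps[i].recv_cq);
        }
        return 0;
    }

Gathering the CQs into a list first, rather than flushing each one under the QP locks, keeps the per-queue critical sections short during error recovery.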
hns_roce_device.h

    932  struct hns_roce_qp *hr_qp);
   1248  void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
   1249  void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
   1250  void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, int n);
   1251  void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, int n);
   1259  void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
   1260  void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
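The accessors declared at lines 1249 to 1251 hand back the address of the nth entry in a queue, as used at line 380 of hns_roce_hw_v1.c above. A plausible sketch of the underlying arithmetic, assuming a contiguous buffer and a power-of-two WQE stride kept as a shift, which is the usual layout for fixed-stride queues (the struct and field names here are hypothetical, not the driver's):

    #include <stddef.h>
    #include <stdio.h>

    struct wq_buf_sketch {
        unsigned char *buf;         /* base of the queue buffer    */
        unsigned int wqe_shift;     /* log2 of the per-WQE stride  */
    };

    static void *get_wqe_sketch(struct wq_buf_sketch *wq, int n)
    {
        /* The nth entry lives at base + n * (1 << wqe_shift). */
        return wq->buf + ((size_t)n << wq->wqe_shift);
    }

    int main(void)
    {
        unsigned char ring[4 * 64];                 /* 4 WQEs of 64 bytes */
        struct wq_buf_sketch wq = { .buf = ring, .wqe_shift = 6 };

        printf("wqe 0 at %p\n", get_wqe_sketch(&wq, 0));
        printf("wqe 3 at %p\n", get_wqe_sketch(&wq, 3));
        return 0;
    }

Storing the stride as a shift turns the index-to-address computation into a single shift and add, which matters on the post paths these accessors serve.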