Lines matching refs: qp (cross-reference listing: every source line that references the identifier "qp", annotated with its enclosing function; the functions are from the mlx4 InfiniBand driver's QP implementation)
105 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_tunnel_qp() argument
110 return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn && in is_tunnel_qp()
111 qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn + in is_tunnel_qp()
115 static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_sqp() argument
122 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_sqp()
123 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3); in is_sqp()
129 if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy || in is_sqp()
130 qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp1_proxy) { in is_sqp()
139 return !!(qp->flags & MLX4_IB_ROCE_V2_GSI_QP); in is_sqp()
143 static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in is_qp0() argument
150 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_qp0()
151 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1); in is_qp0()
157 if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy) { in is_qp0()
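
The three predicates above classify a QP purely by where its number falls relative to firmware-reported bases: is_sqp() accepts base_sqpn..base_sqpn+3 (QP0 and QP1 for two ports), is_qp0() only base_sqpn..base_sqpn+1. A minimal sketch of that range test, with an illustrative base value rather than the driver's real phys_caps:

/* Sketch of the QPN range classification used by is_sqp()/is_qp0().
 * The base value and per-port layout are stand-in assumptions here:
 * qp0 for ports 1..2 at base+0..base+1, qp1 at base+2..base+3. */
#include <stdbool.h>
#include <stdio.h>

static bool is_sqp(unsigned int qpn, unsigned int base_sqpn)
{
        return qpn >= base_sqpn && qpn <= base_sqpn + 3;
}

static bool is_qp0(unsigned int qpn, unsigned int base_sqpn)
{
        return qpn >= base_sqpn && qpn <= base_sqpn + 1;
}

int main(void)
{
        unsigned int base = 0x40;

        /* 0x41 is a qp0; 0x43 is special (a qp1) but not a qp0. */
        printf("0x41: sqp=%d qp0=%d\n", is_sqp(0x41, base), is_qp0(0x41, base));
        printf("0x43: sqp=%d qp0=%d\n", is_sqp(0x43, base), is_qp0(0x43, base));
        return 0;
}
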
166 static void *get_wqe(struct mlx4_ib_qp *qp, int offset) in get_wqe() argument
168 return mlx4_buf_offset(&qp->buf, offset); in get_wqe()
171 static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n) in get_recv_wqe() argument
173 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); in get_recv_wqe()
176 static void *get_send_wqe(struct mlx4_ib_qp *qp, int n) in get_send_wqe() argument
178 return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift)); in get_send_wqe()
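
get_wqe()/get_recv_wqe()/get_send_wqe() resolve a WQE address as buffer base + queue offset + (index << wqe_shift); callers mask the index with wqe_cnt - 1 so the ring wraps without a division. A self-contained sketch of the addressing (the struct is a simplified stand-in, not the driver's real mlx4_ib_wq):

/* Minimal sketch of the WQE addressing scheme behind get_recv_wqe()/
 * get_send_wqe(): each queue owns a region of one contiguous buffer,
 * and entry n lives at offset + (n << wqe_shift).  In the driver the
 * callers apply the wqe_cnt - 1 mask; it is folded in here for brevity. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct wq {
        unsigned int offset;    /* queue's byte offset inside the QP buffer */
        unsigned int wqe_shift; /* log2 of the WQE stride in bytes */
        unsigned int wqe_cnt;   /* number of WQEs, always a power of two */
};

static void *wqe_addr(void *buf, const struct wq *wq, unsigned int n)
{
        return (char *)buf + wq->offset +
               ((size_t)(n & (wq->wqe_cnt - 1)) << wq->wqe_shift);
}

int main(void)
{
        struct wq sq = { .offset = 4096, .wqe_shift = 6, .wqe_cnt = 8 };
        char *buf = calloc(1, 8192);

        /* Entry 9 wraps to entry 1: 4096 + (1 << 6) = 4160. */
        printf("%td\n", (char *)wqe_addr(buf, &sq, 9) - buf);
        free(buf);
        return 0;
}
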
186 static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n) in stamp_send_wqe() argument
194 buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); in stamp_send_wqe()
203 static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type) in mlx4_ib_qp_event() argument
206 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; in mlx4_ib_qp_event()
209 to_mibqp(qp)->port = to_mibqp(qp)->alt_port; in mlx4_ib_qp_event()
213 event.element.qp = ibqp; in mlx4_ib_qp_event()
241 "on QP %06x\n", type, qp->qpn); in mlx4_ib_qp_event()
249 static void mlx4_ib_wq_event(struct mlx4_qp *qp, enum mlx4_event type) in mlx4_ib_wq_event() argument
252 type, qp->qpn); in mlx4_ib_wq_event()
302 bool is_user, bool has_rq, struct mlx4_ib_qp *qp, in set_rq_size() argument
314 qp->rq.wqe_cnt = qp->rq.max_gs = 0; in set_rq_size()
325 qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr)); in set_rq_size()
326 qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge)); in set_rq_size()
327 wqe_size = qp->rq.max_gs * sizeof(struct mlx4_wqe_data_seg); in set_rq_size()
328 qp->rq.wqe_shift = ilog2(max_t(u32, wqe_size, inl_recv_sz)); in set_rq_size()
333 cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; in set_rq_size()
334 cap->max_recv_sge = qp->rq.max_gs; in set_rq_size()
336 cap->max_recv_wr = qp->rq.max_post = in set_rq_size()
337 min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt); in set_rq_size()
338 cap->max_recv_sge = min(qp->rq.max_gs, in set_rq_size()
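
set_rq_size() rounds the requested WR and SGE counts up to powers of two and derives the WQE stride as the log2 of the larger of the scatter-list size and the inline-receive size. A sketch of that arithmetic, assuming the 16-byte mlx4_wqe_data_seg of the real driver:

/* Sketch of the RQ sizing in set_rq_size().  roundup_pow_of_two() and
 * ilog2() are reimplemented portably; 16 stands in for
 * sizeof(struct mlx4_wqe_data_seg). */
#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
        unsigned int r = 1;

        while (r < n)
                r <<= 1;
        return r;
}

static unsigned int ilog2_u32(unsigned int n)   /* floor(log2(n)) */
{
        unsigned int l = 0;

        while (n >>= 1)
                l++;
        return l;
}

int main(void)
{
        unsigned int max_recv_wr = 100, max_recv_sge = 3, inl_recv_sz = 0;
        unsigned int wqe_cnt = roundup_pow_of_two(max_recv_wr ? max_recv_wr : 1);
        unsigned int max_gs = roundup_pow_of_two(max_recv_sge ? max_recv_sge : 1);
        unsigned int wqe_size = max_gs * 16;
        unsigned int wqe_shift =
                ilog2_u32(wqe_size > inl_recv_sz ? wqe_size : inl_recv_sz);

        /* 100 WRs round up to 128 entries of 64 bytes (shift 6). */
        printf("wqe_cnt=%u max_gs=%u wqe_shift=%u\n", wqe_cnt, max_gs, wqe_shift);
        return 0;
}
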
347 enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp) in set_kernel_sq_size() argument
354 cap->max_inline_data + send_wqe_overhead(type, qp->flags) + in set_kernel_sq_size()
369 send_wqe_overhead(type, qp->flags); in set_kernel_sq_size()
374 qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s)); in set_kernel_sq_size()
380 qp->sq_spare_wqes = MLX4_IB_SQ_HEADROOM(qp->sq.wqe_shift); in set_kernel_sq_size()
381 qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr + in set_kernel_sq_size()
382 qp->sq_spare_wqes); in set_kernel_sq_size()
384 qp->sq.max_gs = in set_kernel_sq_size()
386 (1 << qp->sq.wqe_shift)) - in set_kernel_sq_size()
387 send_wqe_overhead(type, qp->flags)) / in set_kernel_sq_size()
390 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + in set_kernel_sq_size()
391 (qp->sq.wqe_cnt << qp->sq.wqe_shift); in set_kernel_sq_size()
392 if (qp->rq.wqe_shift > qp->sq.wqe_shift) { in set_kernel_sq_size()
393 qp->rq.offset = 0; in set_kernel_sq_size()
394 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; in set_kernel_sq_size()
396 qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift; in set_kernel_sq_size()
397 qp->sq.offset = 0; in set_kernel_sq_size()
400 cap->max_send_wr = qp->sq.max_post = in set_kernel_sq_size()
401 qp->sq.wqe_cnt - qp->sq_spare_wqes; in set_kernel_sq_size()
402 cap->max_send_sge = min(qp->sq.max_gs, in set_kernel_sq_size()
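
The tail of set_kernel_sq_size() packs both queues into one buffer, placing whichever queue has the larger stride at offset 0 so each queue starts aligned to its own stride. A sketch of that placement decision with illustrative sizes:

/* Sketch of the single-buffer QP layout chosen at the end of
 * set_kernel_sq_size(): the queue with the larger WQE stride goes first
 * and the other follows it.  Values below are illustrative. */
#include <stdio.h>

struct layout {
        unsigned int rq_offset, sq_offset, buf_size;
};

static struct layout place_queues(unsigned int rq_cnt, unsigned int rq_shift,
                                  unsigned int sq_cnt, unsigned int sq_shift)
{
        struct layout l;

        l.buf_size = (rq_cnt << rq_shift) + (sq_cnt << sq_shift);
        if (rq_shift > sq_shift) {
                l.rq_offset = 0;
                l.sq_offset = rq_cnt << rq_shift;
        } else {
                l.rq_offset = sq_cnt << sq_shift;
                l.sq_offset = 0;
        }
        return l;
}

int main(void)
{
        /* 128 RQ entries of 64 B, 64 SQ entries of 256 B: SQ goes first. */
        struct layout l = place_queues(128, 6, 64, 8);

        printf("rq@%u sq@%u size=%u\n", l.rq_offset, l.sq_offset, l.buf_size);
        return 0;
}
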
412 struct mlx4_ib_qp *qp, in set_user_sq_size() argument
422 qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; in set_user_sq_size()
423 qp->sq.wqe_shift = ucmd->log_sq_stride; in set_user_sq_size()
425 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + in set_user_sq_size()
426 (qp->sq.wqe_cnt << qp->sq.wqe_shift); in set_user_sq_size()
431 static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) in alloc_proxy_bufs() argument
435 qp->sqp_proxy_rcv = in alloc_proxy_bufs()
436 kmalloc_array(qp->rq.wqe_cnt, sizeof(struct mlx4_ib_buf), in alloc_proxy_bufs()
438 if (!qp->sqp_proxy_rcv) in alloc_proxy_bufs()
440 for (i = 0; i < qp->rq.wqe_cnt; i++) { in alloc_proxy_bufs()
441 qp->sqp_proxy_rcv[i].addr = in alloc_proxy_bufs()
444 if (!qp->sqp_proxy_rcv[i].addr) in alloc_proxy_bufs()
446 qp->sqp_proxy_rcv[i].map = in alloc_proxy_bufs()
447 ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr, in alloc_proxy_bufs()
450 if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) { in alloc_proxy_bufs()
451 kfree(qp->sqp_proxy_rcv[i].addr); in alloc_proxy_bufs()
460 ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, in alloc_proxy_bufs()
463 kfree(qp->sqp_proxy_rcv[i].addr); in alloc_proxy_bufs()
465 kfree(qp->sqp_proxy_rcv); in alloc_proxy_bufs()
466 qp->sqp_proxy_rcv = NULL; in alloc_proxy_bufs()
470 static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) in free_proxy_bufs() argument
474 for (i = 0; i < qp->rq.wqe_cnt; i++) { in free_proxy_bufs()
475 ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, in free_proxy_bufs()
478 kfree(qp->sqp_proxy_rcv[i].addr); in free_proxy_bufs()
480 kfree(qp->sqp_proxy_rcv); in free_proxy_bufs()
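
alloc_proxy_bufs()/free_proxy_bufs() follow the usual partial-unwind pattern: if setting up element i fails, elements 0..i-1 are torn down in reverse before the array itself is freed. A generic sketch of the idiom, with plain malloc/free standing in for ib_dma_map_single()/ib_dma_unmap_single():

/* Sketch of the partial-unwind error path in alloc_proxy_bufs(): on a
 * failure at element i, everything already set up is released first. */
#include <stdlib.h>

struct buf { void *addr; };

static int alloc_bufs(struct buf **out, int cnt)
{
        struct buf *v = calloc(cnt, sizeof(*v));
        int i;

        if (!v)
                return -1;
        for (i = 0; i < cnt; i++) {
                v[i].addr = malloc(512);
                if (!v[i].addr)
                        goto err;
        }
        *out = v;
        return 0;

err:
        while (i > 0)           /* unwind elements 0..i-1 in reverse */
                free(v[--i].addr);
        free(v);
        return -1;
}

int main(void)
{
        struct buf *v;

        if (!alloc_bufs(&v, 4)) {
                for (int i = 0; i < 4; i++)
                        free(v[i].addr);
                free(v);
        }
        return 0;
}
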
502 struct mlx4_ib_qp *qp) in mlx4_ib_free_qp_counter() argument
504 mutex_lock(&dev->counters_table[qp->port - 1].mutex); in mlx4_ib_free_qp_counter()
505 mlx4_counter_free(dev->dev, qp->counter_index->index); in mlx4_ib_free_qp_counter()
506 list_del(&qp->counter_index->list); in mlx4_ib_free_qp_counter()
507 mutex_unlock(&dev->counters_table[qp->port - 1].mutex); in mlx4_ib_free_qp_counter()
509 kfree(qp->counter_index); in mlx4_ib_free_qp_counter()
510 qp->counter_index = NULL; in mlx4_ib_free_qp_counter()
618 struct mlx4_ib_qp *qp) in create_qp_rss() argument
623 qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS; in create_qp_rss()
625 err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn, 0, qp->mqp.usage); in create_qp_rss()
629 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); in create_qp_rss()
633 INIT_LIST_HEAD(&qp->gid_list); in create_qp_rss()
634 INIT_LIST_HEAD(&qp->steering_rules); in create_qp_rss()
636 qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET; in create_qp_rss()
637 qp->state = IB_QPS_RESET; in create_qp_rss()
640 qp->sq_no_prefetch = 1; in create_qp_rss()
641 qp->sq.wqe_cnt = 1; in create_qp_rss()
642 qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE; in create_qp_rss()
643 qp->buf_size = qp->sq.wqe_cnt << MLX4_IB_MIN_SQ_STRIDE; in create_qp_rss()
644 qp->mtt = (to_mqp( in create_qp_rss()
647 qp->rss_ctx = kzalloc(sizeof(*qp->rss_ctx), GFP_KERNEL); in create_qp_rss()
648 if (!qp->rss_ctx) { in create_qp_rss()
653 err = set_qp_rss(dev, qp->rss_ctx, init_attr, ucmd); in create_qp_rss()
660 kfree(qp->rss_ctx); in create_qp_rss()
663 mlx4_qp_remove(dev->dev, &qp->mqp); in create_qp_rss()
664 mlx4_qp_free(dev->dev, &qp->mqp); in create_qp_rss()
671 static int _mlx4_ib_create_qp_rss(struct ib_pd *pd, struct mlx4_ib_qp *qp, in _mlx4_ib_create_qp_rss() argument
728 qp->pri.vid = 0xFFFF; in _mlx4_ib_create_qp_rss()
729 qp->alt.vid = 0xFFFF; in _mlx4_ib_create_qp_rss()
731 err = create_qp_rss(to_mdev(pd->device), init_attr, &ucmd, qp); in _mlx4_ib_create_qp_rss()
735 qp->ibqp.qp_num = qp->mqp.qpn; in _mlx4_ib_create_qp_rss()
745 struct mlx4_ib_qp *qp, int range_size, int *wqn) in mlx4_ib_alloc_wqn() argument
765 qp->mqp.usage); in mlx4_ib_alloc_wqn()
782 qp->wqn_range = range; in mlx4_ib_alloc_wqn()
795 struct mlx4_ib_qp *qp, bool dirty_release) in mlx4_ib_release_wqn() argument
802 range = qp->wqn_range; in mlx4_ib_release_wqn()
823 struct ib_udata *udata, struct mlx4_ib_qp *qp) in create_rq() argument
838 qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET; in create_rq()
840 spin_lock_init(&qp->sq.lock); in create_rq()
841 spin_lock_init(&qp->rq.lock); in create_rq()
842 INIT_LIST_HEAD(&qp->gid_list); in create_rq()
843 INIT_LIST_HEAD(&qp->steering_rules); in create_rq()
845 qp->state = IB_QPS_RESET; in create_rq()
870 qp->flags |= MLX4_IB_QP_SCATTER_FCS; in create_rq()
872 err = set_rq_size(dev, &init_attr->cap, true, true, qp, qp->inl_recv_sz); in create_rq()
876 qp->sq_no_prefetch = 1; in create_rq()
877 qp->sq.wqe_cnt = 1; in create_rq()
878 qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE; in create_rq()
879 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + in create_rq()
880 (qp->sq.wqe_cnt << qp->sq.wqe_shift); in create_rq()
882 qp->umem = ib_umem_get(pd->device, wq.buf_addr, qp->buf_size, 0); in create_rq()
883 if (IS_ERR(qp->umem)) { in create_rq()
884 err = PTR_ERR(qp->umem); in create_rq()
888 shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n); in create_rq()
889 err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt); in create_rq()
894 err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); in create_rq()
898 err = mlx4_ib_db_map_user(udata, wq.db_addr, &qp->db); in create_rq()
901 qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS; in create_rq()
903 err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn); in create_rq()
907 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); in create_rq()
916 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); in create_rq()
918 qp->mqp.event = mlx4_ib_wq_event; in create_rq()
926 list_add_tail(&qp->qps_list, &dev->qp_list); in create_rq()
931 list_add_tail(&qp->cq_send_list, &mcq->send_qp_list); in create_rq()
933 list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list); in create_rq()
940 mlx4_ib_release_wqn(context, qp, 0); in create_rq()
942 mlx4_ib_db_unmap_user(context, &qp->db); in create_rq()
945 mlx4_mtt_cleanup(dev->dev, &qp->mtt); in create_rq()
947 ib_umem_release(qp->umem); in create_rq()
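
create_rq() above (and create_qp_common() below) precompute qp->doorbell_qpn as swab32(qpn << 8), so the hot posting path can ring the doorbell with a single raw 32-bit register write that presents the 24-bit QPN to the device in big-endian byte order. A sketch of the encoding with an illustrative QPN:

/* Sketch of the doorbell value precomputed in create_rq()/create_qp_common().
 * swab32() is reimplemented portably; the QPN is illustrative. */
#include <stdint.h>
#include <stdio.h>

static uint32_t swab32(uint32_t x)
{
        return (x >> 24) | ((x >> 8) & 0xff00) |
               ((x << 8) & 0xff0000) | (x << 24);
}

int main(void)
{
        uint32_t qpn = 0x123456;
        uint32_t db = swab32(qpn << 8);

        /* 0x123456 << 8 = 0x12345600; byte-swapped: 0x00563412. */
        printf("doorbell_qpn = 0x%08x\n", db);
        return 0;
}
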
954 struct mlx4_ib_qp *qp) in create_qp_common() argument
1011 qp->sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL); in create_qp_common()
1012 if (!qp->sqp) in create_qp_common()
1016 qp->mlx4_ib_qp_type = qp_type; in create_qp_common()
1018 spin_lock_init(&qp->sq.lock); in create_qp_common()
1019 spin_lock_init(&qp->rq.lock); in create_qp_common()
1020 INIT_LIST_HEAD(&qp->gid_list); in create_qp_common()
1021 INIT_LIST_HEAD(&qp->steering_rules); in create_qp_common()
1023 qp->state = IB_QPS_RESET; in create_qp_common()
1025 qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); in create_qp_common()
1040 qp->inl_recv_sz = ucmd.inl_recv_sz; in create_qp_common()
1050 qp->flags |= MLX4_IB_QP_SCATTER_FCS; in create_qp_common()
1054 qp_has_rq(init_attr), qp, qp->inl_recv_sz); in create_qp_common()
1058 qp->sq_no_prefetch = ucmd.sq_no_prefetch; in create_qp_common()
1060 err = set_user_sq_size(dev, qp, &ucmd); in create_qp_common()
1064 qp->umem = in create_qp_common()
1065 ib_umem_get(pd->device, ucmd.buf_addr, qp->buf_size, 0); in create_qp_common()
1066 if (IS_ERR(qp->umem)) { in create_qp_common()
1067 err = PTR_ERR(qp->umem); in create_qp_common()
1071 shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n); in create_qp_common()
1072 err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt); in create_qp_common()
1077 err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); in create_qp_common()
1082 err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &qp->db); in create_qp_common()
1086 qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS; in create_qp_common()
1089 qp_has_rq(init_attr), qp, 0); in create_qp_common()
1093 qp->sq_no_prefetch = 0; in create_qp_common()
1096 qp->flags |= MLX4_IB_QP_LSO; in create_qp_common()
1101 qp->flags |= MLX4_IB_QP_NETIF; in create_qp_common()
1108 err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp); in create_qp_common()
1113 err = mlx4_db_alloc(dev->dev, &qp->db, 0); in create_qp_common()
1117 *qp->db.db = 0; in create_qp_common()
1120 if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, in create_qp_common()
1121 &qp->buf)) { in create_qp_common()
1126 err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift, in create_qp_common()
1127 &qp->mtt); in create_qp_common()
1131 err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf); in create_qp_common()
1135 qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt, in create_qp_common()
1137 qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt, in create_qp_common()
1139 if (!qp->sq.wrid || !qp->rq.wrid) { in create_qp_common()
1143 qp->mqp.usage = MLX4_RES_USAGE_DRIVER; in create_qp_common()
1147 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | in create_qp_common()
1149 if (alloc_proxy_bufs(pd->device, qp)) { in create_qp_common()
1164 qp->mqp.usage); in create_qp_common()
1166 if (qp->flags & MLX4_IB_QP_NETIF) in create_qp_common()
1170 &qpn, 0, qp->mqp.usage); in create_qp_common()
1176 qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; in create_qp_common()
1178 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); in create_qp_common()
1183 qp->mqp.qpn |= (1 << 23); in create_qp_common()
1190 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); in create_qp_common()
1192 qp->mqp.event = mlx4_ib_qp_event; in create_qp_common()
1200 list_add_tail(&qp->qps_list, &dev->qp_list); in create_qp_common()
1205 list_add_tail(&qp->cq_send_list, &mcq->send_qp_list); in create_qp_common()
1207 list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list); in create_qp_common()
1215 if (qp->flags & MLX4_IB_QP_NETIF) in create_qp_common()
1221 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) in create_qp_common()
1222 free_proxy_bufs(pd->device, qp); in create_qp_common()
1226 mlx4_ib_db_unmap_user(context, &qp->db); in create_qp_common()
1228 kvfree(qp->sq.wrid); in create_qp_common()
1229 kvfree(qp->rq.wrid); in create_qp_common()
1233 mlx4_mtt_cleanup(dev->dev, &qp->mtt); in create_qp_common()
1236 if (!qp->umem) in create_qp_common()
1237 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); in create_qp_common()
1238 ib_umem_release(qp->umem); in create_qp_common()
1242 mlx4_db_free(dev->dev, &qp->db); in create_qp_common()
1245 kfree(qp->sqp); in create_qp_common()
1293 static void del_gid_entries(struct mlx4_ib_qp *qp) in del_gid_entries() argument
1297 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { in del_gid_entries()
1303 static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp) in get_pd() argument
1305 if (qp->ibqp.qp_type == IB_QPT_XRC_TGT) in get_pd()
1306 return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd); in get_pd()
1308 return to_mpd(qp->ibqp.pd); in get_pd()
1311 static void get_cqs(struct mlx4_ib_qp *qp, enum mlx4_ib_source_type src, in get_cqs() argument
1314 switch (qp->ibqp.qp_type) { in get_cqs()
1316 *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq); in get_cqs()
1320 *send_cq = to_mcq(qp->ibqp.send_cq); in get_cqs()
1324 *recv_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.recv_cq) : in get_cqs()
1325 to_mcq(qp->ibwq.cq); in get_cqs()
1326 *send_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.send_cq) : in get_cqs()
1332 static void destroy_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in destroy_qp_rss() argument
1334 if (qp->state != IB_QPS_RESET) { in destroy_qp_rss()
1337 for (i = 0; i < (1 << qp->ibqp.rwq_ind_tbl->log_ind_tbl_size); in destroy_qp_rss()
1339 struct ib_wq *ibwq = qp->ibqp.rwq_ind_tbl->ind_tbl[i]; in destroy_qp_rss()
1349 if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), in destroy_qp_rss()
1350 MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) in destroy_qp_rss()
1352 qp->mqp.qpn); in destroy_qp_rss()
1355 mlx4_qp_remove(dev->dev, &qp->mqp); in destroy_qp_rss()
1356 mlx4_qp_free(dev->dev, &qp->mqp); in destroy_qp_rss()
1357 mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1); in destroy_qp_rss()
1358 del_gid_entries(qp); in destroy_qp_rss()
1361 static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, in destroy_qp_common() argument
1368 if (qp->state != IB_QPS_RESET) { in destroy_qp_common()
1369 if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), in destroy_qp_common()
1370 MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) in destroy_qp_common()
1372 qp->mqp.qpn); in destroy_qp_common()
1373 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { in destroy_qp_common()
1374 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); in destroy_qp_common()
1375 qp->pri.smac = 0; in destroy_qp_common()
1376 qp->pri.smac_port = 0; in destroy_qp_common()
1378 if (qp->alt.smac) { in destroy_qp_common()
1379 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); in destroy_qp_common()
1380 qp->alt.smac = 0; in destroy_qp_common()
1382 if (qp->pri.vid < 0x1000) { in destroy_qp_common()
1383 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); in destroy_qp_common()
1384 qp->pri.vid = 0xFFFF; in destroy_qp_common()
1385 qp->pri.candidate_vid = 0xFFFF; in destroy_qp_common()
1386 qp->pri.update_vid = 0; in destroy_qp_common()
1388 if (qp->alt.vid < 0x1000) { in destroy_qp_common()
1389 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); in destroy_qp_common()
1390 qp->alt.vid = 0xFFFF; in destroy_qp_common()
1391 qp->alt.candidate_vid = 0xFFFF; in destroy_qp_common()
1392 qp->alt.update_vid = 0; in destroy_qp_common()
1396 get_cqs(qp, src, &send_cq, &recv_cq); in destroy_qp_common()
1402 list_del(&qp->qps_list); in destroy_qp_common()
1403 list_del(&qp->cq_send_list); in destroy_qp_common()
1404 list_del(&qp->cq_recv_list); in destroy_qp_common()
1406 __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, in destroy_qp_common()
1407 qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL); in destroy_qp_common()
1409 __mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in destroy_qp_common()
1412 mlx4_qp_remove(dev->dev, &qp->mqp); in destroy_qp_common()
1417 mlx4_qp_free(dev->dev, &qp->mqp); in destroy_qp_common()
1419 if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) { in destroy_qp_common()
1420 if (qp->flags & MLX4_IB_QP_NETIF) in destroy_qp_common()
1421 mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1); in destroy_qp_common()
1428 qp, 1); in destroy_qp_common()
1430 mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1); in destroy_qp_common()
1433 mlx4_mtt_cleanup(dev->dev, &qp->mtt); in destroy_qp_common()
1436 if (qp->rq.wqe_cnt) { in destroy_qp_common()
1443 mlx4_ib_db_unmap_user(mcontext, &qp->db); in destroy_qp_common()
1446 kvfree(qp->sq.wrid); in destroy_qp_common()
1447 kvfree(qp->rq.wrid); in destroy_qp_common()
1448 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | in destroy_qp_common()
1450 free_proxy_bufs(&dev->ib_dev, qp); in destroy_qp_common()
1451 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); in destroy_qp_common()
1452 if (qp->rq.wqe_cnt) in destroy_qp_common()
1453 mlx4_db_free(dev->dev, &qp->db); in destroy_qp_common()
1455 ib_umem_release(qp->umem); in destroy_qp_common()
1457 del_gid_entries(qp); in destroy_qp_common()
1477 static int _mlx4_ib_create_qp(struct ib_pd *pd, struct mlx4_ib_qp *qp, in _mlx4_ib_create_qp() argument
1486 return _mlx4_ib_create_qp_rss(pd, qp, init_attr, udata); in _mlx4_ib_create_qp()
1535 qp->pri.vid = 0xFFFF; in _mlx4_ib_create_qp()
1536 qp->alt.vid = 0xFFFF; in _mlx4_ib_create_qp()
1537 err = create_qp_common(pd, init_attr, udata, 0, qp); in _mlx4_ib_create_qp()
1541 qp->ibqp.qp_num = qp->mqp.qpn; in _mlx4_ib_create_qp()
1542 qp->xrcdn = xrcdn; in _mlx4_ib_create_qp()
1560 qp->pri.vid = 0xFFFF; in _mlx4_ib_create_qp()
1561 qp->alt.vid = 0xFFFF; in _mlx4_ib_create_qp()
1562 err = create_qp_common(pd, init_attr, udata, sqpn, qp); in _mlx4_ib_create_qp()
1566 qp->port = init_attr->port_num; in _mlx4_ib_create_qp()
1567 qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : in _mlx4_ib_create_qp()
1583 struct mlx4_ib_qp *qp; in mlx4_ib_create_qp() local
1586 qp = kzalloc(sizeof(*qp), GFP_KERNEL); in mlx4_ib_create_qp()
1587 if (!qp) in mlx4_ib_create_qp()
1590 mutex_init(&qp->mutex); in mlx4_ib_create_qp()
1591 ret = _mlx4_ib_create_qp(pd, qp, init_attr, udata); in mlx4_ib_create_qp()
1593 kfree(qp); in mlx4_ib_create_qp()
1599 struct mlx4_ib_sqp *sqp = qp->sqp; in mlx4_ib_create_qp()
1618 return &qp->ibqp; in mlx4_ib_create_qp()
1621 static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) in _mlx4_ib_destroy_qp() argument
1623 struct mlx4_ib_dev *dev = to_mdev(qp->device); in _mlx4_ib_destroy_qp()
1624 struct mlx4_ib_qp *mqp = to_mqp(qp); in _mlx4_ib_destroy_qp()
1639 if (qp->rwq_ind_tbl) { in _mlx4_ib_destroy_qp()
1651 int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) in mlx4_ib_destroy_qp() argument
1653 struct mlx4_ib_qp *mqp = to_mqp(qp); in mlx4_ib_destroy_qp()
1662 return _mlx4_ib_destroy_qp(qp, udata); in mlx4_ib_destroy_qp()
1689 static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr, in to_mlx4_access_flags() argument
1699 dest_rd_atomic = qp->resp_depth; in to_mlx4_access_flags()
1704 access_flags = qp->atomic_rd_en; in to_mlx4_access_flags()
1853 static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, in mlx4_set_path() argument
1859 return _mlx4_set_path(dev, &qp->ah_attr, in mlx4_set_path()
1866 const struct ib_qp_attr *qp, in mlx4_set_alt_path() argument
1871 return _mlx4_set_path(dev, &qp->alt_ah_attr, in mlx4_set_alt_path()
1877 static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in update_mcg_macs() argument
1881 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { in update_mcg_macs()
1882 if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) { in update_mcg_macs()
1884 ge->port = qp->port; in update_mcg_macs()
1890 struct mlx4_ib_qp *qp, in handle_eth_ud_smac_index() argument
1896 u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]); in handle_eth_ud_smac_index()
1898 context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6); in handle_eth_ud_smac_index()
1899 if (!qp->pri.smac && !qp->pri.smac_port) { in handle_eth_ud_smac_index()
1900 smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac); in handle_eth_ud_smac_index()
1902 qp->pri.candidate_smac_index = smac_index; in handle_eth_ud_smac_index()
1903 qp->pri.candidate_smac = u64_mac; in handle_eth_ud_smac_index()
1904 qp->pri.candidate_smac_port = qp->port; in handle_eth_ud_smac_index()
1913 static int create_qp_lb_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) in create_qp_lb_counter() argument
1919 if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) != in create_qp_lb_counter()
1921 !(qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) || in create_qp_lb_counter()
1937 qp->counter_index = new_counter_index; in create_qp_lb_counter()
1939 mutex_lock(&dev->counters_table[qp->port - 1].mutex); in create_qp_lb_counter()
1941 &dev->counters_table[qp->port - 1].counters_list); in create_qp_lb_counter()
1942 mutex_unlock(&dev->counters_table[qp->port - 1].mutex); in create_qp_lb_counter()
2051 struct mlx4_ib_qp *qp) in fill_qp_rss_context() argument
2058 rss_context->base_qpn = cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz); in fill_qp_rss_context()
2060 cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz & 0xffffff); in fill_qp_rss_context()
2061 if (qp->rss_ctx->flags & (MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6)) in fill_qp_rss_context()
2063 rss_context->flags = qp->rss_ctx->flags; in fill_qp_rss_context()
2067 memcpy(rss_context->rss_key, qp->rss_ctx->rss_key, in fill_qp_rss_context()
2082 struct mlx4_ib_qp *qp; in __mlx4_ib_modify_qp() local
2101 qp = to_mqp((struct ib_qp *)ibwq); in __mlx4_ib_modify_qp()
2111 qp = to_mqp(ibqp); in __mlx4_ib_modify_qp()
2113 pd = get_pd(qp); in __mlx4_ib_modify_qp()
2118 rdma_port_get_link_layer(&dev->ib_dev, qp->port) == in __mlx4_ib_modify_qp()
2127 (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16)); in __mlx4_ib_modify_qp()
2146 if (qp->inl_recv_sz) in __mlx4_ib_modify_qp()
2149 if (qp->flags & MLX4_IB_QP_SCATTER_FCS) in __mlx4_ib_modify_qp()
2157 if (qp->flags & MLX4_IB_QP_LSO) in __mlx4_ib_modify_qp()
2173 if (qp->rq.wqe_cnt) in __mlx4_ib_modify_qp()
2174 context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3; in __mlx4_ib_modify_qp()
2175 context->rq_size_stride |= qp->rq.wqe_shift - 4; in __mlx4_ib_modify_qp()
2178 if (qp->sq.wqe_cnt) in __mlx4_ib_modify_qp()
2179 context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3; in __mlx4_ib_modify_qp()
2180 context->sq_size_stride |= qp->sq.wqe_shift - 4; in __mlx4_ib_modify_qp()
2182 if (new_state == IB_QPS_RESET && qp->counter_index) in __mlx4_ib_modify_qp()
2183 mlx4_ib_free_qp_counter(dev, qp); in __mlx4_ib_modify_qp()
2186 context->sq_size_stride |= !!qp->sq_no_prefetch << 7; in __mlx4_ib_modify_qp()
2187 context->xrcd = cpu_to_be32((u32) qp->xrcdn); in __mlx4_ib_modify_qp()
2211 err = create_qp_lb_counter(dev, qp); in __mlx4_ib_modify_qp()
2216 dev->counters_table[qp->port - 1].default_counter; in __mlx4_ib_modify_qp()
2217 if (qp->counter_index) in __mlx4_ib_modify_qp()
2218 counter_index = qp->counter_index->index; in __mlx4_ib_modify_qp()
2223 if (qp->counter_index) { in __mlx4_ib_modify_qp()
2233 if (qp->flags & MLX4_IB_QP_NETIF) { in __mlx4_ib_modify_qp()
2234 mlx4_ib_steer_qp_reg(dev, qp, 1); in __mlx4_ib_modify_qp()
2239 enum ib_gid_type gid_type = qp->flags & MLX4_IB_ROCE_V2_GSI_QP ? in __mlx4_ib_modify_qp()
2248 if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) in __mlx4_ib_modify_qp()
2256 attr_mask & IB_QP_PORT ? attr->port_num : qp->port; in __mlx4_ib_modify_qp()
2271 if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path, in __mlx4_ib_modify_qp()
2305 if (mlx4_set_alt_path(dev, attr, attr_mask, qp, in __mlx4_ib_modify_qp()
2319 get_cqs(qp, src_type, &send_cq, &recv_cq); in __mlx4_ib_modify_qp()
2359 context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask); in __mlx4_ib_modify_qp()
2375 if (qp->mlx4_ib_qp_type & in __mlx4_ib_modify_qp()
2380 !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) && in __mlx4_ib_modify_qp()
2398 if (qp->rq.wqe_cnt && in __mlx4_ib_modify_qp()
2401 context->db_rec_addr = cpu_to_be64(qp->db.dma); in __mlx4_ib_modify_qp()
2407 context->pri_path.sched_queue = (qp->port - 1) << 6; in __mlx4_ib_modify_qp()
2408 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || in __mlx4_ib_modify_qp()
2409 qp->mlx4_ib_qp_type & in __mlx4_ib_modify_qp()
2412 if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI) in __mlx4_ib_modify_qp()
2415 if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) in __mlx4_ib_modify_qp()
2419 if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) == in __mlx4_ib_modify_qp()
2421 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI || in __mlx4_ib_modify_qp()
2422 qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) in __mlx4_ib_modify_qp()
2425 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD || in __mlx4_ib_modify_qp()
2426 qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI || in __mlx4_ib_modify_qp()
2427 qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) { in __mlx4_ib_modify_qp()
2428 err = handle_eth_ud_smac_index(dev, qp, context); in __mlx4_ib_modify_qp()
2433 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) in __mlx4_ib_modify_qp()
2434 dev->qp1_proxy[qp->port - 1] = qp; in __mlx4_ib_modify_qp()
2451 &dev->ib_dev, qp->port) == in __mlx4_ib_modify_qp()
2482 for (i = 0; i < qp->sq.wqe_cnt; ++i) { in __mlx4_ib_modify_qp()
2483 ctrl = get_send_wqe(qp, i); in __mlx4_ib_modify_qp()
2486 1 << (qp->sq.wqe_shift - 4); in __mlx4_ib_modify_qp()
2487 stamp_send_wqe(qp, i); in __mlx4_ib_modify_qp()
2494 fill_qp_rss_context(context, qp); in __mlx4_ib_modify_qp()
2498 err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state), in __mlx4_ib_modify_qp()
2500 sqd_event, &qp->mqp); in __mlx4_ib_modify_qp()
2504 qp->state = new_state; in __mlx4_ib_modify_qp()
2507 qp->atomic_rd_en = attr->qp_access_flags; in __mlx4_ib_modify_qp()
2509 qp->resp_depth = attr->max_dest_rd_atomic; in __mlx4_ib_modify_qp()
2511 qp->port = attr->port_num; in __mlx4_ib_modify_qp()
2512 update_mcg_macs(dev, qp); in __mlx4_ib_modify_qp()
2515 qp->alt_port = attr->alt_port_num; in __mlx4_ib_modify_qp()
2517 if (is_sqp(dev, qp)) in __mlx4_ib_modify_qp()
2518 store_sqp_attrs(qp->sqp, attr, attr_mask); in __mlx4_ib_modify_qp()
2524 if (is_qp0(dev, qp)) { in __mlx4_ib_modify_qp()
2526 if (mlx4_INIT_PORT(dev->dev, qp->port)) in __mlx4_ib_modify_qp()
2528 qp->port); in __mlx4_ib_modify_qp()
2532 mlx4_CLOSE_PORT(dev->dev, qp->port); in __mlx4_ib_modify_qp()
2541 mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, in __mlx4_ib_modify_qp()
2544 mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in __mlx4_ib_modify_qp()
2546 qp->rq.head = 0; in __mlx4_ib_modify_qp()
2547 qp->rq.tail = 0; in __mlx4_ib_modify_qp()
2548 qp->sq.head = 0; in __mlx4_ib_modify_qp()
2549 qp->sq.tail = 0; in __mlx4_ib_modify_qp()
2550 qp->sq_next_wqe = 0; in __mlx4_ib_modify_qp()
2551 if (qp->rq.wqe_cnt) in __mlx4_ib_modify_qp()
2552 *qp->db.db = 0; in __mlx4_ib_modify_qp()
2554 if (qp->flags & MLX4_IB_QP_NETIF) in __mlx4_ib_modify_qp()
2555 mlx4_ib_steer_qp_reg(dev, qp, 0); in __mlx4_ib_modify_qp()
2557 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { in __mlx4_ib_modify_qp()
2558 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); in __mlx4_ib_modify_qp()
2559 qp->pri.smac = 0; in __mlx4_ib_modify_qp()
2560 qp->pri.smac_port = 0; in __mlx4_ib_modify_qp()
2562 if (qp->alt.smac) { in __mlx4_ib_modify_qp()
2563 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); in __mlx4_ib_modify_qp()
2564 qp->alt.smac = 0; in __mlx4_ib_modify_qp()
2566 if (qp->pri.vid < 0x1000) { in __mlx4_ib_modify_qp()
2567 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); in __mlx4_ib_modify_qp()
2568 qp->pri.vid = 0xFFFF; in __mlx4_ib_modify_qp()
2569 qp->pri.candidate_vid = 0xFFFF; in __mlx4_ib_modify_qp()
2570 qp->pri.update_vid = 0; in __mlx4_ib_modify_qp()
2573 if (qp->alt.vid < 0x1000) { in __mlx4_ib_modify_qp()
2574 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); in __mlx4_ib_modify_qp()
2575 qp->alt.vid = 0xFFFF; in __mlx4_ib_modify_qp()
2576 qp->alt.candidate_vid = 0xFFFF; in __mlx4_ib_modify_qp()
2577 qp->alt.update_vid = 0; in __mlx4_ib_modify_qp()
2581 if (err && qp->counter_index) in __mlx4_ib_modify_qp()
2582 mlx4_ib_free_qp_counter(dev, qp); in __mlx4_ib_modify_qp()
2584 mlx4_ib_steer_qp_reg(dev, qp, 0); in __mlx4_ib_modify_qp()
2586 if (qp->pri.candidate_smac || in __mlx4_ib_modify_qp()
2587 (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) { in __mlx4_ib_modify_qp()
2589 mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac); in __mlx4_ib_modify_qp()
2591 if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) in __mlx4_ib_modify_qp()
2592 mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); in __mlx4_ib_modify_qp()
2593 qp->pri.smac = qp->pri.candidate_smac; in __mlx4_ib_modify_qp()
2594 qp->pri.smac_index = qp->pri.candidate_smac_index; in __mlx4_ib_modify_qp()
2595 qp->pri.smac_port = qp->pri.candidate_smac_port; in __mlx4_ib_modify_qp()
2597 qp->pri.candidate_smac = 0; in __mlx4_ib_modify_qp()
2598 qp->pri.candidate_smac_index = 0; in __mlx4_ib_modify_qp()
2599 qp->pri.candidate_smac_port = 0; in __mlx4_ib_modify_qp()
2601 if (qp->alt.candidate_smac) { in __mlx4_ib_modify_qp()
2603 mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac); in __mlx4_ib_modify_qp()
2605 if (qp->alt.smac) in __mlx4_ib_modify_qp()
2606 mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); in __mlx4_ib_modify_qp()
2607 qp->alt.smac = qp->alt.candidate_smac; in __mlx4_ib_modify_qp()
2608 qp->alt.smac_index = qp->alt.candidate_smac_index; in __mlx4_ib_modify_qp()
2609 qp->alt.smac_port = qp->alt.candidate_smac_port; in __mlx4_ib_modify_qp()
2611 qp->alt.candidate_smac = 0; in __mlx4_ib_modify_qp()
2612 qp->alt.candidate_smac_index = 0; in __mlx4_ib_modify_qp()
2613 qp->alt.candidate_smac_port = 0; in __mlx4_ib_modify_qp()
2616 if (qp->pri.update_vid) { in __mlx4_ib_modify_qp()
2618 if (qp->pri.candidate_vid < 0x1000) in __mlx4_ib_modify_qp()
2619 mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port, in __mlx4_ib_modify_qp()
2620 qp->pri.candidate_vid); in __mlx4_ib_modify_qp()
2622 if (qp->pri.vid < 0x1000) in __mlx4_ib_modify_qp()
2623 mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, in __mlx4_ib_modify_qp()
2624 qp->pri.vid); in __mlx4_ib_modify_qp()
2625 qp->pri.vid = qp->pri.candidate_vid; in __mlx4_ib_modify_qp()
2626 qp->pri.vlan_port = qp->pri.candidate_vlan_port; in __mlx4_ib_modify_qp()
2627 qp->pri.vlan_index = qp->pri.candidate_vlan_index; in __mlx4_ib_modify_qp()
2629 qp->pri.candidate_vid = 0xFFFF; in __mlx4_ib_modify_qp()
2630 qp->pri.update_vid = 0; in __mlx4_ib_modify_qp()
2633 if (qp->alt.update_vid) { in __mlx4_ib_modify_qp()
2635 if (qp->alt.candidate_vid < 0x1000) in __mlx4_ib_modify_qp()
2636 mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port, in __mlx4_ib_modify_qp()
2637 qp->alt.candidate_vid); in __mlx4_ib_modify_qp()
2639 if (qp->alt.vid < 0x1000) in __mlx4_ib_modify_qp()
2640 mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, in __mlx4_ib_modify_qp()
2641 qp->alt.vid); in __mlx4_ib_modify_qp()
2642 qp->alt.vid = qp->alt.candidate_vid; in __mlx4_ib_modify_qp()
2643 qp->alt.vlan_port = qp->alt.candidate_vlan_port; in __mlx4_ib_modify_qp()
2644 qp->alt.vlan_index = qp->alt.candidate_vlan_index; in __mlx4_ib_modify_qp()
2646 qp->alt.candidate_vid = 0xFFFF; in __mlx4_ib_modify_qp()
2647 qp->alt.update_vid = 0; in __mlx4_ib_modify_qp()
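
Within __mlx4_ib_modify_qp(), each queue's geometry is packed into a single context byte: log2 of the entry count shifted left by 3, OR'd with the stride exponent minus 4 (so a 16-byte stride encodes as 0, and strides up to 2 KB fit in three bits). A sketch of that encoding:

/* Sketch of the rq/sq_size_stride byte written by __mlx4_ib_modify_qp():
 * log2(wqe_cnt) << 3 | (wqe_shift - 4). */
#include <stdint.h>
#include <stdio.h>

static uint8_t size_stride(unsigned int wqe_cnt, unsigned int wqe_shift)
{
        unsigned int log_cnt = 0;

        while ((1u << log_cnt) < wqe_cnt)       /* wqe_cnt is a power of two */
                log_cnt++;
        return (uint8_t)((log_cnt << 3) | (wqe_shift - 4));
}

int main(void)
{
        /* 128 entries (log2 = 7) of 64 bytes (shift 6): 7 << 3 | 2 = 0x3a. */
        printf("0x%02x\n", size_stride(128, 6));
        return 0;
}
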
2662 struct mlx4_ib_qp *qp = to_mqp(ibqp); in _mlx4_ib_modify_qp() local
2665 mutex_lock(&qp->mutex); in _mlx4_ib_modify_qp()
2667 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; in _mlx4_ib_modify_qp()
2732 int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; in _mlx4_ib_modify_qp()
2782 mutex_unlock(&qp->mutex); in _mlx4_ib_modify_qp()
2820 static int build_sriov_qp0_header(struct mlx4_ib_qp *qp, in build_sriov_qp0_header() argument
2824 struct mlx4_ib_dev *mdev = to_mdev(qp->ibqp.device); in build_sriov_qp0_header()
2825 struct mlx4_ib_sqp *sqp = qp->sqp; in build_sriov_qp0_header()
2826 struct ib_device *ib_dev = qp->ibqp.device; in build_sriov_qp0_header()
2848 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) in build_sriov_qp0_header()
2853 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) { in build_sriov_qp0_header()
2870 err = ib_get_cached_pkey(ib_dev, qp->port, 0, &pkey); in build_sriov_qp0_header()
2874 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER) in build_sriov_qp0_header()
2878 cpu_to_be32(mdev->dev->caps.spec_qps[qp->port - 1].qp0_tunnel); in build_sriov_qp0_header()
2882 if (mlx4_get_parav_qkey(mdev->dev, qp->mqp.qpn, &qkey)) in build_sriov_qp0_header()
2885 if (vf_get_qp0_qkey(mdev->dev, qp->mqp.qpn, &qkey)) in build_sriov_qp0_header()
2889 sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->mqp.qpn); in build_sriov_qp0_header()
2973 static int build_mlx_header(struct mlx4_ib_qp *qp, const struct ib_ud_wr *wr, in build_mlx_header() argument
2976 struct mlx4_ib_sqp *sqp = qp->sqp; in build_mlx_header()
2977 struct ib_device *ib_dev = qp->ibqp.device; in build_mlx_header()
3001 is_eth = rdma_port_get_link_layer(qp->ibqp.device, qp->port) == IB_LINK_LAYER_ETHERNET; in build_mlx_header()
3015 err = fill_gid_by_hw_index(ibdev, qp->port, in build_mlx_header()
3067 .demux[qp->port - 1] in build_mlx_header()
3072 ->sriov.demux[qp->port - 1] in build_mlx_header()
3106 cpu_to_be32((!qp->ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) | in build_mlx_header()
3158 !qp->ibqp.qp_num ? in build_mlx_header()
3162 qp->port); in build_mlx_header()
3163 if (qp->ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15) in build_mlx_header()
3169 if (!qp->ibqp.qp_num) in build_mlx_header()
3170 err = ib_get_cached_pkey(ib_dev, qp->port, sqp->pkey_index, in build_mlx_header()
3173 err = ib_get_cached_pkey(ib_dev, qp->port, wr->pkey_index, in build_mlx_header()
3183 sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num); in build_mlx_header()
3444 const struct ib_ud_wr *wr, struct mlx4_ib_qp *qp, in build_lso_seg() argument
3452 if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) && in build_lso_seg()
3453 wr->wr.num_sge > qp->sq.max_gs - (halign >> 4))) in build_lso_seg()
3488 struct mlx4_ib_qp *qp = to_mqp(ibqp); in _mlx4_ib_post_send() local
3505 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) { in _mlx4_ib_post_send()
3506 struct mlx4_ib_sqp *sqp = qp->sqp; in _mlx4_ib_post_send()
3513 if (!fill_gid_by_hw_index(mdev, qp->port, in _mlx4_ib_post_send()
3516 qp = (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? in _mlx4_ib_post_send()
3517 to_mqp(sqp->roce_v2_gsi) : qp; in _mlx4_ib_post_send()
3524 spin_lock_irqsave(&qp->sq.lock, flags); in _mlx4_ib_post_send()
3533 ind = qp->sq_next_wqe; in _mlx4_ib_post_send()
3539 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in _mlx4_ib_post_send()
3545 if (unlikely(wr->num_sge > qp->sq.max_gs)) { in _mlx4_ib_post_send()
3551 ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); in _mlx4_ib_post_send()
3552 qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; in _mlx4_ib_post_send()
3562 qp->sq_signal_bits; in _mlx4_ib_post_send()
3569 switch (qp->mlx4_ib_qp_type) { in _mlx4_ib_post_send()
3633 err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl, in _mlx4_ib_post_send()
3657 err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen, in _mlx4_ib_post_send()
3670 err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl, in _mlx4_ib_post_send()
3694 qp->mlx4_ib_qp_type); in _mlx4_ib_post_send()
3704 err = build_mlx_header(qp, ud_wr(wr), ctrl, &seglen); in _mlx4_ib_post_send()
3729 if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || in _mlx4_ib_post_send()
3730 qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI || in _mlx4_ib_post_send()
3731 qp->mlx4_ib_qp_type & in _mlx4_ib_post_send()
3765 (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh; in _mlx4_ib_post_send()
3773 stamp_send_wqe(qp, ind + qp->sq_spare_wqes); in _mlx4_ib_post_send()
3779 qp->sq.head += nreq; in _mlx4_ib_post_send()
3787 writel_relaxed(qp->doorbell_qpn, in _mlx4_ib_post_send()
3790 stamp_send_wqe(qp, ind + qp->sq_spare_wqes - 1); in _mlx4_ib_post_send()
3792 qp->sq_next_wqe = ind; in _mlx4_ib_post_send()
3795 spin_unlock_irqrestore(&qp->sq.lock, flags); in _mlx4_ib_post_send()
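
_mlx4_ib_post_send() keeps an unmasked producer index (qp->sq_next_wqe): the low bits, ind & (wqe_cnt - 1), select the slot, while ind & wqe_cnt flips on every wrap and supplies the alternating ownership bit written into the WQE control segment (the 1 << 31 above). A sketch showing the flip, with an illustrative ring size:

/* Sketch of the SQ producer index used by _mlx4_ib_post_send(): the index
 * is never masked when stored, so 'ind & wqe_cnt' toggles each time the
 * ring wraps and serves as the WQE ownership bit. */
#include <stdio.h>

int main(void)
{
        unsigned int wqe_cnt = 8, ind;  /* wqe_cnt: power of two */

        for (ind = 6; ind < 11; ind++)
                printf("ind=%2u slot=%u owner=%u\n", ind,
                       ind & (wqe_cnt - 1), !!(ind & wqe_cnt));
        return 0;
}
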
3809 struct mlx4_ib_qp *qp = to_mqp(ibqp); in _mlx4_ib_post_recv() local
3819 max_gs = qp->rq.max_gs; in _mlx4_ib_post_recv()
3820 spin_lock_irqsave(&qp->rq.lock, flags); in _mlx4_ib_post_recv()
3830 ind = qp->rq.head & (qp->rq.wqe_cnt - 1); in _mlx4_ib_post_recv()
3833 if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { in _mlx4_ib_post_recv()
3839 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in _mlx4_ib_post_recv()
3845 scat = get_recv_wqe(qp, ind); in _mlx4_ib_post_recv()
3847 if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | in _mlx4_ib_post_recv()
3850 qp->sqp_proxy_rcv[ind].map, in _mlx4_ib_post_recv()
3857 scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map); in _mlx4_ib_post_recv()
3871 qp->rq.wrid[ind] = wr->wr_id; in _mlx4_ib_post_recv()
3873 ind = (ind + 1) & (qp->rq.wqe_cnt - 1); in _mlx4_ib_post_recv()
3878 qp->rq.head += nreq; in _mlx4_ib_post_recv()
3886 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); in _mlx4_ib_post_recv()
3889 spin_unlock_irqrestore(&qp->rq.lock, flags); in _mlx4_ib_post_recv()
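
_mlx4_ib_post_recv() likewise lets qp->rq.head grow past the ring size; slots come from head & (wqe_cnt - 1), and only the low 16 bits of the head are published to the doorbell record (big-endian in the real code, with a write barrier before the store, both elided here):

/* Sketch of the RQ doorbell-record update at the end of
 * _mlx4_ib_post_recv().  The head counter is illustrative. */
#include <stdio.h>

int main(void)
{
        unsigned int wqe_cnt = 256, head = 0x1fffe;
        int nreq = 3;

        head += nreq;
        /* slot = 1, db_rec = 0x0001: both derive from the same counter. */
        printf("slot=%u db_rec=0x%04x\n",
               head & (wqe_cnt - 1), head & 0xffff);
        return 0;
}
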
3977 struct mlx4_ib_qp *qp = to_mqp(ibqp); in mlx4_ib_query_qp() local
3985 mutex_lock(&qp->mutex); in mlx4_ib_query_qp()
3987 if (qp->state == IB_QPS_RESET) { in mlx4_ib_query_qp()
3992 err = mlx4_qp_query(dev->dev, &qp->mqp, &context); in mlx4_ib_query_qp()
4000 qp->state = to_ib_qp_state(mlx4_state); in mlx4_ib_query_qp()
4001 qp_attr->qp_state = qp->state; in mlx4_ib_query_qp()
4012 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { in mlx4_ib_query_qp()
4022 qp_attr->port_num = qp->port; in mlx4_ib_query_qp()
4042 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; in mlx4_ib_query_qp()
4043 qp_attr->cap.max_recv_sge = qp->rq.max_gs; in mlx4_ib_query_qp()
4046 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; in mlx4_ib_query_qp()
4047 qp_attr->cap.max_send_sge = qp->sq.max_gs; in mlx4_ib_query_qp()
4062 if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) in mlx4_ib_query_qp()
4065 if (qp->flags & MLX4_IB_QP_LSO) in mlx4_ib_query_qp()
4068 if (qp->flags & MLX4_IB_QP_NETIF) in mlx4_ib_query_qp()
4072 qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ? in mlx4_ib_query_qp()
4076 mutex_unlock(&qp->mutex); in mlx4_ib_query_qp()
4086 struct mlx4_ib_qp *qp; in mlx4_ib_create_wq() local
4122 qp = kzalloc(sizeof(*qp), GFP_KERNEL); in mlx4_ib_create_wq()
4123 if (!qp) in mlx4_ib_create_wq()
4126 mutex_init(&qp->mutex); in mlx4_ib_create_wq()
4127 qp->pri.vid = 0xFFFF; in mlx4_ib_create_wq()
4128 qp->alt.vid = 0xFFFF; in mlx4_ib_create_wq()
4140 err = create_rq(pd, &ib_qp_init_attr, udata, qp); in mlx4_ib_create_wq()
4142 kfree(qp); in mlx4_ib_create_wq()
4146 qp->ibwq.event_handler = init_attr->event_handler; in mlx4_ib_create_wq()
4147 qp->ibwq.wq_num = qp->mqp.qpn; in mlx4_ib_create_wq()
4148 qp->ibwq.state = IB_WQS_RESET; in mlx4_ib_create_wq()
4150 return &qp->ibwq; in mlx4_ib_create_wq()
4168 struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq); in _mlx4_ib_modify_wq() local
4177 qp_cur_state = qp->state; in _mlx4_ib_modify_wq()
4186 attr.port_num = qp->port; in _mlx4_ib_modify_wq()
4216 qp->state = qp_new_state; in _mlx4_ib_modify_wq()
4224 struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq); in mlx4_ib_modify_wq() local
4261 mutex_lock(&qp->mutex); in mlx4_ib_modify_wq()
4266 if (qp->rss_usecnt) in mlx4_ib_modify_wq()
4272 mutex_unlock(&qp->mutex); in mlx4_ib_modify_wq()
4280 struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq); in mlx4_ib_destroy_wq() local
4282 if (qp->counter_index) in mlx4_ib_destroy_wq()
4283 mlx4_ib_free_qp_counter(dev, qp); in mlx4_ib_destroy_wq()
4285 destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata); in mlx4_ib_destroy_wq()
4287 kfree(qp); in mlx4_ib_destroy_wq()
4407 void mlx4_ib_drain_sq(struct ib_qp *qp) in mlx4_ib_drain_sq() argument
4409 struct ib_cq *cq = qp->send_cq; in mlx4_ib_drain_sq()
4421 struct mlx4_ib_dev *dev = to_mdev(qp->device); in mlx4_ib_drain_sq()
4424 ret = ib_modify_qp(qp, &attr, IB_QP_STATE); in mlx4_ib_drain_sq()
4433 ret = _mlx4_ib_post_send(qp, &swr.wr, &bad_swr, true); in mlx4_ib_drain_sq()
4442 void mlx4_ib_drain_rq(struct ib_qp *qp) in mlx4_ib_drain_rq() argument
4444 struct ib_cq *cq = qp->recv_cq; in mlx4_ib_drain_rq()
4450 struct mlx4_ib_dev *dev = to_mdev(qp->device); in mlx4_ib_drain_rq()
4453 ret = ib_modify_qp(qp, &attr, IB_QP_STATE); in mlx4_ib_drain_rq()
4463 ret = _mlx4_ib_post_recv(qp, &rwr, &bad_rwr, true); in mlx4_ib_drain_rq()
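
mlx4_ib_drain_sq()/mlx4_ib_drain_rq() implement the standard IB drain recipe: move the QP to the error state, post one marker work request with a recognizable wr_id, and wait for its flush completion, which guarantees every earlier WR has completed. A userspace analogue of the send-side recipe using the libibverbs API (not the in-kernel API shown above); 'qp' and 'cq' are assumed to be set up elsewhere, and error handling is minimal:

/* Userspace sketch of the drain recipe: QP to ERR, marker WR, poll for
 * its flush completion.  Requires a working RDMA device and libibverbs. */
#include <infiniband/verbs.h>

static int drain_sq(struct ibv_qp *qp, struct ibv_cq *cq)
{
        struct ibv_qp_attr attr = { .qp_state = IBV_QPS_ERR };
        struct ibv_send_wr wr = { .wr_id = 0xd5a1 }, *bad; /* marker WR */
        struct ibv_wc wc;
        int n;

        if (ibv_modify_qp(qp, &attr, IBV_QP_STATE))
                return -1;
        if (ibv_post_send(qp, &wr, &bad))
                return -1;
        do {    /* spin until the marker's flush completion arrives */
                n = ibv_poll_cq(cq, 1, &wc);
        } while (n == 0 || (n > 0 && wc.wr_id != 0xd5a1));
        return n < 0 ? -1 : 0;
}
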