1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
28 /* handle_post_send_edge - Check if we get to SQ edge. If yes, update to the
30 * @sq - SQ buffer.
43 idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1); in handle_post_send_edge()
46 *seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx); in handle_post_send_edge()
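The idx computation above is the core of the edge handling: cur_post counts 64-byte basic blocks while wqe_sz counts 16-byte units, so wqe_sz >> 2 converts the partially built WQE into basic blocks before masking with wqe_cnt - 1, and the segment pointer is refreshed from the fragmented buffer. Below is a minimal userspace sketch of that wraparound, assuming a ring of 64-byte blocks split into equal fragments; bb_addr(), frag_edge() and handle_edge_demo() are illustrative stand-ins, not the driver's mlx5_frag_buf_get_wqe()/get_sq_edge() helpers.

#include <stdint.h>
#include <stdio.h>

#define BB_SIZE   64   /* MLX5_SEND_WQE_BB: one basic block            */
#define WQE_CNT   64   /* ring size in basic blocks (power of two)     */
#define FRAG_BBS  16   /* basic blocks per buffer fragment (assumed)   */

static uint8_t frags[WQE_CNT / FRAG_BBS][FRAG_BBS * BB_SIZE];

static void *bb_addr(unsigned int idx)          /* block idx -> address  */
{
	return &frags[idx / FRAG_BBS][(idx % FRAG_BBS) * BB_SIZE];
}

static void *frag_edge(unsigned int idx)        /* end of idx's fragment */
{
	return frags[idx / FRAG_BBS] + FRAG_BBS * BB_SIZE;
}

/* If the 16-byte cursor hit the fragment edge, continue at the block the
 * WQE built so far would land in: (cur_post + (wqe_sz >> 2)) masked by
 * the ring size, mirroring the listed idx computation.                  */
static void handle_edge_demo(void **seg, void **cur_edge,
			     unsigned int cur_post, uint32_t wqe_sz)
{
	unsigned int idx;

	if (*seg != *cur_edge)
		return;
	idx = (cur_post + (wqe_sz >> 2)) & (WQE_CNT - 1);
	*seg = bb_addr(idx);
	*cur_edge = frag_edge(idx);
}

int main(void)
{
	unsigned int cur_post = 15;             /* last block of fragment 0 */
	void *seg = (uint8_t *)bb_addr(cur_post) + BB_SIZE;
	void *cur_edge = frag_edge(cur_post);

	handle_edge_demo(&seg, &cur_edge, cur_post, 4 /* 64 bytes written */);
	printf("resumed at block %ld of fragment 1\n",
	       (long)(((uint8_t *)seg - frags[1]) / BB_SIZE));
	return 0;
}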
49 /* memcpy_send_wqe - copy data from src to WQE and update the relevant WQ's
51 * @sq - SQ buffer.
63 size_t leftlen = *cur_edge - *seg; in memcpy_send_wqe()
69 n -= copysz; in memcpy_send_wqe()
73 *wqe_sz += stride >> 4; in memcpy_send_wqe()
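memcpy_send_wqe() copies an arbitrary-length payload into the WQE one edge-bounded chunk at a time (leftlen bytes per chunk), invoking the edge handler between chunks, and accounts each chunk's stride in 16-byte units via stride >> 4. A compact, self-contained sketch of that chunked copy follows; the "wrap to the next fragment" step and the rule that only the final chunk is rounded up to 16 bytes are partly elided from this listing, so treat them as assumptions, and all names here are illustrative.

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

/* Two fragments standing in for the SQ buffer; a real SQ has many. */
static uint8_t frag0[64], frag1[64];

#define MIN(a, b)  ((a) < (b) ? (a) : (b))
#define ALIGN16(x) (((x) + 15u) & ~15u)

static void memcpy_wqe_demo(void **seg, void **cur_edge,
			    uint32_t *wqe_sz, const void *src, size_t n)
{
	while (n) {
		size_t leftlen = (uint8_t *)*cur_edge - (uint8_t *)*seg;
		size_t copysz = MIN(leftlen, n);
		size_t stride;

		memcpy(*seg, src, copysz);
		src = (const uint8_t *)src + copysz;
		n -= copysz;
		/* intermediate chunks end exactly at the edge; only the
		 * last one is rounded up to the 16-byte WQE granularity
		 * (assumed; the listing elides this line)               */
		stride = n ? copysz : ALIGN16(copysz);
		*seg = (uint8_t *)*seg + stride;
		*wqe_sz += stride >> 4;

		if (*seg == *cur_edge) {
			/* the driver calls handle_post_send_edge() here */
			*seg = frag1;
			*cur_edge = frag1 + sizeof(frag1);
		}
	}
}

int main(void)
{
	uint8_t payload[80];
	void *seg = frag0 + 32, *cur_edge = frag0 + sizeof(frag0);
	uint32_t wqe_sz = 2;                    /* 32 bytes built so far */

	memset(payload, 0xab, sizeof(payload));
	memcpy_wqe_demo(&seg, &cur_edge, &wqe_sz, payload, sizeof(payload));
	printf("wqe_sz is now %u sixteen-byte units\n", wqe_sz);
	return 0;
}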
84 cur = wq->head - wq->tail; in mlx5_wq_overflow()
85 if (likely(cur + nreq < wq->max_post)) in mlx5_wq_overflow()
89 spin_lock(&cq->lock); in mlx5_wq_overflow()
90 cur = wq->head - wq->tail; in mlx5_wq_overflow()
91 spin_unlock(&cq->lock); in mlx5_wq_overflow()
93 return cur + nreq >= wq->max_post; in mlx5_wq_overflow()
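The overflow test relies on unsigned wraparound: head and tail are free-running counters, so head - tail is the number of outstanding WQEs even after either counter wraps, and the post is refused when that occupancy plus the new batch would reach max_post. The slow path re-reads the counters under the CQ lock because tail advances from the completion path. A tiny sketch of the occupancy math, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct wq_demo {
	unsigned int head;     /* producer counter, never masked */
	unsigned int tail;     /* consumer counter, never masked */
	unsigned int max_post; /* ring capacity in WQEs          */
};

/* true when posting 'nreq' more WQEs would overflow the queue */
static bool wq_overflow_demo(const struct wq_demo *wq, int nreq)
{
	unsigned int cur = wq->head - wq->tail;  /* wrap-safe occupancy */

	return cur + nreq >= wq->max_post;
}

int main(void)
{
	/* both counters have wrapped; the occupancy is still 3 */
	struct wq_demo wq = { .head = 2, .tail = 0xffffffff, .max_post = 8 };

	printf("occupancy=%u overflow(4)=%d overflow(6)=%d\n",
	       wq.head - wq.tail, wq_overflow_demo(&wq, 4),
	       wq_overflow_demo(&wq, 6));
	return 0;
}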
99 rseg->raddr = cpu_to_be64(remote_addr); in set_raddr_seg()
100 rseg->rkey = cpu_to_be32(rkey); in set_raddr_seg()
101 rseg->reserved = 0; in set_raddr_seg()
111 if (wr->send_flags & IB_SEND_IP_CSUM) in set_eth_seg()
112 eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | in set_eth_seg()
115 if (wr->opcode == IB_WR_LSO) { in set_eth_seg()
118 void *pdata = ud_wr->header; in set_eth_seg()
121 left = ud_wr->hlen; in set_eth_seg()
122 eseg->mss = cpu_to_be16(ud_wr->mss); in set_eth_seg()
123 eseg->inline_hdr.sz = cpu_to_be16(left); in set_eth_seg()
129 copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start, in set_eth_seg()
131 memcpy(eseg->inline_hdr.start, pdata, copysz); in set_eth_seg()
132 stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) - in set_eth_seg()
133 sizeof(eseg->inline_hdr.start) + copysz, 16); in set_eth_seg()
138 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_eth_seg()
139 left -= copysz; in set_eth_seg()
141 memcpy_send_wqe(&qp->sq, cur_edge, seg, size, pdata, in set_eth_seg()
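For IB_WR_LSO the UD header is copied inline into the Ethernet segment: the first chunk fills whatever room remains between inline_hdr.start and the current edge, the WQE grows by ALIGN(seg_size - sizeof(inline_hdr.start) + copysz, 16) sixteen-byte units, and any remaining header bytes go through memcpy_send_wqe() after the edge handler. A sketch of the size arithmetic only, assuming a 16-byte Ethernet segment with a 2-byte inline_hdr.start as the ALIGN expression suggests; the constants and names below are illustrative.

#include <stdio.h>

#define ALIGN16(x)          (((x) + 15u) & ~15u)
#define ETH_SEG_SIZE        16u  /* sizeof(struct mlx5_wqe_eth_seg), assumed */
#define INLINE_HDR_START_SZ 2u   /* sizeof(eseg->inline_hdr.start), assumed  */

/*
 * First-chunk accounting for an LSO inline header: 'room' bytes remain
 * between inline_hdr.start and the edge, 'hlen' header bytes must be
 * inlined.  Returns the WQE growth in 16-byte units for the first chunk
 * and reports how many header bytes spill past the edge.
 */
static unsigned int lso_first_chunk(unsigned int room, unsigned int hlen,
				    unsigned int *left_after_edge)
{
	unsigned int copysz = room < hlen ? room : hlen;
	unsigned int stride =
		ALIGN16(ETH_SEG_SIZE - INLINE_HDR_START_SZ + copysz);

	*left_after_edge = hlen - copysz;
	return stride >> 4;
}

int main(void)
{
	unsigned int left, ds;

	/* 42-byte header, but only 30 bytes of room before the edge */
	ds = lso_first_chunk(30, 42, &left);
	printf("first chunk: %u x 16B, %u header bytes spill past the edge\n",
	       ds, left);
	return 0;
}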
155 memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av)); in set_datagram_seg()
156 dseg->av.dqp_dct = in set_datagram_seg()
157 cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV); in set_datagram_seg()
158 dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey); in set_datagram_seg()
163 dseg->byte_count = cpu_to_be32(sg->length); in set_data_ptr_seg()
164 dseg->lkey = cpu_to_be32(sg->lkey); in set_data_ptr_seg()
165 dseg->addr = cpu_to_be64(sg->addr); in set_data_ptr_seg()
220 int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size; in set_reg_umr_seg()
224 umr->flags = flags; in set_reg_umr_seg()
225 umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size)); in set_reg_umr_seg()
226 umr->mkey_mask = frwr_mkey_mask(atomic); in set_reg_umr_seg()
232 umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); in set_linv_umr_seg()
233 umr->flags = MLX5_UMR_INLINE; in set_linv_umr_seg()
301 MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) in umr_check_mkey_mask()
302 return -EPERM; in umr_check_mkey_mask()
305 MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)) in umr_check_mkey_mask()
306 return -EPERM; in umr_check_mkey_mask()
309 !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr)) in umr_check_mkey_mask()
310 return -EPERM; in umr_check_mkey_mask()
313 !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr)) in umr_check_mkey_mask()
314 return -EPERM; in umr_check_mkey_mask()
327 if (!umrwr->ignore_free_state) { in set_reg_umr_segment()
328 if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE) in set_reg_umr_segment()
330 umr->flags = MLX5_UMR_CHECK_FREE; in set_reg_umr_segment()
333 umr->flags = MLX5_UMR_CHECK_NOT_FREE; in set_reg_umr_segment()
336 umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size)); in set_reg_umr_segment()
337 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) { in set_reg_umr_segment()
338 u64 offset = get_xlt_octo(umrwr->offset); in set_reg_umr_segment()
340 umr->xlt_offset = cpu_to_be16(offset & 0xffff); in set_reg_umr_segment()
341 umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16); in set_reg_umr_segment()
342 umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN; in set_reg_umr_segment()
344 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION) in set_reg_umr_segment()
345 umr->mkey_mask |= get_umr_update_translation_mask(); in set_reg_umr_segment()
346 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) { in set_reg_umr_segment()
347 umr->mkey_mask |= get_umr_update_access_mask( in set_reg_umr_segment()
348 !!(MLX5_CAP_GEN(dev->mdev, atomic)), in set_reg_umr_segment()
349 !!(MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr)), in set_reg_umr_segment()
350 !!(MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))); in set_reg_umr_segment()
351 umr->mkey_mask |= get_umr_update_pd_mask(); in set_reg_umr_segment()
353 if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR) in set_reg_umr_segment()
354 umr->mkey_mask |= get_umr_enable_mr_mask(); in set_reg_umr_segment()
355 if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR) in set_reg_umr_segment()
356 umr->mkey_mask |= get_umr_disable_mr_mask(); in set_reg_umr_segment()
358 if (!wr->num_sge) in set_reg_umr_segment()
359 umr->flags |= MLX5_UMR_INLINE; in set_reg_umr_segment()
361 return umr_check_mkey_mask(dev, be64_to_cpu(umr->mkey_mask)); in set_reg_umr_segment()
377 int ndescs = ALIGN(mr->ndescs + mr->meta_ndescs, 8) >> 1; in set_reg_mkey_seg()
381 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_MTT) in set_reg_mkey_seg()
382 seg->log2_page_size = ilog2(mr->ibmr.page_size); in set_reg_mkey_seg()
383 else if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS) in set_reg_mkey_seg()
387 seg->flags = get_umr_flags(access) | mr->access_mode; in set_reg_mkey_seg()
388 seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00); in set_reg_mkey_seg()
389 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL); in set_reg_mkey_seg()
390 seg->start_addr = cpu_to_be64(mr->ibmr.iova); in set_reg_mkey_seg()
391 seg->len = cpu_to_be64(mr->ibmr.length); in set_reg_mkey_seg()
392 seg->xlt_oct_size = cpu_to_be32(ndescs); in set_reg_mkey_seg()
398 seg->status = MLX5_MKEY_STATUS_FREE; in set_linv_mkey_seg()
408 if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR) in set_reg_mkey_segment()
412 !!(umrwr->access_flags & IB_ACCESS_REMOTE_ATOMIC)); in set_reg_mkey_segment()
414 !!(umrwr->access_flags & IB_ACCESS_REMOTE_WRITE)); in set_reg_mkey_segment()
415 MLX5_SET(mkc, seg, rr, !!(umrwr->access_flags & IB_ACCESS_REMOTE_READ)); in set_reg_mkey_segment()
416 MLX5_SET(mkc, seg, lw, !!(umrwr->access_flags & IB_ACCESS_LOCAL_WRITE)); in set_reg_mkey_segment()
418 if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr)) in set_reg_mkey_segment()
420 !!(umrwr->access_flags & IB_ACCESS_RELAXED_ORDERING)); in set_reg_mkey_segment()
421 if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr)) in set_reg_mkey_segment()
423 !!(umrwr->access_flags & IB_ACCESS_RELAXED_ORDERING)); in set_reg_mkey_segment()
425 if (umrwr->pd) in set_reg_mkey_segment()
426 MLX5_SET(mkc, seg, pd, to_mpd(umrwr->pd)->pdn); in set_reg_mkey_segment()
427 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION && in set_reg_mkey_segment()
428 !umrwr->length) in set_reg_mkey_segment()
431 MLX5_SET64(mkc, seg, start_addr, umrwr->virt_addr); in set_reg_mkey_segment()
432 MLX5_SET64(mkc, seg, len, umrwr->length); in set_reg_mkey_segment()
433 MLX5_SET(mkc, seg, log_page_size, umrwr->page_shift); in set_reg_mkey_segment()
435 MLX5_SET(mkc, seg, mkey_7_0, mlx5_mkey_variant(umrwr->mkey)); in set_reg_mkey_segment()
442 int bcount = mr->desc_size * (mr->ndescs + mr->meta_ndescs); in set_reg_data_seg()
444 dseg->addr = cpu_to_be64(mr->desc_map); in set_reg_data_seg()
445 dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64)); in set_reg_data_seg()
446 dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey); in set_reg_data_seg()
451 switch (wr->opcode) { in send_ieth()
454 return wr->ex.imm_data; in send_ieth()
457 return cpu_to_be32(wr->ex.invalidate_rkey); in send_ieth()
478 return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4); in wq_sig()
493 for (i = 0; i < wr->num_sge; i++) { in set_data_inl_seg()
494 size_t len = wr->sg_list[i].length; in set_data_inl_seg()
495 void *addr = (void *)(unsigned long)(wr->sg_list[i].addr); in set_data_inl_seg()
499 if (unlikely(inl > qp->max_inline_data)) in set_data_inl_seg()
500 return -ENOMEM; in set_data_inl_seg()
506 handle_post_send_edge(&qp->sq, wqe, in set_data_inl_seg()
507 *wqe_sz + (offset >> 4), in set_data_inl_seg()
510 leftlen = *cur_edge - *wqe; in set_data_inl_seg()
514 len -= copysz; in set_data_inl_seg()
521 seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG); in set_data_inl_seg()
523 *wqe_sz += ALIGN(inl + sizeof(seg->byte_count), 16) / 16; in set_data_inl_seg()
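set_data_inl_seg() gathers all SGEs into the WQE as inline data: the payload is copied in edge-bounded chunks, the total byte count is written with the inline marker bit set, and the WQE grows by ALIGN(inl + sizeof(byte_count), 16) / 16 sixteen-byte units. A sketch of just the size and flag bookkeeping, assuming MLX5_INLINE_SEG is the 0x80000000 marker bit (an assumption; the header holds the authoritative value):

#include <stdint.h>
#include <stdio.h>

#define ALIGN16(x)      (((x) + 15u) & ~15u)
#define INLINE_SEG_FLAG 0x80000000u   /* MLX5_INLINE_SEG, assumed value */

struct inl_hdr_demo { uint32_t byte_count; };  /* stand-in for the seg  */

/*
 * Account an inline data segment: 'inl' payload bytes follow a 4-byte
 * byte_count header, the header carries the inline flag plus the length,
 * and the WQE size (in 16-byte units) grows by the aligned total.
 */
static int inline_seg_demo(struct inl_hdr_demo *seg, uint32_t *wqe_sz,
			   uint32_t inl, uint32_t max_inline_data)
{
	if (inl > max_inline_data)
		return -1;                      /* -ENOMEM in the driver */

	seg->byte_count = inl | INLINE_SEG_FLAG;
	*wqe_sz += ALIGN16(inl + sizeof(seg->byte_count)) / 16;
	return 0;
}

int main(void)
{
	struct inl_hdr_demo seg;
	uint32_t wqe_sz = 3;                    /* ctrl + other segments */

	if (!inline_seg_demo(&seg, &wqe_sz, 60, 128))
		printf("byte_count=0x%08x wqe_sz=%u (16B units)\n",
		       seg.byte_count, wqe_sz);
	return 0;
}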
554 inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID | in mlx5_fill_inl_bsf()
556 inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag); in mlx5_fill_inl_bsf()
557 inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag); in mlx5_fill_inl_bsf()
559 inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK; in mlx5_fill_inl_bsf()
560 inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ? in mlx5_fill_inl_bsf()
563 if (domain->sig.dif.ref_remap) in mlx5_fill_inl_bsf()
564 inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG; in mlx5_fill_inl_bsf()
566 if (domain->sig.dif.app_escape) { in mlx5_fill_inl_bsf()
567 if (domain->sig.dif.ref_escape) in mlx5_fill_inl_bsf()
568 inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE; in mlx5_fill_inl_bsf()
570 inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE; in mlx5_fill_inl_bsf()
573 inl->dif_app_bitmask_check = in mlx5_fill_inl_bsf()
574 cpu_to_be16(domain->sig.dif.apptag_check_mask); in mlx5_fill_inl_bsf()
581 struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig; in mlx5_set_bsf()
582 struct mlx5_bsf_basic *basic = &bsf->basic; in mlx5_set_bsf()
583 struct ib_sig_domain *mem = &sig_attrs->mem; in mlx5_set_bsf()
584 struct ib_sig_domain *wire = &sig_attrs->wire; in mlx5_set_bsf() local
589 basic->bsf_size_sbs = 1 << 7; in mlx5_set_bsf()
591 basic->check_byte_mask = sig_attrs->check_mask; in mlx5_set_bsf()
592 basic->raw_data_size = cpu_to_be32(data_size); in mlx5_set_bsf()
595 switch (sig_attrs->mem.sig_type) { in mlx5_set_bsf()
599 basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval); in mlx5_set_bsf()
600 basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx); in mlx5_set_bsf()
601 mlx5_fill_inl_bsf(mem, &bsf->m_inl); in mlx5_set_bsf()
604 return -EINVAL; in mlx5_set_bsf()
607 /* Wire domain */ in mlx5_set_bsf()
608 switch (sig_attrs->wire.sig_type) { in mlx5_set_bsf()
612 if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval && in mlx5_set_bsf()
613 mem->sig_type == wire->sig_type) { in mlx5_set_bsf()
615 basic->bsf_size_sbs |= 1 << 4; in mlx5_set_bsf()
616 if (mem->sig.dif.bg_type == wire->sig.dif.bg_type) in mlx5_set_bsf()
617 basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK; in mlx5_set_bsf()
618 if (mem->sig.dif.app_tag == wire->sig.dif.app_tag) in mlx5_set_bsf()
619 basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK; in mlx5_set_bsf()
620 if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag) in mlx5_set_bsf()
621 basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK; in mlx5_set_bsf()
623 basic->wire.bs_selector = in mlx5_set_bsf()
624 bs_selector(wire->sig.dif.pi_interval); in mlx5_set_bsf()
626 basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx); in mlx5_set_bsf()
627 mlx5_fill_inl_bsf(wire, &bsf->w_inl); in mlx5_set_bsf()
630 return -EINVAL; in mlx5_set_bsf()
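mlx5_set_bsf() programs one signature domain for memory and one for the wire. When both domains use the same pi_interval and sig_type, the device can forward the T10-DIF fields instead of regenerating them, so a copy mask is built per field that matches (guard, app tag, ref tag). A minimal sketch of that mask construction, with hypothetical bit values standing in for MLX5_CPY_GRD_MASK / MLX5_CPY_APP_MASK / MLX5_CPY_REF_MASK:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins; the real MLX5_CPY_*_MASK values live in wr.c. */
#define CPY_GRD_MASK 0xc0
#define CPY_APP_MASK 0x30
#define CPY_REF_MASK 0x0f

struct dif_demo {
	unsigned int pi_interval;   /* protection interval (block size) */
	int          bg_type;       /* guard type, e.g. CRC vs IP csum  */
	uint16_t     app_tag;
	uint32_t     ref_tag;
};

/* A field is copied end-to-end only if it is identical in the memory and
 * wire domains; the driver additionally requires matching sig_type.     */
static uint8_t build_copy_mask(const struct dif_demo *mem,
			       const struct dif_demo *wire, bool *copy_ok)
{
	uint8_t mask = 0;

	*copy_ok = mem->pi_interval == wire->pi_interval;
	if (!*copy_ok)
		return 0;
	if (mem->bg_type == wire->bg_type)
		mask |= CPY_GRD_MASK;
	if (mem->app_tag == wire->app_tag)
		mask |= CPY_APP_MASK;
	if (mem->ref_tag == wire->ref_tag)
		mask |= CPY_REF_MASK;
	return mask;
}

int main(void)
{
	struct dif_demo mem  = { 512, 1, 0x1234, 100 };
	struct dif_demo wire = { 512, 1, 0x5678, 100 };
	bool copy_ok;

	printf("copy_byte_mask=0x%02x (copy %s)\n",
	       build_copy_mask(&mem, &wire, &copy_ok),
	       copy_ok ? "enabled" : "disabled");
	return 0;
}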
654 struct mlx5_ib_mr *pi_mr = mr->pi_mr; in set_sig_data_segment()
656 data_len = pi_mr->data_length; in set_sig_data_segment()
657 data_key = pi_mr->ibmr.lkey; in set_sig_data_segment()
658 data_va = pi_mr->data_iova; in set_sig_data_segment()
659 if (pi_mr->meta_ndescs) { in set_sig_data_segment()
660 prot_len = pi_mr->meta_length; in set_sig_data_segment()
661 prot_key = pi_mr->ibmr.lkey; in set_sig_data_segment()
662 prot_va = pi_mr->pi_iova; in set_sig_data_segment()
672 * ------------------ in set_sig_data_segment()
674 * ------------------ in set_sig_data_segment()
676 * ------------------ in set_sig_data_segment()
680 data_klm->bcount = cpu_to_be32(data_len); in set_sig_data_segment()
681 data_klm->key = cpu_to_be32(data_key); in set_sig_data_segment()
682 data_klm->va = cpu_to_be64(data_va); in set_sig_data_segment()
688 * --------------------------- in set_sig_data_segment()
690 * --------------------------- in set_sig_data_segment()
692 * --------------------------- in set_sig_data_segment()
694 * --------------------------- in set_sig_data_segment()
696 * --------------------------- in set_sig_data_segment()
701 u16 block_size = sig_attrs->mem.sig.dif.pi_interval; in set_sig_data_segment()
708 prot_size = prot_field_size(sig_attrs->mem.sig_type); in set_sig_data_segment()
711 return -EINVAL; in set_sig_data_segment()
713 sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size + in set_sig_data_segment()
715 sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP); in set_sig_data_segment()
716 sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size); in set_sig_data_segment()
717 sblock_ctrl->num_entries = cpu_to_be16(2); in set_sig_data_segment()
719 data_sentry->bcount = cpu_to_be16(block_size); in set_sig_data_segment()
720 data_sentry->key = cpu_to_be32(data_key); in set_sig_data_segment()
721 data_sentry->va = cpu_to_be64(data_va); in set_sig_data_segment()
722 data_sentry->stride = cpu_to_be16(block_size); in set_sig_data_segment()
724 prot_sentry->bcount = cpu_to_be16(prot_size); in set_sig_data_segment()
725 prot_sentry->key = cpu_to_be32(prot_key); in set_sig_data_segment()
726 prot_sentry->va = cpu_to_be64(prot_va); in set_sig_data_segment()
727 prot_sentry->stride = cpu_to_be16(prot_size); in set_sig_data_segment()
735 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_sig_data_segment()
740 return -EINVAL; in set_sig_data_segment()
744 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_sig_data_segment()
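When the memory domain carries its own protection information, set_sig_data_segment() emits a stride block control entry followed by one sentry for data and one for protection: each cycle consumes pi_interval data bytes plus one protection field, and repeat_count is the number of such blocks (data_len / block_size). The sketch below just checks that arithmetic for a typical 512-byte interval with an 8-byte T10-DIF tuple; all names are illustrative.

#include <stdio.h>

struct stride_demo {
	unsigned int bcount_per_cycle; /* data block + protection field */
	unsigned int repeat_count;     /* number of interleaved cycles  */
	unsigned int data_stride;      /* bytes taken from the data MR  */
	unsigned int prot_stride;      /* bytes taken from the PI MR    */
};

static int build_stride_block(struct stride_demo *s, unsigned int data_len,
			      unsigned int block_size, unsigned int prot_size)
{
	if (!block_size || !prot_size)
		return -1;   /* the driver returns -EINVAL on a bad domain */

	s->bcount_per_cycle = block_size + prot_size;
	s->repeat_count     = data_len / block_size;
	s->data_stride      = block_size;
	s->prot_stride      = prot_size;
	return 0;
}

int main(void)
{
	struct stride_demo s;

	/* 8 blocks of 512B data, each followed by an 8B DIF tuple */
	if (!build_stride_block(&s, 4096, 512, 8))
		printf("cycles=%u, %u+%u bytes per cycle, wire total=%u\n",
		       s.repeat_count, s.data_stride, s.prot_stride,
		       s.repeat_count * s.bcount_per_cycle);
	return 0;
}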
753 u32 sig_key = sig_mr->rkey; in set_sig_mkey_segment()
754 u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1; in set_sig_mkey_segment()
758 seg->flags = get_umr_flags(access_flags) | MLX5_MKC_ACCESS_MODE_KLMS; in set_sig_mkey_segment()
759 seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00); in set_sig_mkey_segment()
760 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 | in set_sig_mkey_segment()
762 seg->len = cpu_to_be64(length); in set_sig_mkey_segment()
763 seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size)); in set_sig_mkey_segment()
764 seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE); in set_sig_mkey_segment()
772 umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE; in set_sig_umr_segment()
773 umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size)); in set_sig_umr_segment()
774 umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE); in set_sig_umr_segment()
775 umr->mkey_mask = sig_mkey_mask(); in set_sig_umr_segment()
783 struct mlx5_ib_mr *sig_mr = to_mmr(wr->mr); in set_pi_umr_wr()
784 struct mlx5_ib_mr *pi_mr = sig_mr->pi_mr; in set_pi_umr_wr()
785 struct ib_sig_attrs *sig_attrs = sig_mr->ibmr.sig_attrs; in set_pi_umr_wr()
786 u32 pdn = to_mpd(qp->ibqp.pd)->pdn; in set_pi_umr_wr()
790 if (unlikely(send_wr->num_sge != 0) || in set_pi_umr_wr()
791 unlikely(wr->access & IB_ACCESS_REMOTE_ATOMIC) || in set_pi_umr_wr()
792 unlikely(!sig_mr->sig) || unlikely(!qp->ibqp.integrity_en) || in set_pi_umr_wr()
793 unlikely(!sig_mr->sig->sig_status_checked)) in set_pi_umr_wr()
794 return -EINVAL; in set_pi_umr_wr()
797 region_len = pi_mr->ibmr.length; in set_pi_umr_wr()
800 * KLM octoword size - if protection was provided in set_pi_umr_wr()
804 if (sig_attrs->mem.sig_type != IB_SIG_TYPE_NONE) in set_pi_umr_wr()
812 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_pi_umr_wr()
814 set_sig_mkey_segment(*seg, wr->mr, wr->access, xlt_size, region_len, in set_pi_umr_wr()
818 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_pi_umr_wr()
820 ret = set_sig_data_segment(send_wr, wr->mr, sig_attrs, qp, seg, size, in set_pi_umr_wr()
825 sig_mr->sig->sig_status_checked = false; in set_pi_umr_wr()
835 psv_seg->psv_num = cpu_to_be32(psv_idx); in set_psv_wr()
836 switch (domain->sig_type) { in set_psv_wr()
840 psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 | in set_psv_wr()
841 domain->sig.dif.app_tag); in set_psv_wr()
842 psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag); in set_psv_wr()
846 domain->sig_type); in set_psv_wr()
847 return -EINVAL; in set_psv_wr()
861 struct mlx5_ib_mr *mr = to_mmr(wr->mr); in set_reg_wr()
862 struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd); in set_reg_wr()
863 struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device); in set_reg_wr()
864 int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size; in set_reg_wr()
866 bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC; in set_reg_wr()
870 if (!mlx5_ib_can_reconfig_with_umr(dev, 0, wr->access)) { in set_reg_wr()
872 to_mdev(qp->ibqp.device), in set_reg_wr()
874 return -EINVAL; in set_reg_wr()
877 if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) { in set_reg_wr()
878 mlx5_ib_warn(to_mdev(qp->ibqp.device), in set_reg_wr()
880 return -EINVAL; in set_reg_wr()
891 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_reg_wr()
893 set_reg_mkey_seg(*seg, mr, wr->key, wr->access); in set_reg_wr()
896 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_reg_wr()
899 memcpy_send_wqe(&qp->sq, cur_edge, seg, size, mr->descs, in set_reg_wr()
901 *size = ALIGN(*size, MLX5_SEND_WQE_BB >> 4); in set_reg_wr()
916 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_linv_wr()
920 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in set_linv_wr()
929 for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) { in dump_wqe()
931 p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx); in dump_wqe()
934 idx = (idx + 1) & (qp->sq.wqe_cnt - 1); in dump_wqe()
948 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) in __begin_wqe()
949 return -ENOMEM; in __begin_wqe()
951 *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); in __begin_wqe()
952 *seg = mlx5_frag_buf_get_wqe(&qp->sq.fbc, *idx); in __begin_wqe()
955 (*ctrl)->imm = send_ieth(wr); in __begin_wqe()
956 (*ctrl)->fm_ce_se = qp->sq_signal_bits | in __begin_wqe()
962 *cur_edge = qp->sq.cur_edge; in __begin_wqe()
973 wr->send_flags & IB_SEND_SIGNALED, in begin_wqe()
974 wr->send_flags & IB_SEND_SOLICITED); in begin_wqe()
985 ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | in finish_wqe()
987 ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8)); in finish_wqe()
988 ctrl->fm_ce_se |= fence; in finish_wqe()
989 if (unlikely(qp->flags_en & MLX5_QP_FLAG_SIGNATURE)) in finish_wqe()
990 ctrl->signature = wq_sig(ctrl); in finish_wqe()
992 qp->sq.wrid[idx] = wr_id; in finish_wqe()
993 qp->sq.w_list[idx].opcode = mlx5_opcode; in finish_wqe()
994 qp->sq.wqe_head[idx] = qp->sq.head + nreq; in finish_wqe()
995 qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB); in finish_wqe()
996 qp->sq.w_list[idx].next = qp->sq.cur_post; in finish_wqe()
1002 qp->sq.cur_edge = (unlikely(seg == cur_edge)) ? in finish_wqe()
1003 get_sq_edge(&qp->sq, qp->sq.cur_post & in finish_wqe()
1004 (qp->sq.wqe_cnt - 1)) : in finish_wqe()
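finish_wqe() stamps the control segment with the producer index and opcode, records per-slot bookkeeping (wrid, opcode, wqe_head), and advances cur_post by the WQE size converted from 16-byte units to 64-byte basic blocks via DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB). A sketch of that index arithmetic, with illustrative names and the big-endian conversion omitted:

#include <stdint.h>
#include <stdio.h>

#define SEND_WQE_BB 64u                 /* MLX5_SEND_WQE_BB            */
#define WQE_CNT     256u                /* ring size in basic blocks   */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct sq_demo {
	unsigned int cur_post;          /* free-running BB counter      */
};

/*
 * Close a WQE of 'size' 16-byte units: the opcode word carries the
 * current producer index, then cur_post advances by the number of
 * 64-byte basic blocks the WQE occupies.
 */
static uint32_t finish_wqe_demo(struct sq_demo *sq, unsigned int size,
				uint8_t opcode)
{
	uint32_t opmod_idx_opcode = ((uint32_t)sq->cur_post << 8) | opcode;

	sq->cur_post += DIV_ROUND_UP(size * 16, SEND_WQE_BB);
	return opmod_idx_opcode;
}

int main(void)
{
	struct sq_demo sq = { .cur_post = 254 };
	unsigned int size = 7;          /* 112 bytes -> 2 basic blocks  */

	finish_wqe_demo(&sq, size, 0x08 /* e.g. an RDMA write opcode */);
	printf("cur_post=%u, next slot index=%u\n",
	       sq.cur_post, sq.cur_post & (WQE_CNT - 1));
	return 0;
}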
1010 set_raddr_seg(*seg, rdma_wr(wr)->remote_addr, rdma_wr(wr)->rkey); in handle_rdma_op()
1019 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV; in handle_local_inv()
1020 (*ctrl)->imm = cpu_to_be32(wr->ex.invalidate_rkey); in handle_local_inv()
1028 qp->sq.wr_data[idx] = IB_WR_REG_MR; in handle_reg_mr()
1029 (*ctrl)->imm = cpu_to_be32(reg_wr(wr)->key); in handle_reg_mr()
1049 err = -ENOMEM; in handle_psv()
1057 finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id, nreq, in handle_psv()
1079 qp->sq.wr_data[*idx] = IB_WR_REG_MR_INTEGRITY; in handle_reg_mr_integrity()
1081 mr = to_mmr(reg_wr(wr)->mr); in handle_reg_mr_integrity()
1082 pi_mr = mr->pi_mr; in handle_reg_mr_integrity()
1088 reg_pi_wr.mr = &pi_mr->ibmr; in handle_reg_mr_integrity()
1089 reg_pi_wr.access = reg_wr(wr)->access; in handle_reg_mr_integrity()
1090 reg_pi_wr.key = pi_mr->ibmr.rkey; in handle_reg_mr_integrity()
1092 (*ctrl)->imm = cpu_to_be32(reg_pi_wr.key); in handle_reg_mr_integrity()
1098 finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id, in handle_reg_mr_integrity()
1104 err = -ENOMEM; in handle_reg_mr_integrity()
1110 pa_pi_mr.ibmr.lkey = mr->ibmr.pd->local_dma_lkey; in handle_reg_mr_integrity()
1111 pa_pi_mr.ndescs = mr->ndescs; in handle_reg_mr_integrity()
1112 pa_pi_mr.data_length = mr->data_length; in handle_reg_mr_integrity()
1113 pa_pi_mr.data_iova = mr->data_iova; in handle_reg_mr_integrity()
1114 if (mr->meta_ndescs) { in handle_reg_mr_integrity()
1115 pa_pi_mr.meta_ndescs = mr->meta_ndescs; in handle_reg_mr_integrity()
1116 pa_pi_mr.meta_length = mr->meta_length; in handle_reg_mr_integrity()
1117 pa_pi_mr.pi_iova = mr->pi_iova; in handle_reg_mr_integrity()
1120 pa_pi_mr.ibmr.length = mr->ibmr.length; in handle_reg_mr_integrity()
1121 mr->pi_mr = &pa_pi_mr; in handle_reg_mr_integrity()
1123 (*ctrl)->imm = cpu_to_be32(mr->ibmr.rkey); in handle_reg_mr_integrity()
1130 finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id, nreq, in handle_reg_mr_integrity()
1133 sig_attrs = mr->ibmr.sig_attrs; in handle_reg_mr_integrity()
1135 &sig_attrs->mem, mr->sig->psv_memory.psv_idx, in handle_reg_mr_integrity()
1141 &sig_attrs->wire, mr->sig->psv_wire.psv_idx, in handle_reg_mr_integrity()
1146 qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; in handle_reg_mr_integrity()
1160 switch (wr->opcode) { in handle_qpt_rc()
1171 err = -EOPNOTSUPP; in handle_qpt_rc()
1205 switch (wr->opcode) { in handle_qpt_uc()
1222 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in handle_qpt_hw_gsi()
1231 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in handle_qpt_ud()
1234 if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) { in handle_qpt_ud()
1242 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in handle_qpt_ud()
1253 if (unlikely(wr->opcode != MLX5_IB_WR_UMR)) { in handle_qpt_reg_umr()
1254 err = -EINVAL; in handle_qpt_reg_umr()
1255 mlx5_ib_warn(dev, "bad opcode %d\n", wr->opcode); in handle_qpt_reg_umr()
1259 qp->sq.wr_data[idx] = MLX5_IB_WR_UMR; in handle_qpt_reg_umr()
1260 (*ctrl)->imm = cpu_to_be32(umr_wr(wr)->mkey); in handle_qpt_reg_umr()
1266 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in handle_qpt_reg_umr()
1270 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); in handle_qpt_reg_umr()
1279 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); in mlx5_ib_post_send()
1280 struct mlx5_core_dev *mdev = dev->mdev; in mlx5_ib_post_send()
1296 if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && in mlx5_ib_post_send()
1299 return -EIO; in mlx5_ib_post_send()
1302 if (unlikely(ibqp->qp_type == IB_QPT_GSI)) in mlx5_ib_post_send()
1306 bf = &qp->bf; in mlx5_ib_post_send()
1308 spin_lock_irqsave(&qp->sq.lock, flags); in mlx5_ib_post_send()
1310 for (nreq = 0; wr; nreq++, wr = wr->next) { in mlx5_ib_post_send()
1311 if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) { in mlx5_ib_post_send()
1313 err = -EINVAL; in mlx5_ib_post_send()
1318 num_sge = wr->num_sge; in mlx5_ib_post_send()
1319 if (unlikely(num_sge > qp->sq.max_gs)) { in mlx5_ib_post_send()
1321 err = -EINVAL; in mlx5_ib_post_send()
1330 err = -ENOMEM; in mlx5_ib_post_send()
1335 if (wr->opcode == IB_WR_REG_MR || in mlx5_ib_post_send()
1336 wr->opcode == IB_WR_REG_MR_INTEGRITY) { in mlx5_ib_post_send()
1337 fence = dev->umr_fence; in mlx5_ib_post_send()
1340 if (wr->send_flags & IB_SEND_FENCE) { in mlx5_ib_post_send()
1341 if (qp->next_fence) in mlx5_ib_post_send()
1346 fence = qp->next_fence; in mlx5_ib_post_send()
1350 switch (ibqp->qp_type) { in mlx5_ib_post_send()
1363 } else if (wr->opcode == IB_WR_REG_MR_INTEGRITY) { in mlx5_ib_post_send()
1372 if (unlikely(!mdev->port_caps[qp->port - 1].has_smi)) { in mlx5_ib_post_send()
1374 err = -EPERM; in mlx5_ib_post_send()
1396 if (wr->send_flags & IB_SEND_INLINE && num_sge) { in mlx5_ib_post_send()
1405 handle_post_send_edge(&qp->sq, &seg, size, in mlx5_ib_post_send()
1407 if (unlikely(!wr->sg_list[i].length)) in mlx5_ib_post_send()
1412 wr->sg_list + i); in mlx5_ib_post_send()
1418 qp->next_fence = next_fence; in mlx5_ib_post_send()
1419 finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr->wr_id, nreq, in mlx5_ib_post_send()
1420 fence, mlx5_ib_opcode[wr->opcode]); in mlx5_ib_post_send()
1428 qp->sq.head += nreq; in mlx5_ib_post_send()
1435 qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post); in mlx5_ib_post_send()
1442 mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset); in mlx5_ib_post_send()
1446 bf->offset ^= bf->buf_size; in mlx5_ib_post_send()
1449 spin_unlock_irqrestore(&qp->sq.lock, flags); in mlx5_ib_post_send()
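mlx5_ib_post_send() takes the SQ lock, builds one WQE per work request (overflow, opcode and SGE checks, then per-QP-type dispatch), and only after the whole chain is built bumps sq.head, updates the doorbell record, and writes the first 8 bytes of the last control segment to the BlueFlame/doorbell register, toggling bf->offset between the two doorbell buffers. A schematic of that ordering, with the locking and hardware writes reduced to comments since the real driver uses spinlocks, memory barriers and mlx5_write64() against mapped device memory; everything below is an illustrative sketch.

#include <stdint.h>
#include <stdio.h>

struct wr_demo { int opcode; struct wr_demo *next; };

struct qp_demo {
	unsigned int sq_head;      /* WQEs posted                        */
	unsigned int cur_post;     /* producer index in basic blocks     */
	uint32_t     db_rec;       /* software doorbell record           */
	unsigned int bf_offset;    /* which BlueFlame buffer to use next */
	unsigned int bf_buf_size;
};

static int post_send_demo(struct qp_demo *qp, struct wr_demo *wr)
{
	unsigned int nreq = 0;

	/* spin_lock_irqsave(&sq.lock, flags) in the driver */
	for (; wr; wr = wr->next, nreq++) {
		/* 1. overflow / opcode / num_sge checks                   */
		/* 2. begin_wqe(): pick slot cur_post & (wqe_cnt - 1)      */
		/* 3. build segments (raddr, datagram, data, UMR, ...)     */
		/* 4. finish_wqe(): stamp ctrl, advance cur_post           */
		qp->cur_post += 1;           /* pretend each WQE is one BB */
	}

	if (nreq) {
		qp->sq_head += nreq;
		/* make the WQEs visible before the doorbell record ...    */
		qp->db_rec = qp->cur_post;   /* cpu_to_be32() in the driver */
		/* ... and the record visible before ringing the doorbell: */
		/* mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset) */
		qp->bf_offset ^= qp->bf_buf_size;  /* alternate BF buffers */
	}
	/* spin_unlock_irqrestore(&sq.lock, flags) */
	return 0;
}

int main(void)
{
	struct wr_demo w2 = { 1, NULL }, w1 = { 0, &w2 };
	struct qp_demo qp = { .bf_buf_size = 256 };

	post_send_demo(&qp, &w1);
	printf("head=%u cur_post=%u db_rec=%u bf_offset=%u\n",
	       qp.sq_head, qp.cur_post, qp.db_rec, qp.bf_offset);
	return 0;
}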
1456 sig->signature = calc_sig(sig, (max_gs + 1) << 2); in set_sig_seg()
1465 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); in mlx5_ib_post_recv()
1466 struct mlx5_core_dev *mdev = dev->mdev; in mlx5_ib_post_recv()
1473 if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && in mlx5_ib_post_recv()
1476 return -EIO; in mlx5_ib_post_recv()
1479 if (unlikely(ibqp->qp_type == IB_QPT_GSI)) in mlx5_ib_post_recv()
1482 spin_lock_irqsave(&qp->rq.lock, flags); in mlx5_ib_post_recv()
1484 ind = qp->rq.head & (qp->rq.wqe_cnt - 1); in mlx5_ib_post_recv()
1486 for (nreq = 0; wr; nreq++, wr = wr->next) { in mlx5_ib_post_recv()
1487 if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { in mlx5_ib_post_recv()
1488 err = -ENOMEM; in mlx5_ib_post_recv()
1493 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in mlx5_ib_post_recv()
1494 err = -EINVAL; in mlx5_ib_post_recv()
1499 scat = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ind); in mlx5_ib_post_recv()
1500 if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) in mlx5_ib_post_recv()
1503 for (i = 0; i < wr->num_sge; i++) in mlx5_ib_post_recv()
1504 set_data_ptr_seg(scat + i, wr->sg_list + i); in mlx5_ib_post_recv()
1506 if (i < qp->rq.max_gs) { in mlx5_ib_post_recv()
1512 if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) { in mlx5_ib_post_recv()
1514 set_sig_seg(sig, qp->rq.max_gs); in mlx5_ib_post_recv()
1517 qp->rq.wrid[ind] = wr->wr_id; in mlx5_ib_post_recv()
1519 ind = (ind + 1) & (qp->rq.wqe_cnt - 1); in mlx5_ib_post_recv()
1524 qp->rq.head += nreq; in mlx5_ib_post_recv()
1531 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); in mlx5_ib_post_recv()
1534 spin_unlock_irqrestore(&qp->rq.lock, flags); in mlx5_ib_post_recv()
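The receive side is simpler: each work request fills the scatter entries of slot rq.head & (wqe_cnt - 1), shorter scatter lists are closed off in the elided branch after the max_gs check (with a signature segment prepended when MLX5_QP_FLAG_SIGNATURE is set), and after the loop rq.head advances by nreq and its low 16 bits are written to the doorbell record. A sketch of the producer-index and doorbell bookkeeping; names are illustrative.

#include <stdint.h>
#include <stdio.h>

#define RQ_WQE_CNT 128u                  /* ring size, power of two */

struct rq_demo {
	unsigned int head;               /* free-running producer counter */
	uint64_t     wrid[RQ_WQE_CNT];   /* wr_id per slot                */
	uint32_t     db_rec;             /* doorbell record (low 16 bits) */
};

/* Post 'nreq' receive WRs whose ids are in wr_ids[]. */
static void post_recv_demo(struct rq_demo *rq, const uint64_t *wr_ids,
			   unsigned int nreq)
{
	unsigned int ind = rq->head & (RQ_WQE_CNT - 1);
	unsigned int i;

	for (i = 0; i < nreq; i++) {
		/* scatter entries for slot 'ind' would be written here */
		rq->wrid[ind] = wr_ids[i];
		ind = (ind + 1) & (RQ_WQE_CNT - 1);
	}

	rq->head += nreq;
	/* driver: wmb(); *qp->db.db = cpu_to_be32(rq->head & 0xffff);  */
	rq->db_rec = rq->head & 0xffff;
}

int main(void)
{
	static struct rq_demo rq = { .head = 0x1fffe };
	const uint64_t ids[3] = { 101, 102, 103 };

	post_recv_demo(&rq, ids, 3);
	printf("head=0x%x db_rec=0x%x slot_of_last=%u\n",
	       rq.head, rq.db_rec, (rq.head - 1) & (RQ_WQE_CNT - 1));
	return 0;
}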