Searched refs:umrwr (Results 1 – 2 of 2) sorted by relevance
  323   const struct mlx5_umr_wr *umrwr = umr_wr(wr);   in set_reg_umr_segment() local
  327   if (!umrwr->ignore_free_state) {   in set_reg_umr_segment()
  336   umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));   in set_reg_umr_segment()
  338   u64 offset = get_xlt_octo(umrwr->offset);   in set_reg_umr_segment()
  405   const struct mlx5_umr_wr *umrwr = umr_wr(wr);   in set_reg_mkey_segment() local
  412   !!(umrwr->access_flags & IB_ACCESS_REMOTE_ATOMIC));   in set_reg_mkey_segment()
  414   !!(umrwr->access_flags & IB_ACCESS_REMOTE_WRITE));   in set_reg_mkey_segment()
  415   MLX5_SET(mkc, seg, rr, !!(umrwr->access_flags & IB_ACCESS_REMOTE_READ));   in set_reg_mkey_segment()
  416   MLX5_SET(mkc, seg, lw, !!(umrwr->access_flags & IB_ACCESS_LOCAL_WRITE));   in set_reg_mkey_segment()
  420   !!(umrwr->access_flags & IB_ACCESS_RELAXED_ORDERING));   in set_reg_mkey_segment()
[all …]
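The set_reg_mkey_segment() hits above show the UMR path translating the work request's access_flags into mkey-context bits via MLX5_SET(). A minimal sketch of that mapping, reassembled from the fragments; only the rr and lw calls appear in full in the results, so the mkc field names used for the truncated lines (a, rw, relaxed_ordering_write) are assumptions:

    /* Sketch only: reassembled from the search hits above, assuming the
     * mlx5_ib driver context (struct mlx5_mkey_seg, umr_wr(), MLX5_SET()).
     * Field names marked "assumed" are not visible in the truncated hits. */
    static void set_reg_mkey_segment_sketch(struct mlx5_mkey_seg *seg,
                                            const struct ib_send_wr *wr)
    {
            const struct mlx5_umr_wr *umrwr = umr_wr(wr);

            MLX5_SET(mkc, seg, a,          /* assumed field name (line 412) */
                     !!(umrwr->access_flags & IB_ACCESS_REMOTE_ATOMIC));
            MLX5_SET(mkc, seg, rw,         /* assumed field name (line 414) */
                     !!(umrwr->access_flags & IB_ACCESS_REMOTE_WRITE));
            MLX5_SET(mkc, seg, rr, !!(umrwr->access_flags & IB_ACCESS_REMOTE_READ));
            MLX5_SET(mkc, seg, lw, !!(umrwr->access_flags & IB_ACCESS_LOCAL_WRITE));
            MLX5_SET(mkc, seg, relaxed_ordering_write,   /* assumed (line 420) */
                     !!(umrwr->access_flags & IB_ACCESS_RELAXED_ORDERING));
    }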
  943    struct mlx5_umr_wr *umrwr)   in mlx5_ib_post_send_wait() argument
  951    umrwr->wr.wr_cqe = &umr_context.cqe;   in mlx5_ib_post_send_wait()
  954    err = ib_post_send(umrc->qp, &umrwr->wr, &bad);   in mlx5_ib_post_send_wait()
  1479   struct mlx5_umr_wr umrwr = {};   in mlx5_mr_cache_invalidate() local
  1484   umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |   in mlx5_mr_cache_invalidate()
  1486   umrwr.wr.opcode = MLX5_IB_WR_UMR;   in mlx5_mr_cache_invalidate()
  1487   umrwr.pd = mr->dev->umrc.pd;   in mlx5_mr_cache_invalidate()
  1488   umrwr.mkey = mr->mmkey.key;   in mlx5_mr_cache_invalidate()
  1489   umrwr.ignore_free_state = 1;   in mlx5_mr_cache_invalidate()
  1491   return mlx5_ib_post_send_wait(mr->dev, &umrwr);   in mlx5_mr_cache_invalidate()
[all …]
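Read together, the mlx5_mr_cache_invalidate() hits show the usage pattern for issuing a UMR: fill a stack-allocated mlx5_umr_wr, set the MLX5_IB_WR_UMR opcode and the disable-MR send flags, point it at the UMR QP's PD and the target mkey, then hand it to mlx5_ib_post_send_wait(), which attaches a completion cqe (line 951) and posts it with ib_post_send() (line 954). A sketch reassembled from those fragments; line 1485 is truncated in the results, so the second send flag shown below is an assumption:

    /* Sketch only: mlx5_mr_cache_invalidate() reassembled from the search
     * hits above; assumes the mlx5_ib driver context. */
    static int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr)
    {
            struct mlx5_umr_wr umrwr = {};

            /* Free (disable) the mkey. The flag OR'd in on the truncated
             * line 1485 is assumed to be MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS. */
            umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
                                  MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
            umrwr.wr.opcode = MLX5_IB_WR_UMR;
            umrwr.pd = mr->dev->umrc.pd;     /* PD of the driver's UMR QP */
            umrwr.mkey = mr->mmkey.key;      /* mkey being invalidated */
            umrwr.ignore_free_state = 1;     /* skips the !ignore_free_state check at 327 */

            /* Blocks until the UMR completes: wr_cqe is set (951) and the
             * WR is posted on the UMR QP with ib_post_send() (954). */
            return mlx5_ib_post_send_wait(mr->dev, &umrwr);
    }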