Lines matching refs: mr (drivers/infiniband/hw/mlx5/odp.c)

161 struct mlx5_ib_mr *mr, int flags) in populate_mtt() argument
163 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); in populate_mtt()
177 struct mlx5_ib_mr *mr, int flags) in mlx5_odp_populate_xlt() argument
180 populate_klm(xlt, idx, nentries, mr, flags); in mlx5_odp_populate_xlt()
182 populate_mtt(xlt, idx, nentries, mr, flags); in mlx5_odp_populate_xlt()
186 static void dma_fence_odp_mr(struct mlx5_ib_mr *mr) in dma_fence_odp_mr() argument
188 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); in dma_fence_odp_mr()
193 mlx5_mr_cache_invalidate(mr); in dma_fence_odp_mr()
201 if (!mr->cache_ent) { in dma_fence_odp_mr()
202 mlx5_core_destroy_mkey(mr->dev->mdev, &mr->mmkey); in dma_fence_odp_mr()
203 WARN_ON(mr->descs); in dma_fence_odp_mr()
213 static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt) in free_implicit_child_mr() argument
215 struct mlx5_ib_mr *imr = mr->parent; in free_implicit_child_mr()
217 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); in free_implicit_child_mr()
222 WARN_ON(atomic_read(&mr->num_deferred_work)); in free_implicit_child_mr()
225 srcu_key = srcu_read_lock(&mr->dev->odp_srcu); in free_implicit_child_mr()
227 mlx5_ib_update_xlt(mr->parent, idx, 1, 0, in free_implicit_child_mr()
231 srcu_read_unlock(&mr->dev->odp_srcu, srcu_key); in free_implicit_child_mr()
234 dma_fence_odp_mr(mr); in free_implicit_child_mr()
236 mr->parent = NULL; in free_implicit_child_mr()
237 mlx5_mr_cache_free(mr->dev, mr); in free_implicit_child_mr()
245 struct mlx5_ib_mr *mr = in free_implicit_child_mr_work() local
248 free_implicit_child_mr(mr, true); in free_implicit_child_mr_work()
253 struct mlx5_ib_mr *mr = in free_implicit_child_mr_rcu() local
257 INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work); in free_implicit_child_mr_rcu()
258 queue_work(system_unbound_wq, &mr->odp_destroy.work); in free_implicit_child_mr_rcu()
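
The free_implicit_child_mr_rcu()/free_implicit_child_mr_work() pair above is the usual "defer from an (S)RCU callback to process context" idiom: the callback must not sleep, so it re-queues the real teardown on system_unbound_wq. A minimal self-contained sketch of that idiom follows; the demo_* names are illustrative, not the driver's.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct demo_obj {
        struct rcu_head rcu;
        struct work_struct work;
};

/* Runs on system_unbound_wq, so it is allowed to sleep. */
static void demo_free_work(struct work_struct *work)
{
        struct demo_obj *obj = container_of(work, struct demo_obj, work);

        /* blocking teardown goes here (firmware commands, DMA fencing, ...) */
        kfree(obj);
}

/* (S)RCU callback context: must not sleep, so hand off to a work item. */
static void demo_free_rcu(struct rcu_head *head)
{
        struct demo_obj *obj = container_of(head, struct demo_obj, rcu);

        INIT_WORK(&obj->work, demo_free_work);
        queue_work(system_unbound_wq, &obj->work);
}

system_unbound_wq is a natural choice for such teardown: the work can run for a while and has no affinity requirement to the queueing CPU.
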
261 static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr) in destroy_unused_implicit_child_mr() argument
263 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); in destroy_unused_implicit_child_mr()
265 struct mlx5_ib_mr *imr = mr->parent; in destroy_unused_implicit_child_mr()
272 if (__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL, GFP_ATOMIC) != in destroy_unused_implicit_child_mr()
273 mr) in destroy_unused_implicit_child_mr()
277 call_srcu(&mr->dev->odp_srcu, &mr->odp_destroy.rcu, in destroy_unused_implicit_child_mr()
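
destroy_unused_implicit_child_mr() combines two steps: atomically clear the xarray slot only if it still holds this child (the __xa_cmpxchg at line 272, with GFP_ATOMIC because it runs from the invalidation path), then defer the free past any SRCU readers with call_srcu(). A hedged sketch of that combination, using the unlocked xa_cmpxchg() and GFP_KERNEL for simplicity; every name below is illustrative.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/xarray.h>

DEFINE_SRCU(demo_child_srcu);
static DEFINE_XARRAY(demo_children);

struct demo_child {
        unsigned long idx;
        struct rcu_head rcu;
};

static void demo_child_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct demo_child, rcu));
}

static void demo_destroy_unused_child(struct demo_child *child)
{
        /*
         * Clear the slot only if it still points at this child; a
         * racing fault may already have replaced or re-created it.
         */
        if (xa_cmpxchg(&demo_children, child->idx, child, NULL,
                       GFP_KERNEL) != child)
                return;

        /* Free only after all current SRCU readers have finished. */
        call_srcu(&demo_child_srcu, &child->rcu, demo_child_free_rcu);
}
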
290 struct mlx5_ib_mr *mr; in mlx5_ib_invalidate_range() local
311 mr = umem_odp->private; in mlx5_ib_invalidate_range()
343 mlx5_ib_update_xlt(mr, blk_start_idx, in mlx5_ib_invalidate_range()
352 mlx5_ib_update_xlt(mr, blk_start_idx, in mlx5_ib_invalidate_range()
357 mlx5_update_odp_stats(mr, invalidations, invalidations); in mlx5_ib_invalidate_range()
367 if (unlikely(!umem_odp->npages && mr->parent)) in mlx5_ib_invalidate_range()
368 destroy_unused_implicit_child_mr(mr); in mlx5_ib_invalidate_range()
469 struct mlx5_ib_mr *mr; in implicit_get_child_mr() local
479 ret = mr = mlx5_mr_cache_alloc(imr->dev, MLX5_IMR_MTT_CACHE_ENTRY, in implicit_get_child_mr()
481 if (IS_ERR(mr)) in implicit_get_child_mr()
484 mr->ibmr.pd = imr->ibmr.pd; in implicit_get_child_mr()
485 mr->umem = &odp->umem; in implicit_get_child_mr()
486 mr->ibmr.lkey = mr->mmkey.key; in implicit_get_child_mr()
487 mr->ibmr.rkey = mr->mmkey.key; in implicit_get_child_mr()
488 mr->mmkey.iova = idx * MLX5_IMR_MTT_SIZE; in implicit_get_child_mr()
489 mr->parent = imr; in implicit_get_child_mr()
490 odp->private = mr; in implicit_get_child_mr()
492 err = mlx5_ib_update_xlt(mr, 0, in implicit_get_child_mr()
506 ret = xa_cmpxchg(&imr->implicit_children, idx, NULL, mr, in implicit_get_child_mr()
520 mlx5_ib_dbg(imr->dev, "key %x mr %p\n", mr->mmkey.key, mr); in implicit_get_child_mr()
521 return mr; in implicit_get_child_mr()
524 mlx5_mr_cache_free(imr->dev, mr); in implicit_get_child_mr()
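
implicit_get_child_mr() publishes a fully initialized child with xa_cmpxchg(..., NULL, mr, ...) at line 506, so a half-built MR is never visible, and a racing creator is handled by discarding the local copy. A minimal sketch of that publish-or-lose-the-race step, with hypothetical demo_* names; error handling in the real driver is more involved.

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(demo_tree);

struct demo_entry {
        unsigned long idx;
};

static struct demo_entry *demo_get_entry(unsigned long idx)
{
        struct demo_entry *entry, *cur;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return ERR_PTR(-ENOMEM);
        entry->idx = idx;
        /* ... finish initializing before the entry becomes visible ... */

        cur = xa_cmpxchg(&demo_tree, idx, NULL, entry, GFP_KERNEL);
        if (cur) {
                kfree(entry);
                if (xa_is_err(cur))
                        return ERR_PTR(xa_err(cur));
                /* Lost the race: use the entry someone else installed. */
                return cur;
        }
        return entry;
}
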
654 void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr) in mlx5_ib_fence_odp_mr() argument
657 xa_erase(&mr->dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)); in mlx5_ib_fence_odp_mr()
660 synchronize_srcu(&mr->dev->odp_srcu); in mlx5_ib_fence_odp_mr()
662 wait_event(mr->q_deferred_work, !atomic_read(&mr->num_deferred_work)); in mlx5_ib_fence_odp_mr()
664 dma_fence_odp_mr(mr); in mlx5_ib_fence_odp_mr()
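
mlx5_ib_fence_odp_mr() shows the teardown ordering for an SRCU-protected mkey: unpublish from the xarray, drain SRCU readers, wait for deferred work, then DMA-fence. A sketch of that ordering under illustrative names (initialization of the waitqueue and atomic is assumed to happen elsewhere):

#include <linux/atomic.h>
#include <linux/srcu.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/xarray.h>

DEFINE_SRCU(demo_mkey_srcu);
static DEFINE_XARRAY(demo_mkeys);

struct demo_mr {
        u32 key;
        atomic_t num_deferred_work;
        wait_queue_head_t q_deferred_work;
};

static void demo_fence_mr(struct demo_mr *mr)
{
        /* 1. Unpublish: new lookups can no longer find the MR. */
        xa_erase(&demo_mkeys, mr->key);

        /* 2. Drain readers that found it before the erase. */
        synchronize_srcu(&demo_mkey_srcu);

        /* 3. Wait for queued deferred work to drop its count. */
        wait_event(mr->q_deferred_work,
                   !atomic_read(&mr->num_deferred_work));

        /* 4. Only now quiesce DMA and destroy the hardware mkey. */
}

The order matters: erasing before synchronize_srcu() guarantees that any reader still holding the pointer began before the erase and is therefore covered by the grace period.
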
670 static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp, in pagefault_real_mr() argument
699 ret = mlx5_ib_update_xlt(mr, start_idx, np, page_shift, xlt_flags); in pagefault_real_mr()
704 mlx5_ib_err(mr->dev, in pagefault_real_mr()
809 static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt, in pagefault_mr() argument
812 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); in pagefault_mr()
814 lockdep_assert_held(&mr->dev->odp_srcu); in pagefault_mr()
815 if (unlikely(io_virt < mr->mmkey.iova)) in pagefault_mr()
821 if (check_add_overflow(io_virt - mr->mmkey.iova, in pagefault_mr()
827 return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped, in pagefault_mr()
830 return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped, in pagefault_mr()
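
pagefault_mr() first rejects io_virt below mr->mmkey.iova (line 815) and then translates the remaining offset with check_add_overflow() (line 821) so a wrapping sum cannot bypass the bounds check. A small sketch of that translate-with-overflow-check pattern; the struct and field names are illustrative.

#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/types.h>

struct demo_region {
        u64 iova;      /* start of the registered I/O virtual range */
        u64 user_va;   /* userspace address backing iova */
};

static int demo_io_virt_to_user_va(const struct demo_region *r, u64 io_virt,
                                   u64 *user_va)
{
        if (io_virt < r->iova)
                return -EFAULT;

        /* (io_virt - iova) + user base must not wrap around u64. */
        if (check_add_overflow(io_virt - r->iova, r->user_va, user_va))
                return -EFAULT;

        return 0;
}
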
834 int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr, bool enable) in mlx5_ib_init_odp_mr() argument
842 ret = pagefault_real_mr(mr, to_ib_umem_odp(mr->umem), in mlx5_ib_init_odp_mr()
843 mr->umem->address, mr->umem->length, NULL, in mlx5_ib_init_odp_mr()
900 struct mlx5_ib_mr *mr; in pagefault_single_data_segment() local
936 mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); in pagefault_single_data_segment()
938 ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0); in pagefault_single_data_segment()
942 mlx5_update_odp_stats(mr, faults, ret); in pagefault_single_data_segment()
1729 struct mlx5_ib_mr *mr; member
1739 if (atomic_dec_and_test(&work->frags[i].mr->num_deferred_work)) in destroy_prefetch_work()
1740 wake_up(&work->frags[i].mr->q_deferred_work); in destroy_prefetch_work()
1751 struct mlx5_ib_mr *mr; in get_prefetchable_mr() local
1759 mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); in get_prefetchable_mr()
1761 if (mr->ibmr.pd != pd) in get_prefetchable_mr()
1764 odp = to_ib_umem_odp(mr->umem); in get_prefetchable_mr()
1771 return mr; in get_prefetchable_mr()
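
get_prefetchable_mr() resolves an lkey under the ODP SRCU lock: xa_load() the mkey, container_of() back to the MR, then validate it (PD match at line 1761, ODP backing at line 1764). A hedged sketch of that lookup shape; the demo_* types are stand-ins, not the driver's.

#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/types.h>
#include <linux/xarray.h>

DEFINE_SRCU(demo_lookup_srcu);
static DEFINE_XARRAY(demo_odp_mkeys);

struct demo_mkey {
        u32 key;
};

struct demo_odp_mr {
        struct demo_mkey mmkey;
        void *pd;              /* protection domain the MR belongs to */
        bool is_odp;
};

/* Caller must hold srcu_read_lock(&demo_lookup_srcu). */
static struct demo_odp_mr *demo_lookup_mr(void *pd, u32 lkey)
{
        struct demo_mkey *mmkey;
        struct demo_odp_mr *mr;

        mmkey = xa_load(&demo_odp_mkeys, lkey);
        if (!mmkey)
                return NULL;

        mr = container_of(mmkey, struct demo_odp_mr, mmkey);
        if (mr->pd != pd || !mr->is_odp)
                return NULL;

        return mr;
}
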
1786 dev = work->frags[0].mr->dev; in mlx5_ib_prefetch_mr_work()
1790 ret = pagefault_mr(work->frags[i].mr, work->frags[i].io_virt, in mlx5_ib_prefetch_mr_work()
1795 mlx5_update_odp_stats(work->frags[i].mr, prefetch, ret); in mlx5_ib_prefetch_mr_work()
1815 work->frags[i].mr = in init_prefetch_work()
1817 if (!work->frags[i].mr) { in init_prefetch_work()
1823 atomic_inc(&work->frags[i].mr->num_deferred_work); in init_prefetch_work()
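
The prefetch path pins each MR with atomic_inc(&...->num_deferred_work) before queuing work (line 1823), drops the count and wakes waiters in destroy_prefetch_work() (lines 1739-1740), and mlx5_ib_fence_odp_mr() sleeps on that count at line 662. A self-contained sketch of that accounting loop, with hypothetical demo_* names:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

struct demo_pin_mr {
        atomic_t num_deferred_work;
        wait_queue_head_t q_deferred_work;
};

struct demo_prefetch {
        struct work_struct work;
        struct demo_pin_mr *mr;
};

static void demo_prefetch_work_fn(struct work_struct *work)
{
        struct demo_prefetch *pf =
                container_of(work, struct demo_prefetch, work);

        /* ... fault the requested range in here ... */

        /* Drop the count taken at queue time; wake any fencing waiter. */
        if (atomic_dec_and_test(&pf->mr->num_deferred_work))
                wake_up(&pf->mr->q_deferred_work);
        kfree(pf);
}

static int demo_queue_prefetch(struct demo_pin_mr *mr)
{
        struct demo_prefetch *pf = kzalloc(sizeof(*pf), GFP_KERNEL);

        if (!pf)
                return -ENOMEM;
        pf->mr = mr;

        /* Pin the MR against fencing while the work item is outstanding. */
        atomic_inc(&mr->num_deferred_work);
        INIT_WORK(&pf->work, demo_prefetch_work_fn);
        queue_work(system_unbound_wq, &pf->work);
        return 0;
}
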
1842 struct mlx5_ib_mr *mr; in mlx5_ib_prefetch_sg_list() local
1844 mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey); in mlx5_ib_prefetch_sg_list()
1845 if (!mr) { in mlx5_ib_prefetch_sg_list()
1849 ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length, in mlx5_ib_prefetch_sg_list()
1853 mlx5_update_odp_stats(mr, prefetch, ret); in mlx5_ib_prefetch_sg_list()