Lines matching refs: umem_odp (drivers/infiniband/hw/mlx5/odp.c)
288 struct ib_umem_odp *umem_odp = in mlx5_ib_invalidate_range() local
303 mutex_lock(&umem_odp->umem_mutex); in mlx5_ib_invalidate_range()
309 if (!umem_odp->npages) in mlx5_ib_invalidate_range()
311 mr = umem_odp->private; in mlx5_ib_invalidate_range()
313 start = max_t(u64, ib_umem_start(umem_odp), range->start); in mlx5_ib_invalidate_range()
314 end = min_t(u64, ib_umem_end(umem_odp), range->end); in mlx5_ib_invalidate_range()
322 for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) { in mlx5_ib_invalidate_range()
323 idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift; in mlx5_ib_invalidate_range()
330 if (umem_odp->dma_list[idx] & in mlx5_ib_invalidate_range()
365 ib_umem_odp_unmap_dma_pages(umem_odp, start, end); in mlx5_ib_invalidate_range()
367 if (unlikely(!umem_odp->npages && mr->parent)) in mlx5_ib_invalidate_range()
370 mutex_unlock(&umem_odp->umem_mutex); in mlx5_ib_invalidate_range()
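
These matches trace mlx5_ib_invalidate_range() end to end: take umem_odp->umem_mutex (line 303), bail out early if no pages are currently mapped (309), fetch the owning MR from umem_odp->private (311), clamp the notifier range to the umem bounds (313-314), walk the clamped range page by page to index dma_list (322-330), unmap the DMA pages (365), check for the now-empty implicit child case (367), and drop the mutex (370). Below is a minimal userspace sketch of the clamping and per-page index arithmetic only, assuming made-up UMEM_START/UMEM_END/PAGE_SHIFT_ODP values in place of ib_umem_start(), ib_umem_end(), and umem_odp->page_shift; it is not the driver code itself.

        #include <stdint.h>
        #include <stdio.h>

        /* Hypothetical stand-ins for ib_umem_start()/ib_umem_end() and
         * umem_odp->page_shift; the real values come from the ib_umem_odp. */
        #define UMEM_START     0x10000ULL
        #define UMEM_END       0x20000ULL
        #define PAGE_SHIFT_ODP 12

        int main(void)
        {
                /* An example mmu_notifier range that overlaps the umem. */
                uint64_t range_start = 0x0c000, range_end = 0x13000;

                /* Clamp the notifier range to the umem, as max_t()/min_t()
                 * do on lines 313-314. */
                uint64_t start = range_start > UMEM_START ? range_start : UMEM_START;
                uint64_t end = range_end < UMEM_END ? range_end : UMEM_END;

                /* Walk the clamped range one page at a time; idx is the
                 * per-page slot in dma_list, as on lines 322-323. */
                for (uint64_t addr = start; addr < end;
                     addr += 1ULL << PAGE_SHIFT_ODP) {
                        uint64_t idx = (addr - UMEM_START) >> PAGE_SHIFT_ODP;
                        printf("addr 0x%llx -> dma_list[%llu]\n",
                               (unsigned long long)addr,
                               (unsigned long long)idx);
                }
                return 0;
        }
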
535 struct ib_umem_odp *umem_odp; in mlx5_ib_alloc_implicit_mr() local
539 umem_odp = ib_umem_odp_alloc_implicit(&dev->ib_dev, access_flags); in mlx5_ib_alloc_implicit_mr()
540 if (IS_ERR(umem_odp)) in mlx5_ib_alloc_implicit_mr()
541 return ERR_CAST(umem_odp); in mlx5_ib_alloc_implicit_mr()
551 imr->umem = &umem_odp->umem; in mlx5_ib_alloc_implicit_mr()
554 imr->umem = &umem_odp->umem; in mlx5_ib_alloc_implicit_mr()
580 ib_umem_odp_release(umem_odp); in mlx5_ib_alloc_implicit_mr()
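
Lines 539-541, 551/554, and 580 show the allocation and unwind shape of mlx5_ib_alloc_implicit_mr(): allocate the implicit umem first, propagate failure with ERR_CAST() after an IS_ERR() check, wire the umem into the MR, and release it with ib_umem_odp_release() if a later step fails. The sketch below models the ERR_PTR encoding behind that check in userspace C, re-defining the linux/err.h helpers locally; alloc_child() and alloc_parent() are hypothetical stand-ins for ib_umem_odp_alloc_implicit() and the MR allocation path, not the driver's actual API.

        #include <errno.h>
        #include <stdio.h>
        #include <stdlib.h>

        /* Userspace model of linux/err.h: errno values are encoded in the
         * top 4095 pointer values, so one return slot carries both cases. */
        #define MAX_ERRNO 4095
        static inline void *ERR_PTR(long err) { return (void *)err; }
        static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
        static inline int IS_ERR(const void *ptr)
        {
                return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
        }
        /* ERR_CAST() re-types an error pointer, as on line 541. */
        static inline void *ERR_CAST(const void *ptr) { return (void *)ptr; }

        /* Hypothetical allocator in the role of ib_umem_odp_alloc_implicit(). */
        static void *alloc_child(int fail)
        {
                return fail ? ERR_PTR(-ENOMEM) : malloc(16);
        }

        /* Mirrors the shape of mlx5_ib_alloc_implicit_mr(): allocate, check,
         * and release the child if a later step fails (line 580). */
        static void *alloc_parent(int fail_child, int fail_later)
        {
                void *child = alloc_child(fail_child);

                if (IS_ERR(child))
                        return ERR_CAST(child); /* propagate, nothing to undo */
                if (fail_later) {
                        free(child);            /* ib_umem_odp_release() role */
                        return ERR_PTR(-EINVAL);
                }
                return child;
        }

        int main(void)
        {
                void *p = alloc_parent(0, 1);

                if (IS_ERR(p))
                        printf("failed: %ld\n", PTR_ERR(p));
                else
                        free(p);
                return 0;
        }
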
740 struct ib_umem_odp *umem_odp; in pagefault_implicit_mr() local
755 umem_odp = to_ib_umem_odp(mtt->umem); in pagefault_implicit_mr()
756 len = min_t(u64, user_va + bcnt, ib_umem_end(umem_odp)) - in pagefault_implicit_mr()
759 ret = pagefault_real_mr(mtt, umem_odp, user_va, len, in pagefault_implicit_mr()
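
The final matches are in pagefault_implicit_mr(), where the faulting span [user_va, user_va + bcnt) can cross several child MTTs; line 756 clamps each chunk so the per-child fault never runs past ib_umem_end() of that child's umem before pagefault_real_mr() is called (759). A minimal sketch of that per-chunk clamping, assuming a hypothetical fixed CHILD_SIZE granularity and child_start()/child_end() helpers in place of the real per-MTT umem bounds:

        #include <stdint.h>
        #include <stdio.h>

        /* Hypothetical per-child span; the driver's actual granularity is
         * its per-MTT umem size, not this constant. */
        #define CHILD_SIZE (1ULL << 20)

        /* Bounds of the child covering va, standing in for
         * ib_umem_start()/ib_umem_end() on the child's ib_umem_odp. */
        static uint64_t child_start(uint64_t va) { return va & ~(CHILD_SIZE - 1); }
        static uint64_t child_end(uint64_t va)   { return child_start(va) + CHILD_SIZE; }

        int main(void)
        {
                uint64_t user_va = (1ULL << 20) - 0x3000; /* near a boundary */
                uint64_t bcnt = 0x8000;                   /* spills into next child */

                while (bcnt) {
                        uint64_t end = child_end(user_va);
                        /* Clamp this chunk to the current child, as on line 756:
                         * len = min(user_va + bcnt, ib_umem_end(umem_odp)) - user_va */
                        uint64_t len = (user_va + bcnt < end ? user_va + bcnt : end)
                                       - user_va;

                        printf("fault child [0x%llx, 0x%llx): len 0x%llx\n",
                               (unsigned long long)child_start(user_va),
                               (unsigned long long)end,
                               (unsigned long long)len);
                        user_va += len;
                        bcnt -= len;
                }
                return 0;
        }
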