/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2007 Cisco Systems.  All rights reserved.
 */

#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct ib_ucontext;
struct ib_umem_odp;

struct ib_umem {
	struct ib_device *ibdev;
	struct mm_struct *owning_mm;
	u64 iova;
	size_t length;
	unsigned long address;
	u32 writable : 1;
	u32 is_odp : 1;
	struct work_struct work;
	struct sg_table sg_head;
	int nmap;
	unsigned int sg_nents;
};

/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
	return umem->address & ~PAGE_MASK;
}
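/* e.g. with 4K pages, a umem starting at address 0x12345678 has offset 0x678 */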

static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
					    unsigned long pgsz)
{
	return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
			 ALIGN_DOWN(umem->iova, pgsz))) /
	       pgsz;
}
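/*
 * Worked example for ib_umem_num_dma_blocks(): iova = 0x1234 and
 * length = 0x2000 with pgsz = 0x1000 cover ALIGN_DOWN(0x1234, 0x1000) =
 * 0x1000 up to ALIGN(0x3234, 0x1000) = 0x4000, i.e. 3 DMA blocks.
 */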

static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}

static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
						struct ib_umem *umem,
						unsigned long pgsz)
{
	__rdma_block_iter_start(biter, umem->sg_head.sgl, umem->nmap, pgsz);
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @biter: block iterator holding the current position
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
	     __rdma_block_iter_next(biter);)
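
/*
 * Illustrative sketch (assumed driver code, not part of this header):
 * a driver that has already picked a block size via
 * ib_umem_find_best_pgsz() might fill a device page table with
 *
 *	struct ib_block_iter biter;
 *
 *	rdma_umem_for_each_dma_block(umem, &biter, pgsz)
 *		set_pte(rdma_block_iter_dma_address(&biter));
 *
 * where set_pte() is a made-up helper and rdma_block_iter_dma_address()
 * is provided by <rdma/ib_verbs.h>.
 */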

#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);

#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_device *device,
					  unsigned long addr, size_t size,
					  int access)
{
	return ERR_PTR(-EINVAL);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem,
				    size_t offset, size_t length)
{
	return -EINVAL;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt)
{
	return 0;
}

#endif /* CONFIG_INFINIBAND_USER_MEM */

#endif /* IB_UMEM_H */