/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 */

#ifndef IB_UMEM_ODP_H
#define IB_UMEM_ODP_H

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

struct ib_umem_odp {
	struct ib_umem umem;
	struct mmu_interval_notifier notifier;
	struct pid *tgid;

	/* An array of the pfns included in the on-demand paging umem. */
	unsigned long *pfn_list;

	/*
	 * An array with DMA addresses mapped for pfns in pfn_list.
	 * The lower two bits designate access permissions.
	 * See ODP_READ_ALLOWED_BIT and ODP_WRITE_ALLOWED_BIT.
	 */
	dma_addr_t *dma_list;
	/*
	 * The umem_mutex protects the pfn_list and dma_list fields of an ODP
	 * umem, allowing only a single thread to map/unmap pages. The mutex
	 * also protects access to the mmu notifier counters.
	 */
	struct mutex umem_mutex;
	void *private; /* for the HW driver to use. */

	int npages;

	/*
	 * An implicit odp umem cannot be DMA mapped, has 0 length, and serves
	 * only as an anchor for the driver to hold onto the per_mm. FIXME:
	 * This should be removed and drivers should work with the per_mm
	 * directly.
	 */
	bool is_implicit_odp;

	unsigned int page_shift;
};

static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_odp, umem);
}
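
/*
 * Example (illustrative, not part of this header): a driver that keeps
 * an ODP-capable struct ib_umem in a hypothetical mr->umem field can
 * recover the containing ib_umem_odp with:
 *
 *	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
 *
 * This is only valid for umems created through the ODP interfaces below,
 * since container_of() assumes the embedding layout.
 */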

/* Returns the address of the first page of an ODP umem. */
static inline unsigned long ib_umem_start(struct ib_umem_odp *umem_odp)
{
	return umem_odp->notifier.interval_tree.start;
}

/* Returns the address of the page after the last one of an ODP umem. */
static inline unsigned long ib_umem_end(struct ib_umem_odp *umem_odp)
{
	return umem_odp->notifier.interval_tree.last + 1;
}

static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
{
	return (ib_umem_end(umem_odp) - ib_umem_start(umem_odp)) >>
	       umem_odp->page_shift;
}
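
/*
 * Example (illustrative, not part of this header): translating a user
 * virtual address inside [ib_umem_start(), ib_umem_end()) into an index
 * usable with pfn_list/dma_list. The variable names are hypothetical.
 *
 *	unsigned long va;	(some address within the umem)
 *	size_t idx = (va - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
 *	dma_addr_t dma = umem_odp->dma_list[idx];
 *
 * umem_mutex must be held while dereferencing dma_list, as noted above.
 */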

/*
 * The lower 2 bits of the DMA address signal the R/W permissions for
 * the entry. To upgrade the permissions, provide the appropriate
 * bitmask to the map_dma_pages function.
 *
 * Be aware that upgrading a mapped address might result in change of
 * the DMA address for the page.
 */
#define ODP_READ_ALLOWED_BIT  (1ULL << 0)
#define ODP_WRITE_ALLOWED_BIT (1ULL << 1)

#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
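
/*
 * Example (illustrative, not part of this header): checking whether a
 * mapped entry is writable and extracting the bus address. This assumes
 * umem_mutex is held and dma_list[idx] is a valid mapped entry.
 *
 *	dma_addr_t entry = umem_odp->dma_list[idx];
 *
 *	if (entry & ODP_WRITE_ALLOWED_BIT) {
 *		dma_addr_t bus_addr = entry & ODP_DMA_ADDR_MASK;
 *		(program bus_addr into the device's page tables)
 *	}
 */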

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING

struct ib_umem_odp *
ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size,
		int access, const struct mmu_interval_notifier_ops *ops);
struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
					       int access);
struct ib_umem_odp *
ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem, unsigned long addr,
			size_t size,
			const struct mmu_interval_notifier_ops *ops);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp);

int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 start_offset,
				 u64 bcnt, u64 access_mask, bool fault);

void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
				 u64 bound);
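
/*
 * Example (illustrative sketch, not part of this header): a typical
 * page-fault path in a driver. As the name suggests,
 * ib_umem_odp_map_dma_and_lock() returns with umem_mutex held on
 * success, so the caller unlocks once it has consumed the mappings.
 * The mmu notifier ops and most error handling are elided; names
 * prefixed "my_" are hypothetical.
 *
 *	umem_odp = ib_umem_odp_get(device, addr, size, IB_ACCESS_ON_DEMAND,
 *				   &my_mmu_interval_ops);
 *	if (IS_ERR(umem_odp))
 *		return PTR_ERR(umem_odp);
 *
 *	npages = ib_umem_odp_map_dma_and_lock(umem_odp, fault_offset,
 *					      fault_bytes,
 *					      ODP_READ_ALLOWED_BIT |
 *					      ODP_WRITE_ALLOWED_BIT, true);
 *	if (npages < 0)
 *		goto err_release;
 *	(push umem_odp->dma_list entries to the device)
 *	mutex_unlock(&umem_odp->umem_mutex);
 *
 *	ib_umem_odp_release(umem_odp);
 */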

#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

static inline struct ib_umem_odp *
ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size,
		int access, const struct mmu_interval_notifier_ops *ops)
{
	return ERR_PTR(-EINVAL);
}

static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

#endif /* IB_UMEM_ODP_H */