/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#ifndef _SIW_MEM_H
#define _SIW_MEM_H

struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable);
void siw_umem_release(struct siw_umem *umem, bool dirty);
struct siw_pbl *siw_pbl_alloc(u32 num_buf);
dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index);
int siw_mem_add(struct siw_device *sdev, struct siw_mem *m);
int siw_invalidate_stag(struct ib_pd *pd, u32 stag);
int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
		  enum ib_access_flags perms, int len);
int siw_check_sge(struct ib_pd *pd, struct siw_sge *sge,
		  struct siw_mem *mem[], enum ib_access_flags perms,
		  u32 off, int len);
void siw_wqe_put_mem(struct siw_wqe *wqe, enum siw_opcode op);
int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
		   u64 start, u64 len, int rights);
void siw_mr_drop_mem(struct siw_mr *mr);
void siw_free_mem(struct kref *ref);

/* Drop a reference to a memory object; siw_free_mem() runs on the last put. */
static inline void siw_mem_put(struct siw_mem *mem)
{
	kref_put(&mem->ref, siw_free_mem);
}

/* Return the memory region embedding a given generic memory object. */
static inline struct siw_mr *siw_mem2mr(struct siw_mem *m)
{
	return container_of(m, struct siw_mr, mem);
}

/*
 * Drop the references of all memory objects of an SGL and clear the
 * object pointers. Stops at the first empty slot.
 */
static inline void siw_unref_mem_sgl(struct siw_mem **mem, unsigned int num_sge)
{
	while (num_sge) {
		if (*mem == NULL)
			break;

		siw_mem_put(*mem);
		*mem = NULL;
		mem++;
		num_sge--;
	}
}
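
/*
 * Usage sketch (illustration only, not part of the original header):
 * a caller done with a work request holding three SGEs could release
 * all memory references at once. The array bound SIW_MAX_SGE and the
 * surrounding context are assumptions here.
 *
 *	struct siw_mem *mem[SIW_MAX_SGE];
 *
 *	siw_unref_mem_sgl(mem, 3);
 *
 * Afterwards every slot up to the first empty one has been put and
 * reset to NULL, so the array is safe to reuse.
 */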

#define CHUNK_SHIFT 9 /* sets number of pages per chunk */
#define PAGES_PER_CHUNK (_AC(1, UL) << CHUNK_SHIFT)
#define CHUNK_MASK (~(PAGES_PER_CHUNK - 1))
#define PAGE_CHUNK_SIZE (PAGES_PER_CHUNK * sizeof(struct page *))

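/*
 * Worked example (illustration, not from the original source): with
 * CHUNK_SHIFT of 9 and assuming a 4 KiB page size on a 64-bit kernel,
 *
 *	PAGES_PER_CHUNK  = 1UL << 9             = 512 pages
 *	memory per chunk = 512 * 4 KiB          = 2 MiB
 *	PAGE_CHUNK_SIZE  = 512 * sizeof(void *) = 4 KiB
 *
 * so the page-pointer array of one chunk itself fits exactly one page.
 */
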
/*
 * siw_get_upage()
 *
 * Get the page pointer for a user virtual address within the given umem.
 *
 * @umem: two-dimensional list of page pointers
 * @addr: user virtual address
 */
static inline struct page *siw_get_upage(struct siw_umem *umem, u64 addr)
{
	unsigned int page_idx = (addr - umem->fp_addr) >> PAGE_SHIFT,
		     chunk_idx = page_idx >> CHUNK_SHIFT,
		     page_in_chunk = page_idx & ~CHUNK_MASK;

	if (likely(page_idx < umem->num_pages))
		return umem->page_chunk[chunk_idx].plist[page_in_chunk];

	return NULL;
}
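
#if 0	/* Usage sketch (hypothetical, illustration only), never compiled. */
/*
 * Walk a registered umem page by page via siw_get_upage(). The helper
 * name, the access placeholder and the use of min_t()/PAGE_MASK here
 * are assumptions, not part of this header.
 */
static void siw_umem_walk_example(struct siw_umem *umem, u64 addr,
				  unsigned int len)
{
	while (len) {
		struct page *p = siw_get_upage(umem, addr);
		unsigned int off = addr & ~PAGE_MASK;
		unsigned int bytes = min_t(unsigned int, len, PAGE_SIZE - off);

		if (!p)
			break;	/* address outside the registered range */

		/* e.g. access page_address(p) + off for 'bytes' bytes */
		addr += bytes;
		len -= bytes;
	}
}
#endif
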
#endif