xref: /OK3568_Linux_fs/kernel/include/rdma/rdmavt_mr.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright(c) 2016 Intel Corporation.
4*4882a593Smuzhiyun  */
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun #ifndef DEF_RDMAVT_INCMR_H
7*4882a593Smuzhiyun #define DEF_RDMAVT_INCMR_H
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun /*
10*4882a593Smuzhiyun  * For Memory Regions. This stuff should probably be moved into rdmavt/mr.h once
11*4882a593Smuzhiyun  * drivers no longer need access to the MR directly.
12*4882a593Smuzhiyun  */
13*4882a593Smuzhiyun #include <linux/percpu-refcount.h>
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun /*
16*4882a593Smuzhiyun  * A segment is a linear region of low physical memory.
17*4882a593Smuzhiyun  * Used by the verbs layer.
18*4882a593Smuzhiyun  */
struct rvt_seg {
	void *vaddr;	/* kernel virtual address of the start of the segment */
	size_t length;	/* segment length in bytes */
};
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun /* The number of rvt_segs that fit in a page. */
25*4882a593Smuzhiyun #define RVT_SEGSZ     (PAGE_SIZE / sizeof(struct rvt_seg))
26*4882a593Smuzhiyun 
/* One page worth of segment descriptors; mr->map[] points at these. */
struct rvt_segarray {
	struct rvt_seg segs[RVT_SEGSZ];
};
30*4882a593Smuzhiyun 
/*
 * Memory region shared between rdmavt and the drivers.  The trailing
 * flexible map[] array holds mapsz pointers to page-sized rvt_segarray
 * blocks describing the region's segments.
 */
struct rvt_mregion {
	struct ib_pd *pd;       /* shares refcnt of ibmr.pd */
	u64 user_base;          /* User's address for this region */
	u64 iova;               /* IB start address of this region */
	size_t length;          /* total length of the region in bytes */
	u32 lkey;               /* local key; 0 is the reserved/dma lkey */
	u32 offset;             /* offset (bytes) to start of region */
	int access_flags;       /* IB_ACCESS_* bits granted for this MR */
	u32 max_segs;           /* number of rvt_segs in all the arrays */
	u32 mapsz;              /* size of the map array */
	atomic_t lkey_invalid;	/* true if current lkey is invalid */
	u8  page_shift;         /* 0 - non-uniform/non power-of-2 sizes */
	u8  lkey_published;     /* in global table */
	struct percpu_ref refcount;	/* per-cpu ref held by users of the MR */
	struct completion comp; /* complete when refcount goes to zero */
	struct rvt_segarray *map[];    /* the segments */
};
48*4882a593Smuzhiyun 
49*4882a593Smuzhiyun #define RVT_MAX_LKEY_TABLE_BITS 23
50*4882a593Smuzhiyun 
/*
 * Table mapping lkey/rkey values to their rvt_mregion.  Lookups are
 * RCU-protected; allocation/free of entries is serialized by @lock.
 */
struct rvt_lkey_table {
	/* read mostly fields */
	u32 max;                /* size of the table */
	u32 shift;              /* lkey/rkey shift */
	struct rvt_mregion __rcu **table;
	/* writeable fields */
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
	u32 next;               /* next unused index (speeds search) */
	u32 gen;                /* generation count */
};
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun /*
64*4882a593Smuzhiyun  * These keep track of the copy progress within a memory region.
65*4882a593Smuzhiyun  * Used by the verbs layer.
66*4882a593Smuzhiyun  */
struct rvt_sge {
	struct rvt_mregion *mr; /* MR this SGE refers into */
	void *vaddr;            /* kernel virtual address of segment */
	u32 sge_length;         /* length of the SGE */
	u32 length;             /* remaining length of the segment */
	u16 m;                  /* current index: mr->map[m] */
	u16 n;                  /* current index: mr->map[m]->segs[n] */
};
75*4882a593Smuzhiyun 
/* Progress state for copying through a list of SGEs. */
struct rvt_sge_state {
	struct rvt_sge *sg_list;      /* next SGE to be used if any */
	struct rvt_sge sge;   /* progress state for the current SGE */
	u32 total_len;        /* sum of the SGE lengths in bytes */
	u8 num_sge;           /* SGEs remaining, including the current one */
};
82*4882a593Smuzhiyun 
/* Drop a per-cpu reference on @mr taken by rvt_get_mr(). */
static inline void rvt_put_mr(struct rvt_mregion *mr)
{
	percpu_ref_put(&mr->refcount);
}
87*4882a593Smuzhiyun 
/* Take a per-cpu reference on @mr; release with rvt_put_mr(). */
static inline void rvt_get_mr(struct rvt_mregion *mr)
{
	percpu_ref_get(&mr->refcount);
}
92*4882a593Smuzhiyun 
rvt_put_ss(struct rvt_sge_state * ss)93*4882a593Smuzhiyun static inline void rvt_put_ss(struct rvt_sge_state *ss)
94*4882a593Smuzhiyun {
95*4882a593Smuzhiyun 	while (ss->num_sge) {
96*4882a593Smuzhiyun 		rvt_put_mr(ss->sge.mr);
97*4882a593Smuzhiyun 		if (--ss->num_sge)
98*4882a593Smuzhiyun 			ss->sge = *ss->sg_list++;
99*4882a593Smuzhiyun 	}
100*4882a593Smuzhiyun }
101*4882a593Smuzhiyun 
/*
 * Return how many bytes may be consumed from @sge in one step:
 * @length clamped to both the remaining segment length and the
 * remaining SGE length.
 */
static inline u32 rvt_get_sge_length(struct rvt_sge *sge, u32 length)
{
	u32 avail = length;

	if (sge->length < avail)
		avail = sge->length;
	if (sge->sge_length < avail)
		avail = sge->sge_length;

	return avail;
}
113*4882a593Smuzhiyun 
/*
 * rvt_update_sge - advance the current SGE by @length bytes
 * @ss: SGE state to advance
 * @length: number of bytes just consumed from the current SGE
 * @release: if true, drop the MR reference of an SGE that is used up
 *
 * Once the current SGE is exhausted the next entry of sg_list (if any)
 * becomes current.  If only the current map segment is exhausted and
 * the MR has a non-zero lkey (i.e. it is backed by a map[] of
 * segments), step to the next segment in mr->map.
 */
static inline void rvt_update_sge(struct rvt_sge_state *ss, u32 length,
				  bool release)
{
	struct rvt_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		/* The whole SGE is consumed; move on to the next one. */
		if (release)
			rvt_put_mr(sge->mr);
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr->lkey) {
		/* Segment exhausted: advance (m, n) into mr->map[]. */
		if (++sge->n >= RVT_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}
137*4882a593Smuzhiyun 
/*
 * Skip @length bytes of the SGE state without copying any data,
 * advancing (and, when @release is set, releasing) SGEs as they are
 * consumed.  @length must not exceed the bytes remaining in @ss.
 */
static inline void rvt_skip_sge(struct rvt_sge_state *ss, u32 length,
				bool release)
{
	struct rvt_sge *cur = &ss->sge;
	u32 remaining = length;

	while (remaining) {
		u32 chunk = rvt_get_sge_length(cur, remaining);

		/* A zero chunk would loop forever; flag it once. */
		WARN_ON_ONCE(chunk == 0);
		rvt_update_sge(ss, chunk, release);
		remaining -= chunk;
	}
}
151*4882a593Smuzhiyun 
152*4882a593Smuzhiyun bool rvt_ss_has_lkey(struct rvt_sge_state *ss, u32 lkey);
153*4882a593Smuzhiyun bool rvt_mr_has_lkey(struct rvt_mregion *mr, u32 lkey);
154*4882a593Smuzhiyun 
#endif          /* DEF_RDMAVT_INCMR_H */
156