xref: /OK3568_Linux_fs/kernel/drivers/infiniband/sw/rxe/rxe_pool.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
4*4882a593Smuzhiyun  * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #include "rxe.h"
8*4882a593Smuzhiyun #include "rxe_loc.h"
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun /* info about object pools
11*4882a593Smuzhiyun  * note that mr and mw share a single index space
12*4882a593Smuzhiyun  * so that one can map an lkey to the correct type of object
13*4882a593Smuzhiyun  */
14*4882a593Smuzhiyun struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
15*4882a593Smuzhiyun 	[RXE_TYPE_UC] = {
16*4882a593Smuzhiyun 		.name		= "rxe-uc",
17*4882a593Smuzhiyun 		.size		= sizeof(struct rxe_ucontext),
18*4882a593Smuzhiyun 		.flags          = RXE_POOL_NO_ALLOC,
19*4882a593Smuzhiyun 	},
20*4882a593Smuzhiyun 	[RXE_TYPE_PD] = {
21*4882a593Smuzhiyun 		.name		= "rxe-pd",
22*4882a593Smuzhiyun 		.size		= sizeof(struct rxe_pd),
23*4882a593Smuzhiyun 		.flags		= RXE_POOL_NO_ALLOC,
24*4882a593Smuzhiyun 	},
25*4882a593Smuzhiyun 	[RXE_TYPE_AH] = {
26*4882a593Smuzhiyun 		.name		= "rxe-ah",
27*4882a593Smuzhiyun 		.size		= sizeof(struct rxe_ah),
28*4882a593Smuzhiyun 		.flags		= RXE_POOL_ATOMIC | RXE_POOL_NO_ALLOC,
29*4882a593Smuzhiyun 	},
30*4882a593Smuzhiyun 	[RXE_TYPE_SRQ] = {
31*4882a593Smuzhiyun 		.name		= "rxe-srq",
32*4882a593Smuzhiyun 		.size		= sizeof(struct rxe_srq),
33*4882a593Smuzhiyun 		.flags		= RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
34*4882a593Smuzhiyun 		.min_index	= RXE_MIN_SRQ_INDEX,
35*4882a593Smuzhiyun 		.max_index	= RXE_MAX_SRQ_INDEX,
36*4882a593Smuzhiyun 	},
37*4882a593Smuzhiyun 	[RXE_TYPE_QP] = {
38*4882a593Smuzhiyun 		.name		= "rxe-qp",
39*4882a593Smuzhiyun 		.size		= sizeof(struct rxe_qp),
40*4882a593Smuzhiyun 		.cleanup	= rxe_qp_cleanup,
41*4882a593Smuzhiyun 		.flags		= RXE_POOL_INDEX,
42*4882a593Smuzhiyun 		.min_index	= RXE_MIN_QP_INDEX,
43*4882a593Smuzhiyun 		.max_index	= RXE_MAX_QP_INDEX,
44*4882a593Smuzhiyun 	},
45*4882a593Smuzhiyun 	[RXE_TYPE_CQ] = {
46*4882a593Smuzhiyun 		.name		= "rxe-cq",
47*4882a593Smuzhiyun 		.size		= sizeof(struct rxe_cq),
48*4882a593Smuzhiyun 		.flags          = RXE_POOL_NO_ALLOC,
49*4882a593Smuzhiyun 		.cleanup	= rxe_cq_cleanup,
50*4882a593Smuzhiyun 	},
51*4882a593Smuzhiyun 	[RXE_TYPE_MR] = {
52*4882a593Smuzhiyun 		.name		= "rxe-mr",
53*4882a593Smuzhiyun 		.size		= sizeof(struct rxe_mem),
54*4882a593Smuzhiyun 		.cleanup	= rxe_mem_cleanup,
55*4882a593Smuzhiyun 		.flags		= RXE_POOL_INDEX,
56*4882a593Smuzhiyun 		.max_index	= RXE_MAX_MR_INDEX,
57*4882a593Smuzhiyun 		.min_index	= RXE_MIN_MR_INDEX,
58*4882a593Smuzhiyun 	},
59*4882a593Smuzhiyun 	[RXE_TYPE_MW] = {
60*4882a593Smuzhiyun 		.name		= "rxe-mw",
61*4882a593Smuzhiyun 		.size		= sizeof(struct rxe_mem),
62*4882a593Smuzhiyun 		.flags		= RXE_POOL_INDEX,
63*4882a593Smuzhiyun 		.max_index	= RXE_MAX_MW_INDEX,
64*4882a593Smuzhiyun 		.min_index	= RXE_MIN_MW_INDEX,
65*4882a593Smuzhiyun 	},
66*4882a593Smuzhiyun 	[RXE_TYPE_MC_GRP] = {
67*4882a593Smuzhiyun 		.name		= "rxe-mc_grp",
68*4882a593Smuzhiyun 		.size		= sizeof(struct rxe_mc_grp),
69*4882a593Smuzhiyun 		.cleanup	= rxe_mc_cleanup,
70*4882a593Smuzhiyun 		.flags		= RXE_POOL_KEY,
71*4882a593Smuzhiyun 		.key_offset	= offsetof(struct rxe_mc_grp, mgid),
72*4882a593Smuzhiyun 		.key_size	= sizeof(union ib_gid),
73*4882a593Smuzhiyun 	},
74*4882a593Smuzhiyun 	[RXE_TYPE_MC_ELEM] = {
75*4882a593Smuzhiyun 		.name		= "rxe-mc_elem",
76*4882a593Smuzhiyun 		.size		= sizeof(struct rxe_mc_elem),
77*4882a593Smuzhiyun 		.flags		= RXE_POOL_ATOMIC,
78*4882a593Smuzhiyun 	},
79*4882a593Smuzhiyun };
80*4882a593Smuzhiyun 
pool_name(struct rxe_pool * pool)81*4882a593Smuzhiyun static inline const char *pool_name(struct rxe_pool *pool)
82*4882a593Smuzhiyun {
83*4882a593Smuzhiyun 	return rxe_type_info[pool->type].name;
84*4882a593Smuzhiyun }
85*4882a593Smuzhiyun 
/* Set up the [min, max] index window and its allocation bitmap.
 * Returns 0 on success, -EINVAL if the window cannot hold
 * pool->max_elem entries, -ENOMEM on allocation failure.
 */
static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
	u32 nbits = max - min + 1;
	size_t nbytes;

	if (nbits < pool->max_elem) {
		pr_warn("not enough indices for max_elem\n");
		return -EINVAL;
	}

	pool->max_index = max;
	pool->min_index = min;

	nbytes = BITS_TO_LONGS(nbits) * sizeof(long);
	pool->table = kmalloc(nbytes, GFP_KERNEL);
	if (!pool->table)
		return -ENOMEM;

	pool->table_size = nbytes;
	bitmap_zero(pool->table, nbits);

	return 0;
}
113*4882a593Smuzhiyun 
rxe_pool_init(struct rxe_dev * rxe,struct rxe_pool * pool,enum rxe_elem_type type,unsigned int max_elem)114*4882a593Smuzhiyun int rxe_pool_init(
115*4882a593Smuzhiyun 	struct rxe_dev		*rxe,
116*4882a593Smuzhiyun 	struct rxe_pool		*pool,
117*4882a593Smuzhiyun 	enum rxe_elem_type	type,
118*4882a593Smuzhiyun 	unsigned int		max_elem)
119*4882a593Smuzhiyun {
120*4882a593Smuzhiyun 	int			err = 0;
121*4882a593Smuzhiyun 	size_t			size = rxe_type_info[type].size;
122*4882a593Smuzhiyun 
123*4882a593Smuzhiyun 	memset(pool, 0, sizeof(*pool));
124*4882a593Smuzhiyun 
125*4882a593Smuzhiyun 	pool->rxe		= rxe;
126*4882a593Smuzhiyun 	pool->type		= type;
127*4882a593Smuzhiyun 	pool->max_elem		= max_elem;
128*4882a593Smuzhiyun 	pool->elem_size		= ALIGN(size, RXE_POOL_ALIGN);
129*4882a593Smuzhiyun 	pool->flags		= rxe_type_info[type].flags;
130*4882a593Smuzhiyun 	pool->tree		= RB_ROOT;
131*4882a593Smuzhiyun 	pool->cleanup		= rxe_type_info[type].cleanup;
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun 	atomic_set(&pool->num_elem, 0);
134*4882a593Smuzhiyun 
135*4882a593Smuzhiyun 	kref_init(&pool->ref_cnt);
136*4882a593Smuzhiyun 
137*4882a593Smuzhiyun 	rwlock_init(&pool->pool_lock);
138*4882a593Smuzhiyun 
139*4882a593Smuzhiyun 	if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
140*4882a593Smuzhiyun 		err = rxe_pool_init_index(pool,
141*4882a593Smuzhiyun 					  rxe_type_info[type].max_index,
142*4882a593Smuzhiyun 					  rxe_type_info[type].min_index);
143*4882a593Smuzhiyun 		if (err)
144*4882a593Smuzhiyun 			goto out;
145*4882a593Smuzhiyun 	}
146*4882a593Smuzhiyun 
147*4882a593Smuzhiyun 	if (rxe_type_info[type].flags & RXE_POOL_KEY) {
148*4882a593Smuzhiyun 		pool->key_offset = rxe_type_info[type].key_offset;
149*4882a593Smuzhiyun 		pool->key_size = rxe_type_info[type].key_size;
150*4882a593Smuzhiyun 	}
151*4882a593Smuzhiyun 
152*4882a593Smuzhiyun 	pool->state = RXE_POOL_STATE_VALID;
153*4882a593Smuzhiyun 
154*4882a593Smuzhiyun out:
155*4882a593Smuzhiyun 	return err;
156*4882a593Smuzhiyun }
157*4882a593Smuzhiyun 
rxe_pool_release(struct kref * kref)158*4882a593Smuzhiyun static void rxe_pool_release(struct kref *kref)
159*4882a593Smuzhiyun {
160*4882a593Smuzhiyun 	struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);
161*4882a593Smuzhiyun 
162*4882a593Smuzhiyun 	pool->state = RXE_POOL_STATE_INVALID;
163*4882a593Smuzhiyun 	kfree(pool->table);
164*4882a593Smuzhiyun }
165*4882a593Smuzhiyun 
rxe_pool_put(struct rxe_pool * pool)166*4882a593Smuzhiyun static void rxe_pool_put(struct rxe_pool *pool)
167*4882a593Smuzhiyun {
168*4882a593Smuzhiyun 	kref_put(&pool->ref_cnt, rxe_pool_release);
169*4882a593Smuzhiyun }
170*4882a593Smuzhiyun 
rxe_pool_cleanup(struct rxe_pool * pool)171*4882a593Smuzhiyun void rxe_pool_cleanup(struct rxe_pool *pool)
172*4882a593Smuzhiyun {
173*4882a593Smuzhiyun 	unsigned long flags;
174*4882a593Smuzhiyun 
175*4882a593Smuzhiyun 	write_lock_irqsave(&pool->pool_lock, flags);
176*4882a593Smuzhiyun 	pool->state = RXE_POOL_STATE_INVALID;
177*4882a593Smuzhiyun 	if (atomic_read(&pool->num_elem) > 0)
178*4882a593Smuzhiyun 		pr_warn("%s pool destroyed with unfree'd elem\n",
179*4882a593Smuzhiyun 			pool_name(pool));
180*4882a593Smuzhiyun 	write_unlock_irqrestore(&pool->pool_lock, flags);
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun 	rxe_pool_put(pool);
183*4882a593Smuzhiyun }
184*4882a593Smuzhiyun 
alloc_index(struct rxe_pool * pool)185*4882a593Smuzhiyun static u32 alloc_index(struct rxe_pool *pool)
186*4882a593Smuzhiyun {
187*4882a593Smuzhiyun 	u32 index;
188*4882a593Smuzhiyun 	u32 range = pool->max_index - pool->min_index + 1;
189*4882a593Smuzhiyun 
190*4882a593Smuzhiyun 	index = find_next_zero_bit(pool->table, range, pool->last);
191*4882a593Smuzhiyun 	if (index >= range)
192*4882a593Smuzhiyun 		index = find_first_zero_bit(pool->table, range);
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun 	WARN_ON_ONCE(index >= range);
195*4882a593Smuzhiyun 	set_bit(index, pool->table);
196*4882a593Smuzhiyun 	pool->last = index;
197*4882a593Smuzhiyun 	return index + pool->min_index;
198*4882a593Smuzhiyun }
199*4882a593Smuzhiyun 
insert_index(struct rxe_pool * pool,struct rxe_pool_entry * new)200*4882a593Smuzhiyun static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
201*4882a593Smuzhiyun {
202*4882a593Smuzhiyun 	struct rb_node **link = &pool->tree.rb_node;
203*4882a593Smuzhiyun 	struct rb_node *parent = NULL;
204*4882a593Smuzhiyun 	struct rxe_pool_entry *elem;
205*4882a593Smuzhiyun 
206*4882a593Smuzhiyun 	while (*link) {
207*4882a593Smuzhiyun 		parent = *link;
208*4882a593Smuzhiyun 		elem = rb_entry(parent, struct rxe_pool_entry, node);
209*4882a593Smuzhiyun 
210*4882a593Smuzhiyun 		if (elem->index == new->index) {
211*4882a593Smuzhiyun 			pr_warn("element already exists!\n");
212*4882a593Smuzhiyun 			goto out;
213*4882a593Smuzhiyun 		}
214*4882a593Smuzhiyun 
215*4882a593Smuzhiyun 		if (elem->index > new->index)
216*4882a593Smuzhiyun 			link = &(*link)->rb_left;
217*4882a593Smuzhiyun 		else
218*4882a593Smuzhiyun 			link = &(*link)->rb_right;
219*4882a593Smuzhiyun 	}
220*4882a593Smuzhiyun 
221*4882a593Smuzhiyun 	rb_link_node(&new->node, parent, link);
222*4882a593Smuzhiyun 	rb_insert_color(&new->node, &pool->tree);
223*4882a593Smuzhiyun out:
224*4882a593Smuzhiyun 	return;
225*4882a593Smuzhiyun }
226*4882a593Smuzhiyun 
insert_key(struct rxe_pool * pool,struct rxe_pool_entry * new)227*4882a593Smuzhiyun static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
228*4882a593Smuzhiyun {
229*4882a593Smuzhiyun 	struct rb_node **link = &pool->tree.rb_node;
230*4882a593Smuzhiyun 	struct rb_node *parent = NULL;
231*4882a593Smuzhiyun 	struct rxe_pool_entry *elem;
232*4882a593Smuzhiyun 	int cmp;
233*4882a593Smuzhiyun 
234*4882a593Smuzhiyun 	while (*link) {
235*4882a593Smuzhiyun 		parent = *link;
236*4882a593Smuzhiyun 		elem = rb_entry(parent, struct rxe_pool_entry, node);
237*4882a593Smuzhiyun 
238*4882a593Smuzhiyun 		cmp = memcmp((u8 *)elem + pool->key_offset,
239*4882a593Smuzhiyun 			     (u8 *)new + pool->key_offset, pool->key_size);
240*4882a593Smuzhiyun 
241*4882a593Smuzhiyun 		if (cmp == 0) {
242*4882a593Smuzhiyun 			pr_warn("key already exists!\n");
243*4882a593Smuzhiyun 			goto out;
244*4882a593Smuzhiyun 		}
245*4882a593Smuzhiyun 
246*4882a593Smuzhiyun 		if (cmp > 0)
247*4882a593Smuzhiyun 			link = &(*link)->rb_left;
248*4882a593Smuzhiyun 		else
249*4882a593Smuzhiyun 			link = &(*link)->rb_right;
250*4882a593Smuzhiyun 	}
251*4882a593Smuzhiyun 
252*4882a593Smuzhiyun 	rb_link_node(&new->node, parent, link);
253*4882a593Smuzhiyun 	rb_insert_color(&new->node, &pool->tree);
254*4882a593Smuzhiyun out:
255*4882a593Smuzhiyun 	return;
256*4882a593Smuzhiyun }
257*4882a593Smuzhiyun 
rxe_add_key(void * arg,void * key)258*4882a593Smuzhiyun void rxe_add_key(void *arg, void *key)
259*4882a593Smuzhiyun {
260*4882a593Smuzhiyun 	struct rxe_pool_entry *elem = arg;
261*4882a593Smuzhiyun 	struct rxe_pool *pool = elem->pool;
262*4882a593Smuzhiyun 	unsigned long flags;
263*4882a593Smuzhiyun 
264*4882a593Smuzhiyun 	write_lock_irqsave(&pool->pool_lock, flags);
265*4882a593Smuzhiyun 	memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
266*4882a593Smuzhiyun 	insert_key(pool, elem);
267*4882a593Smuzhiyun 	write_unlock_irqrestore(&pool->pool_lock, flags);
268*4882a593Smuzhiyun }
269*4882a593Smuzhiyun 
rxe_drop_key(void * arg)270*4882a593Smuzhiyun void rxe_drop_key(void *arg)
271*4882a593Smuzhiyun {
272*4882a593Smuzhiyun 	struct rxe_pool_entry *elem = arg;
273*4882a593Smuzhiyun 	struct rxe_pool *pool = elem->pool;
274*4882a593Smuzhiyun 	unsigned long flags;
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun 	write_lock_irqsave(&pool->pool_lock, flags);
277*4882a593Smuzhiyun 	rb_erase(&elem->node, &pool->tree);
278*4882a593Smuzhiyun 	write_unlock_irqrestore(&pool->pool_lock, flags);
279*4882a593Smuzhiyun }
280*4882a593Smuzhiyun 
rxe_add_index(void * arg)281*4882a593Smuzhiyun void rxe_add_index(void *arg)
282*4882a593Smuzhiyun {
283*4882a593Smuzhiyun 	struct rxe_pool_entry *elem = arg;
284*4882a593Smuzhiyun 	struct rxe_pool *pool = elem->pool;
285*4882a593Smuzhiyun 	unsigned long flags;
286*4882a593Smuzhiyun 
287*4882a593Smuzhiyun 	write_lock_irqsave(&pool->pool_lock, flags);
288*4882a593Smuzhiyun 	elem->index = alloc_index(pool);
289*4882a593Smuzhiyun 	insert_index(pool, elem);
290*4882a593Smuzhiyun 	write_unlock_irqrestore(&pool->pool_lock, flags);
291*4882a593Smuzhiyun }
292*4882a593Smuzhiyun 
rxe_drop_index(void * arg)293*4882a593Smuzhiyun void rxe_drop_index(void *arg)
294*4882a593Smuzhiyun {
295*4882a593Smuzhiyun 	struct rxe_pool_entry *elem = arg;
296*4882a593Smuzhiyun 	struct rxe_pool *pool = elem->pool;
297*4882a593Smuzhiyun 	unsigned long flags;
298*4882a593Smuzhiyun 
299*4882a593Smuzhiyun 	write_lock_irqsave(&pool->pool_lock, flags);
300*4882a593Smuzhiyun 	clear_bit(elem->index - pool->min_index, pool->table);
301*4882a593Smuzhiyun 	rb_erase(&elem->node, &pool->tree);
302*4882a593Smuzhiyun 	write_unlock_irqrestore(&pool->pool_lock, flags);
303*4882a593Smuzhiyun }
304*4882a593Smuzhiyun 
rxe_alloc(struct rxe_pool * pool)305*4882a593Smuzhiyun void *rxe_alloc(struct rxe_pool *pool)
306*4882a593Smuzhiyun {
307*4882a593Smuzhiyun 	struct rxe_pool_entry *elem;
308*4882a593Smuzhiyun 	unsigned long flags;
309*4882a593Smuzhiyun 
310*4882a593Smuzhiyun 	might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun 	read_lock_irqsave(&pool->pool_lock, flags);
313*4882a593Smuzhiyun 	if (pool->state != RXE_POOL_STATE_VALID) {
314*4882a593Smuzhiyun 		read_unlock_irqrestore(&pool->pool_lock, flags);
315*4882a593Smuzhiyun 		return NULL;
316*4882a593Smuzhiyun 	}
317*4882a593Smuzhiyun 	kref_get(&pool->ref_cnt);
318*4882a593Smuzhiyun 	read_unlock_irqrestore(&pool->pool_lock, flags);
319*4882a593Smuzhiyun 
320*4882a593Smuzhiyun 	if (!ib_device_try_get(&pool->rxe->ib_dev))
321*4882a593Smuzhiyun 		goto out_put_pool;
322*4882a593Smuzhiyun 
323*4882a593Smuzhiyun 	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
324*4882a593Smuzhiyun 		goto out_cnt;
325*4882a593Smuzhiyun 
326*4882a593Smuzhiyun 	elem = kzalloc(rxe_type_info[pool->type].size,
327*4882a593Smuzhiyun 				 (pool->flags & RXE_POOL_ATOMIC) ?
328*4882a593Smuzhiyun 				 GFP_ATOMIC : GFP_KERNEL);
329*4882a593Smuzhiyun 	if (!elem)
330*4882a593Smuzhiyun 		goto out_cnt;
331*4882a593Smuzhiyun 
332*4882a593Smuzhiyun 	elem->pool = pool;
333*4882a593Smuzhiyun 	kref_init(&elem->ref_cnt);
334*4882a593Smuzhiyun 
335*4882a593Smuzhiyun 	return elem;
336*4882a593Smuzhiyun 
337*4882a593Smuzhiyun out_cnt:
338*4882a593Smuzhiyun 	atomic_dec(&pool->num_elem);
339*4882a593Smuzhiyun 	ib_device_put(&pool->rxe->ib_dev);
340*4882a593Smuzhiyun out_put_pool:
341*4882a593Smuzhiyun 	rxe_pool_put(pool);
342*4882a593Smuzhiyun 	return NULL;
343*4882a593Smuzhiyun }
344*4882a593Smuzhiyun 
rxe_add_to_pool(struct rxe_pool * pool,struct rxe_pool_entry * elem)345*4882a593Smuzhiyun int rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
346*4882a593Smuzhiyun {
347*4882a593Smuzhiyun 	unsigned long flags;
348*4882a593Smuzhiyun 
349*4882a593Smuzhiyun 	might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
350*4882a593Smuzhiyun 
351*4882a593Smuzhiyun 	read_lock_irqsave(&pool->pool_lock, flags);
352*4882a593Smuzhiyun 	if (pool->state != RXE_POOL_STATE_VALID) {
353*4882a593Smuzhiyun 		read_unlock_irqrestore(&pool->pool_lock, flags);
354*4882a593Smuzhiyun 		return -EINVAL;
355*4882a593Smuzhiyun 	}
356*4882a593Smuzhiyun 	kref_get(&pool->ref_cnt);
357*4882a593Smuzhiyun 	read_unlock_irqrestore(&pool->pool_lock, flags);
358*4882a593Smuzhiyun 
359*4882a593Smuzhiyun 	if (!ib_device_try_get(&pool->rxe->ib_dev))
360*4882a593Smuzhiyun 		goto out_put_pool;
361*4882a593Smuzhiyun 
362*4882a593Smuzhiyun 	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
363*4882a593Smuzhiyun 		goto out_cnt;
364*4882a593Smuzhiyun 
365*4882a593Smuzhiyun 	elem->pool = pool;
366*4882a593Smuzhiyun 	kref_init(&elem->ref_cnt);
367*4882a593Smuzhiyun 
368*4882a593Smuzhiyun 	return 0;
369*4882a593Smuzhiyun 
370*4882a593Smuzhiyun out_cnt:
371*4882a593Smuzhiyun 	atomic_dec(&pool->num_elem);
372*4882a593Smuzhiyun 	ib_device_put(&pool->rxe->ib_dev);
373*4882a593Smuzhiyun out_put_pool:
374*4882a593Smuzhiyun 	rxe_pool_put(pool);
375*4882a593Smuzhiyun 	return -EINVAL;
376*4882a593Smuzhiyun }
377*4882a593Smuzhiyun 
rxe_elem_release(struct kref * kref)378*4882a593Smuzhiyun void rxe_elem_release(struct kref *kref)
379*4882a593Smuzhiyun {
380*4882a593Smuzhiyun 	struct rxe_pool_entry *elem =
381*4882a593Smuzhiyun 		container_of(kref, struct rxe_pool_entry, ref_cnt);
382*4882a593Smuzhiyun 	struct rxe_pool *pool = elem->pool;
383*4882a593Smuzhiyun 
384*4882a593Smuzhiyun 	if (pool->cleanup)
385*4882a593Smuzhiyun 		pool->cleanup(elem);
386*4882a593Smuzhiyun 
387*4882a593Smuzhiyun 	if (!(pool->flags & RXE_POOL_NO_ALLOC))
388*4882a593Smuzhiyun 		kfree(elem);
389*4882a593Smuzhiyun 	atomic_dec(&pool->num_elem);
390*4882a593Smuzhiyun 	ib_device_put(&pool->rxe->ib_dev);
391*4882a593Smuzhiyun 	rxe_pool_put(pool);
392*4882a593Smuzhiyun }
393*4882a593Smuzhiyun 
/* Look up an element by index. On a hit a reference is taken on the
 * element, which the caller must drop. Returns NULL on miss or if the
 * pool is no longer valid.
 */
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
	struct rxe_pool_entry *elem = NULL;
	struct rb_node *node = NULL;
	unsigned long irq_flags;

	read_lock_irqsave(&pool->pool_lock, irq_flags);

	if (pool->state != RXE_POOL_STATE_VALID)
		goto out;

	for (node = pool->tree.rb_node; node;) {
		elem = rb_entry(node, struct rxe_pool_entry, node);

		if (index < elem->index) {
			node = node->rb_left;
		} else if (index > elem->index) {
			node = node->rb_right;
		} else {
			kref_get(&elem->ref_cnt);
			break;
		}
	}

out:
	read_unlock_irqrestore(&pool->pool_lock, irq_flags);
	return node ? elem : NULL;
}
424*4882a593Smuzhiyun 
rxe_pool_get_key(struct rxe_pool * pool,void * key)425*4882a593Smuzhiyun void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
426*4882a593Smuzhiyun {
427*4882a593Smuzhiyun 	struct rb_node *node = NULL;
428*4882a593Smuzhiyun 	struct rxe_pool_entry *elem = NULL;
429*4882a593Smuzhiyun 	int cmp;
430*4882a593Smuzhiyun 	unsigned long flags;
431*4882a593Smuzhiyun 
432*4882a593Smuzhiyun 	read_lock_irqsave(&pool->pool_lock, flags);
433*4882a593Smuzhiyun 
434*4882a593Smuzhiyun 	if (pool->state != RXE_POOL_STATE_VALID)
435*4882a593Smuzhiyun 		goto out;
436*4882a593Smuzhiyun 
437*4882a593Smuzhiyun 	node = pool->tree.rb_node;
438*4882a593Smuzhiyun 
439*4882a593Smuzhiyun 	while (node) {
440*4882a593Smuzhiyun 		elem = rb_entry(node, struct rxe_pool_entry, node);
441*4882a593Smuzhiyun 
442*4882a593Smuzhiyun 		cmp = memcmp((u8 *)elem + pool->key_offset,
443*4882a593Smuzhiyun 			     key, pool->key_size);
444*4882a593Smuzhiyun 
445*4882a593Smuzhiyun 		if (cmp > 0)
446*4882a593Smuzhiyun 			node = node->rb_left;
447*4882a593Smuzhiyun 		else if (cmp < 0)
448*4882a593Smuzhiyun 			node = node->rb_right;
449*4882a593Smuzhiyun 		else
450*4882a593Smuzhiyun 			break;
451*4882a593Smuzhiyun 	}
452*4882a593Smuzhiyun 
453*4882a593Smuzhiyun 	if (node)
454*4882a593Smuzhiyun 		kref_get(&elem->ref_cnt);
455*4882a593Smuzhiyun 
456*4882a593Smuzhiyun out:
457*4882a593Smuzhiyun 	read_unlock_irqrestore(&pool->pool_lock, flags);
458*4882a593Smuzhiyun 	return node ? elem : NULL;
459*4882a593Smuzhiyun }
460