// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2018, Linaro Limited
 */

#include <assert.h>
#include <compiler.h>
#include <malloc.h>
#include <mempool.h>
#include <trace.h>
#include <util.h>

#if defined(__KERNEL__)
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/thread.h>
#include <kernel/refcount.h>
#endif

/*
 * Allocation of temporary memory buffers which are used in a stack-like
 * fashion. One example is when a Big Number is needed as a temporary
 * variable in a Big Number computation: Big Number operations (add, ...),
 * crypto algorithms (rsa, ecc, ...).
 *
 * The allocation algorithm takes memory buffers from a pool,
 * characterized by (cf. struct mempool):
 * - the total size (in bytes) of the pool
 * - the offset of the last item allocated in the pool (struct
 *   mempool_item). This offset is -1 if nothing is allocated yet.
 *
 * Each item consists of (struct mempool_item):
 * - the size of the item
 * - the offsets, in the pool, of the previous and next items
 *
 * The allocation allocates an item of a given size.
 * The allocation is performed in the pool after the last
 * allocated item. This means:
 * - the heap is never used.
 * - there is no assumption on the size of the allocated memory buffers. Only
 *   the size of the pool will limit the allocation.
 * - allocation and free are constant time as there is no list scan
 * - but memory may become fragmented as the allocation does not take into
 *   account "holes" in the pool (allocation is performed after the last
 *   allocated item). Indeed, this interface is meant to be used
 *   with stack-like allocations to avoid this issue. This means that
 *   allocated items:
 *   - should have a short life cycle
 *   - if an item A is allocated before another item B, then A should be
 *     released after B.
 *   So the potential fragmentation is mitigated.
 */
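
/*
 * A minimal usage sketch (illustration only, not code from this file):
 * the backing buffer, its size and the NULL release callback below are
 * hypothetical choices made by the caller.
 *
 *	static uint8_t buf[4096] __aligned(__alignof__(long));
 *	struct mempool *p = mempool_alloc_pool(buf, sizeof(buf), NULL);
 *	void *a = mempool_alloc(p, 128);
 *	void *b = mempool_alloc(p, 256);
 *
 *	mempool_free(p, b);	(stack order: b is freed before a)
 *	mempool_free(p, a);
 */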

#define POOL_ALIGN	__alignof__(long)

struct mempool {
	size_t size;  /* size of the memory pool, in bytes */
	ssize_t last_offset;  /* offset of the last allocated item, -1 if none */
	vaddr_t data;
#if defined(__KERNEL__)
	void (*release_mem)(void *ptr, size_t size);
	struct mutex mu;
	struct condvar cv;
	struct refcount refc;
	int owner;
#endif
};

static void get_pool(struct mempool *pool __maybe_unused)
{
#if defined(__KERNEL__)
	/*
	 * Fast path: the pool is already in use. Keep the reference only
	 * if this thread is the current owner (recursive acquisition),
	 * otherwise drop it and wait for the pool to be released.
	 */
	if (refcount_inc(&pool->refc)) {
		if (pool->owner == thread_get_id())
			return;
		refcount_dec(&pool->refc);
	}

	mutex_lock(&pool->mu);

	/* Wait until the pool is available */
	while (pool->owner != THREAD_ID_INVALID)
		condvar_wait(&pool->cv, &pool->mu);

	pool->owner = thread_get_id();
	refcount_set(&pool->refc, 1);

	mutex_unlock(&pool->mu);
#endif
}

static void put_pool(struct mempool *pool __maybe_unused)
{
#if defined(__KERNEL__)
	assert(pool->owner == thread_get_id());

	/* The last reference from the owning thread releases the pool */
	if (refcount_dec(&pool->refc)) {
		mutex_lock(&pool->mu);

		pool->owner = THREAD_ID_INVALID;
		condvar_signal(&pool->cv);

		/* As the refcount is 0 there should be no items left */
		if (pool->last_offset >= 0)
			panic();
		if (pool->release_mem)
			pool->release_mem((void *)pool->data, pool->size);

		mutex_unlock(&pool->mu);
	}
#endif
}
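
/*
 * Illustration (kernel builds, hypothetical call sequence): the refcount
 * lets the owning thread nest allocations without deadlocking on the
 * pool mutex.
 *
 *	a = mempool_alloc(p, ...)	get_pool(): becomes owner, refcount 1
 *	b = mempool_alloc(p, ...)	get_pool(): same owner, refcount 2
 *	mempool_free(p, b)		put_pool(): refcount 1, pool still held
 *	mempool_free(p, a)		put_pool(): refcount 0, pool released
 */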

struct mempool *
mempool_alloc_pool(void *data, size_t size,
		   void (*release_mem)(void *ptr, size_t size) __maybe_unused)
{
	struct mempool *pool = calloc(1, sizeof(*pool));

	COMPILE_TIME_ASSERT(POOL_ALIGN >= __alignof__(struct mempool_item));
	assert(!((vaddr_t)data & (POOL_ALIGN - 1)));

	if (pool) {
		pool->size = size;
		pool->data = (vaddr_t)data;
		pool->last_offset = -1;
#if defined(__KERNEL__)
		pool->release_mem = release_mem;
		mutex_init(&pool->mu);
		condvar_init(&pool->cv);
		pool->owner = THREAD_ID_INVALID;
#endif
	}

	return pool;
}

void *mempool_alloc(struct mempool *pool, size_t size)
{
	size_t offset;
	struct mempool_item *new_item;
	struct mempool_item *last_item = NULL;

	get_pool(pool);

	if (pool->last_offset < 0) {
		offset = 0;
	} else {
		/* Place the new item right after the last allocated one */
		last_item = (struct mempool_item *)(pool->data +
						    pool->last_offset);
		offset = pool->last_offset + last_item->size;

		offset = ROUNDUP(offset, POOL_ALIGN);
		if (offset > pool->size)
			goto error;
	}

	/* Account for the item header and keep the pool aligned */
	size = sizeof(struct mempool_item) + size;
	size = ROUNDUP(size, POOL_ALIGN);
	if (offset + size > pool->size)
		goto error;

	new_item = (struct mempool_item *)(pool->data + offset);
	new_item->size = size;
	new_item->prev_item_offset = pool->last_offset;
	if (last_item)
		last_item->next_item_offset = offset;
	new_item->next_item_offset = -1;
	pool->last_offset = offset;

	/* The usable buffer starts just after the item header */
	return new_item + 1;

error:
	EMSG("Failed to allocate %zu bytes, please tune the pool size", size);
	put_pool(pool);
	return NULL;
}
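
/*
 * Pool layout after two allocations (a sketch, offsets illustrative):
 *
 *	offset 0:	[item A header][A's buffer....]
 *	offset X:	[item B header][B's buffer....]  <- pool->last_offset
 *			[free space up to pool->size..]
 *
 * A new allocation would start at the first POOL_ALIGN-aligned offset
 * after item B.
 */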

void mempool_free(struct mempool *pool, void *ptr)
{
	struct mempool_item *item;
	struct mempool_item *prev_item;
	struct mempool_item *next_item;
	ssize_t last_offset = -1;

	if (!ptr)
		return;

	/* Recover the item header placed just before the user buffer */
	item = (struct mempool_item *)((vaddr_t)ptr -
				       sizeof(struct mempool_item));
	/* Unlink the item from its neighbors */
	if (item->prev_item_offset >= 0) {
		prev_item = (struct mempool_item *)(pool->data +
						    item->prev_item_offset);
		prev_item->next_item_offset = item->next_item_offset;
		/* If this was the last item, the previous one becomes last */
		last_offset = item->prev_item_offset;
	}

	if (item->next_item_offset >= 0) {
		next_item = (struct mempool_item *)(pool->data +
						    item->next_item_offset);
		next_item->prev_item_offset = item->prev_item_offset;
		/* Not the last item: the last offset is unchanged */
		last_offset = pool->last_offset;
	}

	pool->last_offset = last_offset;
	put_pool(pool);
}