// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * Copyright (c) 2018-2019, Linaro Limited
 */

#include <assert.h>
#include <compiler.h>
#include <malloc.h>
#include <mempool.h>
#include <string.h>
#include <trace.h>
#include <util.h>

#if defined(__KERNEL__)
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/thread.h>
#include <kernel/refcount.h>
#endif

/*
 * Allocation of temporary memory buffers which are used in a stack-like
 * fashion. One example is when a Big Number is needed as a temporary
 * variable in a Big Number computation: Big Number operations (add, ...),
 * crypto algorithms (RSA, ECC, ...).
 *
 * The allocation algorithm takes memory buffers from a pool,
 * characterized by (cf. struct mempool):
 * - the total size (in bytes) of the pool
 * - the offset of the last item allocated in the pool (struct
 *   mempool_item). This offset is -1 if nothing is allocated yet.
 *
 * Each item consists of (struct mempool_item):
 * - the size of the item
 * - the offsets, in the pool, of the previous and next items
 *
 * An allocation of a given size takes place right after the last
 * allocated item. This means:
 * - the heap is never used
 * - there is no assumption on the size of the allocated memory buffers, only
 *   the size of the pool limits the allocation
 * - allocation and free are constant time since there is no list scan
 * - but memory may get fragmented since the allocation does not take into
 *   account "holes" in the pool (allocation is performed after the last
 *   allocated item). This interface is therefore meant to be used with
 *   stack-like allocations, that is, allocated items:
 *   - should have a short life cycle
 *   - if an item A is allocated before another item B, then A should be
 *     released after B.
 *   That way the potential fragmentation is mitigated.
 */
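
/*
 * Illustrative sketch of the expected usage pattern (not part of this file,
 * names are made up): allocations and frees happen in a stack-like (LIFO)
 * order.
 *
 *	void *a = mempool_alloc(pool, size_a);
 *	void *b = mempool_alloc(pool, size_b);
 *	// ... use a and b for a short while ...
 *	mempool_free(pool, b);
 *	mempool_free(pool, a);	// released in reverse order of allocation
 */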
struct mempool {
	size_t size;  /* size of the memory pool, in bytes */
	ssize_t last_offset; /* offset of the last item, -1 if empty */
	vaddr_t data;
#ifdef CFG_MEMPOOL_REPORT_LAST_OFFSET
	ssize_t max_last_offset;
#endif
#if defined(__KERNEL__)
	void (*release_mem)(void *ptr, size_t size);
	struct mutex mu;
	struct condvar cv;
	struct refcount refc;
	int owner;
#endif
};

#if defined(__KERNEL__)
struct mempool *mempool_default;
#endif

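/*
 * Claim the pool for the calling thread. In kernel builds the pool is used
 * by a single thread at a time: the first call blocks until the pool is
 * free and takes ownership, nested calls from the owning thread only bump
 * the reference count.
 */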
static void get_pool(struct mempool *pool __maybe_unused)
{
#if defined(__KERNEL__)
	/*
	 * If the owner matches our thread id it cannot be changed behind
	 * our back. If it doesn't match, it can change at any time while
	 * we're not holding the mutex, but only to a value other than our
	 * thread id.
	 */
	if (atomic_load_int(&pool->owner) == thread_get_id()) {
		if (!refcount_inc(&pool->refc))
			panic();
		return;
	}

	mutex_lock(&pool->mu);

	/* Wait until the pool is available */
	while (pool->owner != THREAD_ID_INVALID)
		condvar_wait(&pool->cv, &pool->mu);

	pool->owner = thread_get_id();
	refcount_set(&pool->refc, 1);

	mutex_unlock(&pool->mu);
#endif
}

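/*
 * Drop one reference to the pool. When the last reference is released the
 * pool must be empty, ownership is cleared, a waiting thread is signaled
 * and any registered release_mem() callback is invoked.
 */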
static void put_pool(struct mempool *pool __maybe_unused)
{
#if defined(__KERNEL__)
	assert(atomic_load_int(&pool->owner) == thread_get_id());

	if (refcount_dec(&pool->refc)) {
		mutex_lock(&pool->mu);

		/*
		 * Do an atomic store to match the atomic load in
		 * get_pool() above.
		 */
		atomic_store_int(&pool->owner, THREAD_ID_INVALID);
		condvar_signal(&pool->cv);

		/* As the refcount is 0 there should be no items left */
		if (pool->last_offset >= 0)
			panic();
		if (pool->release_mem)
			pool->release_mem((void *)pool->data, pool->size);

		mutex_unlock(&pool->mu);
	}
#endif
}

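/*
 * Create a pool on top of a caller-provided buffer. @data must be aligned
 * to MEMPOOL_ALIGN and remain valid for the lifetime of the pool.
 * @release_mem is only used in kernel builds, where it is called once the
 * owning thread has released all its allocations.
 */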
struct mempool *
mempool_alloc_pool(void *data, size_t size,
		   void (*release_mem)(void *ptr, size_t size) __maybe_unused)
{
	struct mempool *pool = calloc(1, sizeof(*pool));

	COMPILE_TIME_ASSERT(MEMPOOL_ALIGN >= __alignof__(struct mempool_item));
	assert(!((vaddr_t)data & (MEMPOOL_ALIGN - 1)));

	if (pool) {
		pool->size = size;
		pool->data = (vaddr_t)data;
		pool->last_offset = -1;
#if defined(__KERNEL__)
		pool->release_mem = release_mem;
		mutex_init(&pool->mu);
		condvar_init(&pool->cv);
		pool->owner = THREAD_ID_INVALID;
#endif
	}

	return pool;
}

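/*
 * Allocate @size bytes from @pool. The new item is placed right after the
 * last allocated item, rounded up to MEMPOOL_ALIGN, so allocation is
 * constant time but free space before the last item is never reused.
 * Returns NULL (and logs an error) if the pool cannot hold the request.
 */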
void *mempool_alloc(struct mempool *pool, size_t size)
{
	size_t offset;
	struct mempool_item *new_item;
	struct mempool_item *last_item = NULL;

	get_pool(pool);

	if (pool->last_offset < 0) {
		offset = 0;
	} else {
		last_item = (struct mempool_item *)(pool->data +
						    pool->last_offset);
		offset = pool->last_offset + last_item->size;

		offset = ROUNDUP(offset, MEMPOOL_ALIGN);
		if (offset > pool->size)
			goto error;
	}

	size = sizeof(struct mempool_item) + size;
	size = ROUNDUP(size, MEMPOOL_ALIGN);
	if (offset + size > pool->size)
		goto error;

	new_item = (struct mempool_item *)(pool->data + offset);
	new_item->size = size;
	new_item->prev_item_offset = pool->last_offset;
	if (last_item)
		last_item->next_item_offset = offset;
	new_item->next_item_offset = -1;
	pool->last_offset = offset;
#ifdef CFG_MEMPOOL_REPORT_LAST_OFFSET
	if (pool->last_offset > pool->max_last_offset) {
		pool->max_last_offset = pool->last_offset;
		DMSG("Max memory usage increased to %zu",
		     (size_t)pool->max_last_offset);
	}
#endif

	return new_item + 1;

error:
	EMSG("Failed to allocate %zu bytes, please tune the pool size", size);
	put_pool(pool);
	return NULL;
}

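/*
 * Allocate a zeroed array of @nmemb elements of @size bytes each, failing
 * cleanly on multiplication overflow.
 */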
void *mempool_calloc(struct mempool *pool, size_t nmemb, size_t size)
{
	size_t sz;
	void *p;

	if (MUL_OVERFLOW(nmemb, size, &sz))
		return NULL;

	p = mempool_alloc(pool, sz);
	if (p)
		memset(p, 0, sz);

	return p;
}

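/*
 * Release @ptr and unlink its item from its neighbours. With the expected
 * LIFO usage the freed item is the last one, so last_offset falls back to
 * the previous item (or -1). Each successful allocation holds a reference
 * on the pool, so freeing the last outstanding item also releases pool
 * ownership (see put_pool()).
 */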
void mempool_free(struct mempool *pool, void *ptr)
{
	struct mempool_item *item;
	struct mempool_item *prev_item;
	struct mempool_item *next_item;
	ssize_t last_offset = -1;

	if (!ptr)
		return;

	item = (struct mempool_item *)((vaddr_t)ptr -
				       sizeof(struct mempool_item));
	if (item->prev_item_offset >= 0) {
		prev_item = (struct mempool_item *)(pool->data +
						    item->prev_item_offset);
		prev_item->next_item_offset = item->next_item_offset;
		last_offset = item->prev_item_offset;
	}

	if (item->next_item_offset >= 0) {
		next_item = (struct mempool_item *)(pool->data +
						    item->next_item_offset);
		next_item->prev_item_offset = item->prev_item_offset;
		last_offset = pool->last_offset;
	}

	pool->last_offset = last_offset;
	put_pool(pool);
}
246