xref: /optee_os/lib/libutils/ext/mempool.c (revision 817466cb476de705a8e3dabe1ef165fe27a18c2f)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * Copyright (c) 2018, Linaro Limited
5  */
6 
7 
#include <assert.h>
#include <compiler.h>
#include <malloc.h>
#include <mempool.h>
#include <stdint.h>
#include <util.h>
13 
14 #if defined(__KERNEL__)
15 #include <kernel/mutex.h>
16 #include <kernel/panic.h>
17 #include <kernel/thread.h>
18 #endif
19 
20 /*
21  * Allocation of temporary memory buffers which are used in a stack like
22  * fashion. One example is when a Big Number is needed for a temporary
23  * variable in a Big Number computation: Big Number operations (add,...),
24  * crypto algorithms (rsa, ecc, ...).
25  *
26  *  The allocation algorithm takes memory buffers from a pool,
27  *  characterized by (cf. struct mempool):
28  * - the total size (in bytes) of the pool
29  * - the offset of the last item allocated in the pool (struct
30  *   mempool_item). This offset is -1 if nothing is allocated yet.
31  *
32  * Each item consists of (struct mempool_item)
33  * - the size of the item
34  * - the offsets, in the pool, of the previous and next items
35  *
36  * The allocation allocates an item for a given size.
37  * The allocation is performed in the pool after the last
38  * allocated items. This means:
39  * - the heap is never used.
40  * - there is no assumption on the size of the allocated memory buffers. Only
41  *   the size of the pool will limit the allocation.
42  * - a constant time allocation and free as there is no list scan
43  * - but a potentially fragmented memory as the allocation does not take into
44  *   account "holes" in the pool (allocation is performed after the last
45  *   allocated variable). Indeed, this interface is supposed to be used
46  *   with stack like allocations to avoid this issue. This means that
47  *   allocated items:
48  *   - should have a short life cycle
49  *   - if an item A is allocated before another item B, then A should be
50  *     released after B.
51  *   So the potential fragmentation is mitigated.
52  */
53 
54 #define POOL_ALIGN	__alignof__(long)
55 
struct mempool {
	size_t size;  /* size of the memory pool, in bytes */
	ssize_t last_offset;   /* offset of the last allocated item, -1 if the pool is empty */
	vaddr_t data;	/* start address of the pool memory */
#if defined(__KERNEL__)
	/* Optional hook invoked when the last reference is dropped */
	void (*release_mem)(void *ptr, size_t size);
	struct mutex mu;	/* protects owner, count and cv */
	struct condvar cv;	/* signalled when the pool becomes available */
	size_t count;	/* recursion depth of the owning thread's get_pool() calls */
	int owner;	/* thread id of the owner, THREAD_ID_INVALID when free */
#endif
};
68 
/*
 * Acquire per-thread ownership of @pool.
 *
 * In kernel builds the pool is shared between threads: the first call from
 * a thread blocks until no other thread owns the pool, then records the
 * calling thread as owner. Further calls from the owning thread only bump
 * the recursion count, so the pool behaves like a recursive lock. Each
 * call must be balanced by a put_pool().
 * In non-kernel builds this is a no-op.
 */
static void get_pool(struct mempool *pool __maybe_unused)
{
#if defined(__KERNEL__)
	mutex_lock(&pool->mu);

	if (pool->owner != thread_get_id()) {
		/* Wait until the pool is available */
		while (pool->owner != THREAD_ID_INVALID)
			condvar_wait(&pool->cv, &pool->mu);

		pool->owner = thread_get_id();
		/* A fresh owner must find no outstanding references */
		assert(pool->count == 0);
	}

	pool->count++;

	mutex_unlock(&pool->mu);
#endif
}
88 
/*
 * Drop one reference on @pool taken with get_pool().
 *
 * Must be called by the owning thread. When the recursion count reaches
 * zero, ownership is released and one waiter (if any) is woken. At that
 * point the pool must contain no allocated items — a leftover item would
 * be handed to the next owner, so we panic instead. The optional
 * release_mem() hook then lets the pool's backing memory be returned
 * until the next allocation.
 * In non-kernel builds this is a no-op.
 */
static void put_pool(struct mempool *pool __maybe_unused)
{
#if defined(__KERNEL__)
	mutex_lock(&pool->mu);

	assert(pool->owner == thread_get_id());
	assert(pool->count > 0);

	pool->count--;
	if (!pool->count) {
		pool->owner = THREAD_ID_INVALID;
		condvar_signal(&pool->cv);
		/* As the refcount is 0 there should be no items left */
		if (pool->last_offset >= 0)
			panic();
		if (pool->release_mem)
			pool->release_mem((void *)pool->data, pool->size);
	}

	mutex_unlock(&pool->mu);
#endif
}
111 
112 struct mempool *
113 mempool_alloc_pool(void *data, size_t size,
114 		   void (*release_mem)(void *ptr, size_t size) __maybe_unused)
115 {
116 	struct mempool *pool = calloc(1, sizeof(*pool));
117 
118 	COMPILE_TIME_ASSERT(POOL_ALIGN >= __alignof__(struct mempool_item));
119 	assert(!((vaddr_t)data & (POOL_ALIGN - 1)));
120 
121 	if (pool) {
122 		pool->size = size;
123 		pool->data = (vaddr_t)data;
124 		pool->last_offset = -1;
125 #if defined(__KERNEL__)
126 		pool->release_mem = release_mem;
127 		mutex_init(&pool->mu);
128 		condvar_init(&pool->cv);
129 		pool->owner = THREAD_ID_INVALID;
130 #endif
131 	}
132 
133 	return pool;
134 }
135 
136 void *mempool_alloc(struct mempool *pool, size_t size)
137 {
138 	size_t offset;
139 	struct mempool_item *new_item;
140 	struct mempool_item *last_item = NULL;
141 
142 	get_pool(pool);
143 
144 	if (pool->last_offset < 0) {
145 		offset = 0;
146 	} else {
147 		last_item = (struct mempool_item *)(pool->data +
148 						    pool->last_offset);
149 		offset = pool->last_offset + last_item->size;
150 
151 		offset = ROUNDUP(offset, POOL_ALIGN);
152 		if (offset > pool->size)
153 			goto error;
154 	}
155 
156 	size = sizeof(struct mempool_item) + size;
157 	size = ROUNDUP(size, POOL_ALIGN);
158 	if (offset + size > pool->size)
159 		goto error;
160 
161 	new_item = (struct mempool_item *)(pool->data + offset);
162 	new_item->size = size;
163 	new_item->prev_item_offset = pool->last_offset;
164 	if (last_item)
165 		last_item->next_item_offset = offset;
166 	new_item->next_item_offset = -1;
167 	pool->last_offset = offset;
168 
169 	return new_item + 1;
170 
171 error:
172 	put_pool(pool);
173 	return NULL;
174 }
175 
/*
 * Release an item previously returned by mempool_alloc().
 *
 * The calling thread must still own the pool (ownership was taken in
 * mempool_alloc() and is dropped here via put_pool()). A NULL @ptr is
 * ignored; note that no put_pool() happens in that case, matching the
 * failed-alloc path which already released ownership.
 *
 * Although allocation is stack-like, freeing a middle item is supported:
 * the item is unlinked from the doubly-linked offset list. Its space is
 * only reused once all items after it are freed too (see file header).
 */
void mempool_free(struct mempool *pool, void *ptr)
{
	struct mempool_item *item;
	struct mempool_item *prev_item;
	struct mempool_item *next_item;
	ssize_t last_offset = -1;

	if (!ptr)
		return;

	/* The item header sits immediately before the user buffer */
	item = (struct mempool_item *)((vaddr_t)ptr -
				       sizeof(struct mempool_item));
	if (item->prev_item_offset >= 0) {
		prev_item = (struct mempool_item *)(pool->data +
						    item->prev_item_offset);
		prev_item->next_item_offset = item->next_item_offset;
		/* If this was the last item, the previous one becomes last */
		last_offset = item->prev_item_offset;
	}

	if (item->next_item_offset >= 0) {
		next_item = (struct mempool_item *)(pool->data +
						    item->next_item_offset);
		next_item->prev_item_offset = item->prev_item_offset;
		/* Not the last item: the last offset does not move */
		last_offset = pool->last_offset;
	}

	/* No prev and no next: pool becomes empty (last_offset == -1) */
	pool->last_offset = last_offset;
	put_pool(pool);
}
205