// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 * All rights reserved.
 * Copyright (c) 2018, Linaro Limited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <compiler.h>
#include <malloc.h>
#include <mempool.h>
#include <util.h>

#if defined(__KERNEL__)
#include <kernel/mutex.h>
#include <kernel/panic.h>
#include <kernel/thread.h>
#endif

/*
 * Allocation of temporary memory buffers which are used in a stack-like
 * fashion. One example is when a Big Number is needed as a temporary
 * variable in a Big Number computation: Big Number operations (add, ...),
 * crypto algorithms (RSA, ECC, ...).
 *
 * The allocation algorithm takes memory buffers from a pool,
 * characterized by (cf. struct mempool):
 * - the total size (in bytes) of the pool
 * - the offset of the last item allocated in the pool (struct
 *   mempool_item). This offset is -1 if nothing is allocated yet.
 *
 * Each item consists of (struct mempool_item):
 * - the size of the item
 * - the offsets, in the pool, of the previous and next items
 *
 * An allocation reserves an item of a given size right after the last
 * allocated item. This means:
 * - the heap is never used.
 * - there is no assumption on the size of the allocated memory buffers.
 *   Only the size of the pool limits an allocation.
 * - allocation and free run in constant time as there is no list scan.
 * - memory can fragment, since the allocator ignores "holes" in the pool
 *   (an allocation is always placed after the last allocated item). This
 *   interface is therefore meant for stack-like allocations, where the
 *   issue does not arise. Allocated items:
 *   - should have a short life cycle
 *   - if an item A is allocated before another item B, then A should be
 *     released after B.
 *   So the potential fragmentation is mitigated. A usage sketch follows
 *   this comment.
 */
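/*
 * A minimal usage sketch of the stack-like discipline. The buffer name
 * and sizes below are only illustrative assumptions; the backing memory
 * must be aligned to POOL_ALIGN:
 *
 *	static uint8_t pool_buf[4096] __aligned(__alignof__(long));
 *
 *	struct mempool *p = mempool_alloc_pool(pool_buf, sizeof(pool_buf),
 *					       NULL);
 *	void *a = mempool_alloc(p, 128);
 *	void *b = mempool_alloc(p, 256);
 *
 *	// release in reverse (stack) order of allocation
 *	mempool_free(p, b);
 *	mempool_free(p, a);
 */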

#define POOL_ALIGN	__alignof__(long)

struct mempool {
	size_t size;  /* size of the memory pool, in bytes */
	ssize_t last_offset;  /* offset of the last allocated item, -1 if none */
	vaddr_t data; /* start address of the pool buffer */
#if defined(__KERNEL__)
	void (*release_mem)(void *ptr, size_t size);
	struct mutex mu;
	struct condvar cv;
	size_t count; /* number of outstanding allocations by the owner */
	int owner;    /* ID of the thread currently holding the pool */
#endif
};

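/*
 * In kernel builds the pool is handed out to one thread at a time: the
 * first allocation a thread makes takes ownership of the pool, further
 * allocations by that thread only increment the reference count, and
 * other threads block on the condvar until the count drops back to zero.
 * In user mode builds this is a no-op.
 */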
static void get_pool(struct mempool *pool __maybe_unused)
{
#if defined(__KERNEL__)
	mutex_lock(&pool->mu);

	if (pool->owner != thread_get_id()) {
		/* Wait until the pool is available */
		while (pool->owner != THREAD_ID_INVALID)
			condvar_wait(&pool->cv, &pool->mu);

		pool->owner = thread_get_id();
		assert(pool->count == 0);
	}

	pool->count++;

	mutex_unlock(&pool->mu);
#endif
}

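/*
 * Drops one reference to the pool. When the last reference is dropped
 * the pool must be empty (we panic otherwise), ownership is released,
 * and the optional release_mem() callback lets the backing memory be
 * reclaimed.
 */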
static void put_pool(struct mempool *pool __maybe_unused)
{
#if defined(__KERNEL__)
	mutex_lock(&pool->mu);

	assert(pool->owner == thread_get_id());
	assert(pool->count > 0);

	pool->count--;
	if (!pool->count) {
		pool->owner = THREAD_ID_INVALID;
		condvar_signal(&pool->cv);
		/* As the refcount is 0 there should be no items left */
		if (pool->last_offset >= 0)
			panic();
		if (pool->release_mem)
			pool->release_mem((void *)pool->data, pool->size);
	}

	mutex_unlock(&pool->mu);
#endif
}

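/*
 * The pool descriptor itself lives on the heap; the caller provides the
 * backing memory, which must be aligned to POOL_ALIGN. release_mem() is
 * only used in kernel builds, where it is invoked each time the pool
 * becomes empty.
 */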
struct mempool *
mempool_alloc_pool(void *data, size_t size,
		   void (*release_mem)(void *ptr, size_t size) __maybe_unused)
{
	struct mempool *pool = calloc(1, sizeof(*pool));

	COMPILE_TIME_ASSERT(POOL_ALIGN >= __alignof__(struct mempool_item));
	assert(!((vaddr_t)data & (POOL_ALIGN - 1)));

	if (pool) {
		pool->size = size;
		pool->data = (vaddr_t)data;
		pool->last_offset = -1;
#if defined(__KERNEL__)
		pool->release_mem = release_mem;
		mutex_init(&pool->mu);
		condvar_init(&pool->cv);
		pool->owner = THREAD_ID_INVALID;
#endif
	}

	return pool;
}

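/*
 * Reserves a buffer right after the last allocated item: a struct
 * mempool_item header followed by the payload, rounded up to POOL_ALIGN.
 * Returns NULL (and drops the pool reference taken by get_pool()) if the
 * pool is exhausted.
 */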
void *mempool_alloc(struct mempool *pool, size_t size)
{
	size_t offset;
	struct mempool_item *new_item;
	struct mempool_item *last_item = NULL;

	get_pool(pool);

	if (pool->last_offset < 0) {
		offset = 0;
	} else {
		last_item = (struct mempool_item *)(pool->data +
						    pool->last_offset);
		offset = pool->last_offset + last_item->size;

		offset = ROUNDUP(offset, POOL_ALIGN);
		if (offset > pool->size)
			goto error;
	}

	size = sizeof(struct mempool_item) + size;
	size = ROUNDUP(size, POOL_ALIGN);
	if (offset + size > pool->size)
		goto error;

	new_item = (struct mempool_item *)(pool->data + offset);
	new_item->size = size;
	new_item->prev_item_offset = pool->last_offset;
	if (last_item)
		last_item->next_item_offset = offset;
	new_item->next_item_offset = -1;
	pool->last_offset = offset;

	/* The caller's buffer starts just after the item header */
	return new_item + 1;

error:
	put_pool(pool);
	return NULL;
}

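/*
 * Unlinks the item from the doubly linked list. With the expected LIFO
 * usage the freed item is the last one, so last_offset moves back to the
 * previous item (or to -1 when the pool becomes empty). Freeing an item
 * in the middle leaves last_offset unchanged; the resulting hole is only
 * reclaimed once the items after it have been freed.
 */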
void mempool_free(struct mempool *pool, void *ptr)
{
	struct mempool_item *item;
	struct mempool_item *prev_item;
	struct mempool_item *next_item;
	ssize_t last_offset = -1;

	if (!ptr)
		return;

	item = (struct mempool_item *)((vaddr_t)ptr -
				       sizeof(struct mempool_item));
	if (item->prev_item_offset >= 0) {
		prev_item = (struct mempool_item *)(pool->data +
						    item->prev_item_offset);
		prev_item->next_item_offset = item->next_item_offset;
		last_offset = item->prev_item_offset;
	}

	if (item->next_item_offset >= 0) {
		next_item = (struct mempool_item *)(pool->data +
						    item->next_item_offset);
		next_item->prev_item_offset = item->prev_item_offset;
		last_offset = pool->last_offset;
	}

	pool->last_offset = last_offset;
	put_pool(pool);
}
228