/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/mpool.h"

#include <stdbool.h>

struct mpool_chunk {
	struct mpool_chunk *next_chunk;
	struct mpool_chunk *limit;
};

struct mpool_entry {
	struct mpool_entry *next;
};

static bool mpool_locks_enabled = false;

/**
 * Enables the locks protecting memory pools. Before this function is called,
 * the locks are disabled, that is, acquiring/releasing them is a no-op.
 */
void mpool_enable_locks(void)
{
	mpool_locks_enabled = true;
}

/**
 * Acquires the lock protecting the given memory pool, if locks are enabled.
 */
static void mpool_lock(struct mpool *p)
{
	if (mpool_locks_enabled) {
		sl_lock(&p->lock);
	}
}

/**
 * Releases the lock protecting the given memory pool, if locks are enabled.
 */
static void mpool_unlock(struct mpool *p)
{
	if (mpool_locks_enabled) {
		sl_unlock(&p->lock);
	}
}

/**
 * Initialises the given memory pool with the given entry size, which must be
 * at least the size of two pointers.
 *
 * All entries stored in the memory pool will be aligned to at least the entry
 * size.
 */
void mpool_init(struct mpool *p, size_t entry_size)
{
	p->entry_size = entry_size;
	p->chunk_list = NULL;
	p->entry_list = NULL;
	p->fallback = NULL;
	sl_init(&p->lock);
}
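
/*
 * Example usage (an illustrative sketch, not part of this file; the 4 KiB
 * entry size and the static backing buffer are assumptions):
 *
 *	static struct mpool ppool;
 *	static char buf[16 * 4096];
 *
 *	mpool_init(&ppool, 4096);
 *	mpool_add_chunk(&ppool, buf, sizeof(buf));
 *	// Entries come back aligned to at least 4096 bytes.
 *	void *entry = mpool_alloc(&ppool);
 */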

/**
 * Initialises the given memory pool by replicating the properties of `from`.
 * It also pulls the chunk and free lists from `from`, consuming all its
 * resources and making them available via the new memory pool.
 */
void mpool_init_from(struct mpool *p, struct mpool *from)
{
	mpool_init(p, from->entry_size);

	mpool_lock(from);
	p->chunk_list = from->chunk_list;
	p->entry_list = from->entry_list;
	p->fallback = from->fallback;

	from->chunk_list = NULL;
	from->entry_list = NULL;
	from->fallback = NULL;
	mpool_unlock(from);
}
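
/*
 * Sketch of a typical handover (illustrative; the pool names are
 * assumptions): replicate and drain a boot-time pool so that later code
 * owns all of its memory.
 *
 *	struct mpool runtime_pool;
 *
 *	mpool_init_from(&runtime_pool, &boot_pool);
 *	// `boot_pool` is now empty; allocate from `runtime_pool` instead.
 */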

/**
 * Initialises the given memory pool with a fallback memory pool to use if
 * this pool runs out of memory.
 */
void mpool_init_with_fallback(struct mpool *p, struct mpool *fallback)
{
	mpool_init(p, fallback->entry_size);
	p->fallback = fallback;
}
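
/*
 * A minimal sketch of the fallback pattern (illustrative; `global_pool` is
 * an assumption): a local pool serves allocations from its own chunks and
 * falls through to the global one when it runs dry.
 *
 *	struct mpool local;
 *
 *	mpool_init_with_fallback(&local, &global_pool);
 *	void *entry = mpool_alloc(&local);
 *	// ...
 *	mpool_fini(&local); // Returns leftover memory to `global_pool`.
 */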

/**
 * Finishes the given memory pool, giving all free memory to the fallback pool
 * if there is one.
 */
void mpool_fini(struct mpool *p)
{
	struct mpool_entry *entry;
	struct mpool_chunk *chunk;

	if (!p->fallback) {
		return;
	}

	mpool_lock(p);

	/* Merge the freelist into the fallback. */
	entry = p->entry_list;
	while (entry != NULL) {
		void *ptr = entry;

		entry = entry->next;
		mpool_free(p->fallback, ptr);
	}

	/* Merge the chunk list into the fallback. */
	chunk = p->chunk_list;
	while (chunk != NULL) {
		void *ptr = chunk;
		size_t size = (uintptr_t)chunk->limit - (uintptr_t)chunk;

		chunk = chunk->next_chunk;
		mpool_add_chunk(p->fallback, ptr, size);
	}

	p->chunk_list = NULL;
	p->entry_list = NULL;
	p->fallback = NULL;

	mpool_unlock(p);
}

/**
 * Adds a contiguous chunk of memory to the given memory pool. The chunk will
 * eventually be broken up into entries of the size held by the memory pool.
 *
 * Only the portions aligned to the entry size will be added to the pool.
 *
 * Returns true if at least a portion of the chunk was added to the pool, or
 * false if none of the buffer was usable in the pool.
 */
bool mpool_add_chunk(struct mpool *p, void *begin, size_t size)
{
	struct mpool_chunk *chunk;
	uintptr_t new_begin;
	uintptr_t new_end;

	/* Round begin address up, and end address down. */
	new_begin = ((uintptr_t)begin + p->entry_size - 1) / p->entry_size *
		    p->entry_size;
	new_end = ((uintptr_t)begin + size) / p->entry_size * p->entry_size;

	/* Nothing to do if there isn't enough room for an entry. */
	if (new_begin >= new_end || new_end - new_begin < p->entry_size) {
		return false;
	}

	chunk = (struct mpool_chunk *)new_begin;
	chunk->limit = (struct mpool_chunk *)new_end;

	mpool_lock(p);
	chunk->next_chunk = p->chunk_list;
	p->chunk_list = chunk;
	mpool_unlock(p);

	return true;
}
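
/*
 * Worked example of the rounding above (assumed numbers): with
 * entry_size = 0x1000, begin = 0x2100 and size = 0x3000, the span
 * [0x2100, 0x5100) shrinks to [0x3000, 0x5000), i.e. two usable entries;
 * the unaligned fringes at both ends are discarded.
 */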

/**
 * Allocates an entry from the given memory pool, if one is available. The
 * fallback will not be used even if there is one.
 */
static void *mpool_alloc_no_fallback(struct mpool *p)
{
	void *ret;
	struct mpool_chunk *chunk;
	struct mpool_chunk *new_chunk;

	/* Fetch an entry from the free list if one is available. */
	mpool_lock(p);
	if (p->entry_list != NULL) {
		struct mpool_entry *entry = p->entry_list;

		p->entry_list = entry->next;
		ret = entry;
		goto exit;
	}

	/* The free list is empty. Try to carve an entry from a chunk. */
	chunk = p->chunk_list;
	if (chunk == NULL) {
		/* The chunk list is also empty, we're out of entries. */
		ret = NULL;
		goto exit;
	}

	new_chunk = (struct mpool_chunk *)((uintptr_t)chunk + p->entry_size);
	if (new_chunk >= chunk->limit) {
		p->chunk_list = chunk->next_chunk;
	} else {
		*new_chunk = *chunk;
		p->chunk_list = new_chunk;
	}

	ret = chunk;

exit:
	mpool_unlock(p);

	return ret;
}
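
/*
 * Illustration of the chunk carving above (assumed numbers): with
 * entry_size = 0x1000, a chunk at 0x2000 with limit 0x4000 yields the
 * entry at 0x2000 and the chunk header is copied to 0x3000; the next
 * allocation consumes the final entry and unlinks the chunk entirely.
 */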

/**
 * Allocates an entry from the given memory pool, if one is available. If
 * there isn't one available, try to allocate from the fallback if there is
 * one.
 */
void *mpool_alloc(struct mpool *p)
{
	do {
		void *ret = mpool_alloc_no_fallback(p);

		if (ret != NULL) {
			return ret;
		}

		p = p->fallback;
	} while (p != NULL);

	return NULL;
}

/**
 * Frees an entry back into the memory pool, making it available for reuse.
 *
 * This is meant to be used for freeing single entries. To free multiple
 * entries, one must call mpool_add_chunk instead.
 */
void mpool_free(struct mpool *p, void *ptr)
{
	struct mpool_entry *e = ptr;

	/* Store the newly freed entry at the front of the free list. */
	mpool_lock(p);
	e->next = p->entry_list;
	p->entry_list = e;
	mpool_unlock(p);
}
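
/*
 * Typical pairing (illustrative sketch):
 *
 *	void *entry = mpool_alloc(&pool);
 *
 *	if (entry != NULL) {
 *		// ... use the entry ...
 *		mpool_free(&pool, entry);
 *	}
 *
 * Note that mpool_free returns the entry to the pool it is passed, which
 * need not be the fallback the allocation was actually satisfied from.
 */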

/**
 * Allocates a number of contiguous and aligned entries. If a suitable
 * allocation cannot be found in this pool, the fallback will not be used
 * even if there is one.
 */
void *mpool_alloc_contiguous_no_fallback(struct mpool *p, size_t count,
					 size_t align)
{
	struct mpool_chunk **prev;
	void *ret = NULL;

	align *= p->entry_size;

	mpool_lock(p);

	/*
	 * Go through the chunk list in search of one with enough room for
	 * the requested allocation.
	 */
	prev = &p->chunk_list;
	while (*prev != NULL) {
		uintptr_t start;
		struct mpool_chunk *new_chunk;
		struct mpool_chunk *chunk = *prev;

		/* Round the start address up to the required alignment. */
		start = (((uintptr_t)chunk + align - 1) / align) * align;

		/*
		 * Calculate where the new chunk would be if we consume the
		 * requested number of entries. Then check if this chunk is
		 * big enough to satisfy the request.
		 */
		new_chunk =
			(struct mpool_chunk *)(start + (count * p->entry_size));
		if (new_chunk <= chunk->limit) {
			/* Remove the consumed area. */
			if (new_chunk == chunk->limit) {
				*prev = chunk->next_chunk;
			} else {
				*new_chunk = *chunk;
				*prev = new_chunk;
			}

			/*
			 * Add back the space consumed by the alignment
			 * requirement, if it's big enough to fit an entry.
			 */
			if (start - (uintptr_t)chunk >= p->entry_size) {
				chunk->next_chunk = *prev;
				*prev = chunk;
				chunk->limit = (struct mpool_chunk *)start;
			}

			ret = (void *)start;
			break;
		}

		prev = &chunk->next_chunk;
	}

	mpool_unlock(p);

	return ret;
}

/**
 * Allocates a number of contiguous and aligned entries. This is a
 * best-effort operation and only succeeds if such entries can be found in
 * the chunk list or the chunks of the fallbacks (i.e., the entry list is
 * never used to satisfy these allocations).
 *
 * The alignment is specified as the number of entries, that is, if `align`
 * is 4, the alignment in bytes will be 4 * entry_size.
 *
 * The caller can eventually free the returned entries by calling
 * mpool_add_chunk.
 */
void *mpool_alloc_contiguous(struct mpool *p, size_t count, size_t align)
{
	do {
		void *ret = mpool_alloc_contiguous_no_fallback(p, count,
							       align);

		if (ret != NULL) {
			return ret;
		}

		p = p->fallback;
	} while (p != NULL);

	return NULL;
}
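
/*
 * Example (illustrative sketch, assuming entry_size = 0x1000):
 *
 *	void *table = mpool_alloc_contiguous(&pool, 4, 4);
 *
 * On success, `table` points to four contiguous entries (0x4000 bytes)
 * aligned to 4 * 0x1000 = 0x4000 bytes. The memory can later be returned
 * with:
 *
 *	mpool_add_chunk(&pool, table, 4 * 0x1000);
 */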