/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/mpool.h"

#include <stdbool.h>

#include "hf/arch/std.h"

struct mpool_chunk {
	struct mpool_chunk *next_chunk;
	struct mpool_chunk *limit;
};

struct mpool_entry {
	struct mpool_entry *next;
};

static bool mpool_locks_enabled = false;

/**
 * Enables the locks protecting memory pools. Before this function is called,
 * the locks are disabled, that is, acquiring/releasing them is a no-op.
 */
void mpool_enable_locks(void)
{
	mpool_locks_enabled = true;
}
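
/*
 * Illustrative usage (a sketch, not part of the original source): locks are
 * typically enabled once boot proceeds past the single-threaded stage. The
 * hook name below is hypothetical.
 *
 *	void example_begin_multithreaded_boot(void)
 *	{
 *		mpool_enable_locks();
 *		// From here on, pools may safely be shared between CPUs.
 *	}
 */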

/**
 * Acquires the lock protecting the given memory pool, if locks are enabled.
 */
static void mpool_lock(struct mpool *p)
{
	if (mpool_locks_enabled) {
		sl_lock(&p->lock);
	}
}

/**
 * Releases the lock protecting the given memory pool, if locks are enabled.
 */
static void mpool_unlock(struct mpool *p)
{
	if (mpool_locks_enabled) {
		sl_unlock(&p->lock);
	}
}

/**
 * Initialises the given memory pool with the given entry size, which must be
 * at least the size of two pointers.
 *
 * All entries stored in the memory pool will be aligned to at least the entry
 * size.
 */
void mpool_init(struct mpool *p, size_t entry_size)
{
	p->entry_size = entry_size;
	p->chunk_list = NULL;
	p->entry_list = NULL;
	p->fallback = NULL;
	sl_init(&p->lock);
}
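
/*
 * Illustrative usage (a sketch, not part of the original source): a pool of
 * page-sized entries. `PAGE_SIZE` is assumed to be provided by the platform;
 * any entry size of at least two pointers would do.
 *
 *	static struct mpool ppool;
 *
 *	mpool_init(&ppool, PAGE_SIZE);
 */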

/**
 * Initialises the given memory pool by replicating the properties of `from`.
 * It also pulls the chunk and free lists from `from`, consuming all its
 * resources and making them available via the new memory pool.
 */
void mpool_init_from(struct mpool *p, struct mpool *from)
{
	mpool_init(p, from->entry_size);

	mpool_lock(from);
	p->chunk_list = from->chunk_list;
	p->entry_list = from->entry_list;
	p->fallback = from->fallback;

	from->chunk_list = NULL;
	from->entry_list = NULL;
	from->fallback = NULL;
	mpool_unlock(from);
}

/**
 * Initialises the given memory pool with a fallback memory pool to be used if
 * this pool runs out of memory.
 */
void mpool_init_with_fallback(struct mpool *p, struct mpool *fallback)
{
	mpool_init(p, fallback->entry_size);
	p->fallback = fallback;
}
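
/*
 * Illustrative usage (a sketch, not part of the original source): a
 * short-lived local pool backed by the hypothetical global `ppool` from the
 * example above. Allocations the local pool cannot satisfy fall through to
 * `ppool`, and mpool_fini() returns any leftover memory to it.
 *
 *	struct mpool local;
 *
 *	mpool_init_with_fallback(&local, &ppool);
 *	// ... allocate from `local` while doing some work ...
 *	mpool_fini(&local);
 */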

/**
 * Finishes the given memory pool, giving all free memory to the fallback pool
 * if there is one.
 */
void mpool_fini(struct mpool *p)
{
	struct mpool_entry *entry;
	struct mpool_chunk *chunk;

	if (!p->fallback) {
		return;
	}

	mpool_lock(p);

	/* Merge the freelist into the fallback. */
	entry = p->entry_list;
	while (entry != NULL) {
		void *ptr = entry;

		entry = entry->next;
		mpool_free(p->fallback, ptr);
	}

	/* Merge the chunk list into the fallback. */
	chunk = p->chunk_list;
	while (chunk != NULL) {
		void *ptr = chunk;
		size_t size = (uintptr_t)chunk->limit - (uintptr_t)chunk;

		chunk = chunk->next_chunk;
		mpool_add_chunk(p->fallback, ptr, size);
	}

	p->chunk_list = NULL;
	p->entry_list = NULL;
	p->fallback = NULL;

	mpool_unlock(p);
}

/**
 * Adds a contiguous chunk of memory to the given memory pool. The chunk will
 * eventually be broken up into entries of the size held by the memory pool.
 *
 * Only the portions aligned to the entry size will be added to the pool.
 *
 * Returns true if at least a portion of the chunk was added to the pool, or
 * false if none of the buffer was usable in the pool.
 */
bool mpool_add_chunk(struct mpool *p, void *begin, size_t size)
{
	struct mpool_chunk *chunk;
	char *new_begin;
	char *new_end;

	/* Round begin address up, and end address down. */
	new_begin = (void *)align_up((char *)begin, p->entry_size);
	new_end = (void *)align_down((char *)begin + size, p->entry_size);

	/* Nothing to do if there isn't enough room for an entry. */
	if (new_begin >= new_end ||
	    (size_t)(new_end - new_begin) < p->entry_size) {
		return false;
	}

	chunk = (struct mpool_chunk *)new_begin;
	chunk->limit = (struct mpool_chunk *)new_end;

	mpool_lock(p);
	chunk->next_chunk = p->chunk_list;
	p->chunk_list = chunk;
	mpool_unlock(p);

	return true;
}
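
/*
 * Illustrative usage (a sketch, not part of the original source): seeding a
 * pool from a statically allocated buffer. Only the part of `buffer` aligned
 * to the entry size is used, so an unaligned buffer contributes slightly less
 * than its full size. `PAGE_SIZE` is again an assumed entry size.
 *
 *	static char buffer[16 * PAGE_SIZE];
 *
 *	if (!mpool_add_chunk(&ppool, buffer, sizeof(buffer))) {
 *		// Buffer was too small or too misaligned to yield an entry.
 *	}
 */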

/**
 * Allocates an entry from the given memory pool, if one is available. The
 * fallback will not be used even if there is one.
 */
static void *mpool_alloc_no_fallback(struct mpool *p)
{
	void *ret;
	struct mpool_chunk *chunk;
	struct mpool_chunk *new_chunk;

	/* Fetch an entry from the free list if one is available. */
	mpool_lock(p);
	if (p->entry_list != NULL) {
		struct mpool_entry *entry = p->entry_list;

		p->entry_list = entry->next;
		ret = entry;
		goto exit;
	}

	/* The free list is empty. Try a chunk instead. */
	chunk = p->chunk_list;
	if (chunk == NULL) {
		/* The chunk list is also empty, we're out of entries. */
		ret = NULL;
		goto exit;
	}

	/*
	 * Carve an entry off the front of the chunk: if what remains is too
	 * small to hold the chunk header, consume the whole chunk; otherwise
	 * move the header forward by one entry.
	 */
	new_chunk = (struct mpool_chunk *)((char *)chunk + p->entry_size);
	if (new_chunk >= chunk->limit) {
		p->chunk_list = chunk->next_chunk;
	} else {
		*new_chunk = *chunk;
		p->chunk_list = new_chunk;
	}

	ret = chunk;

exit:
	mpool_unlock(p);

	return ret;
}

/**
 * Allocates an entry from the given memory pool, if one is available. If there
 * isn't one available, try to allocate from the fallback if there is one.
 */
void *mpool_alloc(struct mpool *p)
{
	do {
		void *ret = mpool_alloc_no_fallback(p);

		if (ret != NULL) {
			return ret;
		}

		p = p->fallback;
	} while (p != NULL);

	return NULL;
}
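
/*
 * Illustrative usage (a sketch, not part of the original source): a single
 * allocate/free round trip. Single entries go back via mpool_free();
 * mpool_add_chunk() is for returning multiple contiguous entries.
 *
 *	void *entry = mpool_alloc(&ppool);
 *
 *	if (entry != NULL) {
 *		// ... use the entry ...
 *		mpool_free(&ppool, entry);
 *	}
 */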

/**
 * Frees an entry back into the memory pool, making it available for reuse.
 *
 * This is meant to be used for freeing single entries. To free multiple
 * entries, one must call mpool_add_chunk instead.
 */
void mpool_free(struct mpool *p, void *ptr)
{
	struct mpool_entry *e = ptr;

	/* Store the newly freed entry at the front of the free list. */
	mpool_lock(p);
	e->next = p->entry_list;
	p->entry_list = e;
	mpool_unlock(p);
}

/**
 * Allocates a number of contiguous and aligned entries. If a suitable
 * allocation could not be found, the fallback will not be used even if there
 * is one.
 */
void *mpool_alloc_contiguous_no_fallback(struct mpool *p, size_t count,
					 size_t align)
{
	struct mpool_chunk **prev;
	void *ret = NULL;

	align *= p->entry_size;

	mpool_lock(p);

	/*
	 * Go through the chunk list in search of one with enough room for the
	 * requested allocation.
	 */
	prev = &p->chunk_list;
	while (*prev != NULL) {
		char *start;
		struct mpool_chunk *new_chunk;
		struct mpool_chunk *chunk = *prev;

		/* Round start address up to the required alignment. */
		start = (void *)align_up((char *)chunk, align);

		/*
		 * Calculate where the new chunk would be if we consume the
		 * requested number of entries. Then check if this chunk is big
		 * enough to satisfy the request.
		 */
		new_chunk =
			(struct mpool_chunk *)(start + (count * p->entry_size));
		if (new_chunk <= chunk->limit) {
			/* Remove the consumed area. */
			if (new_chunk == chunk->limit) {
				*prev = chunk->next_chunk;
			} else {
				*new_chunk = *chunk;
				*prev = new_chunk;
			}

			/*
			 * Add back the space consumed by the alignment
			 * requirement, if it's big enough to fit an entry.
			 */
			if ((size_t)(start - (char *)chunk) >= p->entry_size) {
				chunk->next_chunk = *prev;
				*prev = chunk;
				chunk->limit = (struct mpool_chunk *)start;
			}

			ret = (void *)start;
			break;
		}

		prev = &chunk->next_chunk;
	}

	mpool_unlock(p);

	return ret;
}
/**
 * Allocates a number of contiguous and aligned entries. This is a best-effort
 * operation and only succeeds if such entries can be found in the chunks list
 * or the chunks of the fallbacks (i.e., the entry list is never used to
 * satisfy these allocations).
 *
 * The alignment is specified as the number of entries, that is, if `align` is
 * 4, the alignment in bytes will be 4 * entry_size.
 *
 * The caller can eventually free the returned entries by calling
 * mpool_add_chunk.
 */
void *mpool_alloc_contiguous(struct mpool *p, size_t count, size_t align)
{
	do {
		void *ret = mpool_alloc_contiguous_no_fallback(p, count, align);

		if (ret != NULL) {
			return ret;
		}

		p = p->fallback;
	} while (p != NULL);

	return NULL;
}
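
/*
 * Illustrative usage (a sketch, not part of the original source): four
 * contiguous entries aligned to a four-entry boundary, e.g. a naturally
 * aligned 16KiB block if the assumed entry size `PAGE_SIZE` is 4KiB. As the
 * comment above notes, the block is later freed with mpool_add_chunk().
 *
 *	void *block = mpool_alloc_contiguous(&ppool, 4, 4);
 *
 *	if (block != NULL) {
 *		// ... use the block ...
 *		mpool_add_chunk(&ppool, block, 4 * PAGE_SIZE);
 *	}
 */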
349}