/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/mpool.h"

#include <stdbool.h>

#include "hf/arch/std.h"

struct mpool_chunk {
	struct mpool_chunk *next_chunk;
	struct mpool_chunk *limit;
};

struct mpool_entry {
	struct mpool_entry *next;
};

static bool mpool_locks_enabled = false;

/**
 * Enables the locks protecting memory pools. Before this function is called,
 * the locks are disabled, that is, acquiring/releasing them is a no-op.
 */
void mpool_enable_locks(void)
{
	mpool_locks_enabled = true;
}
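
/*
 * Illustrative call order (a sketch only; the entry size of 64 and
 * `start_other_cpus` are hypothetical, not part of this file): pools are
 * typically set up while a single CPU runs, and locking is switched on just
 * before concurrency begins.
 *
 *	mpool_init(&pool, 64);	// single-threaded early boot
 *	...
 *	mpool_enable_locks();	// from here on, pool accesses take the lock
 *	start_other_cpus();
 */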

/**
 * Acquires the lock protecting the given memory pool, if locks are enabled.
 */
static void mpool_lock(struct mpool *p)
{
	if (mpool_locks_enabled) {
		sl_lock(&p->lock);
	}
}

/**
 * Releases the lock protecting the given memory pool, if locks are enabled.
 */
static void mpool_unlock(struct mpool *p)
{
	if (mpool_locks_enabled) {
		sl_unlock(&p->lock);
	}
}

/**
 * Initialises the given memory pool with the given entry size, which must be
 * at least the size of two pointers.
 *
 * All entries stored in the memory pool will be aligned to at least the entry
 * size.
 */
void mpool_init(struct mpool *p, size_t entry_size)
{
	p->entry_size = entry_size;
	p->chunk_list = NULL;
	p->entry_list = NULL;
	p->fallback = NULL;
	sl_init(&p->lock);
}
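
/*
 * A minimal usage sketch (the pool, the 64-byte entry size and the backing
 * buffer are illustrative, not taken from this file). Note that 64 satisfies
 * the two-pointer minimum on both 32-bit and 64-bit targets.
 *
 *	static struct mpool pool;
 *	static char buffer[4096];
 *
 *	mpool_init(&pool, 64);
 *	mpool_add_chunk(&pool, buffer, sizeof(buffer));
 *	void *entry = mpool_alloc(&pool); // one 64-byte-aligned entry
 *	mpool_free(&pool, entry);
 */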

/**
 * Initialises the given memory pool by replicating the properties of `from`.
 * It also pulls the chunk and free lists from `from`, consuming all its
 * resources and making them available via the new memory pool.
 */
void mpool_init_from(struct mpool *p, struct mpool *from)
{
	mpool_init(p, from->entry_size);

	mpool_lock(from);
	p->chunk_list = from->chunk_list;
	p->entry_list = from->entry_list;
	p->fallback = from->fallback;

	from->chunk_list = NULL;
	from->entry_list = NULL;
	from->fallback = NULL;
	mpool_unlock(from);
}
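
/*
 * Sketch of the hand-over pattern this enables (names are hypothetical): a
 * boot-time pool's resources move into a longer-lived pool once set-up is
 * done, leaving the source pool empty but still valid.
 *
 *	struct mpool boot_pool;
 *	struct mpool runtime_pool;
 *
 *	mpool_init(&boot_pool, 64);
 *	mpool_add_chunk(&boot_pool, buffer, sizeof(buffer));
 *	...
 *	mpool_init_from(&runtime_pool, &boot_pool);
 *	// boot_pool now has no chunks, entries or fallback.
 */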

/**
 * Initialises the given memory pool with a fallback memory pool to be used if
 * this pool runs out of memory.
 */
void mpool_init_with_fallback(struct mpool *p, struct mpool *fallback)
{
	mpool_init(p, fallback->entry_size);
	p->fallback = fallback;
}

/**
 * Finishes the given memory pool, giving all free memory to the fallback pool
 * if there is one.
 */
void mpool_fini(struct mpool *p)
{
	struct mpool_entry *entry;
	struct mpool_chunk *chunk;

	if (!p->fallback) {
		return;
	}

	mpool_lock(p);

	/* Merge the freelist into the fallback. */
	entry = p->entry_list;
	while (entry != NULL) {
		void *ptr = entry;

		entry = entry->next;
		mpool_free(p->fallback, ptr);
	}

	/* Merge the chunk list into the fallback. */
	chunk = p->chunk_list;
	while (chunk != NULL) {
		void *ptr = chunk;
		size_t size = (uintptr_t)chunk->limit - (uintptr_t)chunk;

		chunk = chunk->next_chunk;
		mpool_add_chunk(p->fallback, ptr, size);
	}

	p->chunk_list = NULL;
	p->entry_list = NULL;
	p->fallback = NULL;

	mpool_unlock(p);
}
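
/*
 * Together with mpool_init_with_fallback, this supports a scoped pattern
 * (a sketch; `global_pool` is hypothetical): a local pool borrows from a
 * global one, and whatever free memory it still holds is returned on
 * mpool_fini. Entries still held by the caller are unaffected.
 *
 *	struct mpool local;
 *
 *	mpool_init_with_fallback(&local, &global_pool);
 *	// ... allocations that may spill over into global_pool ...
 *	mpool_fini(&local);	// free entries/chunks flow back to global_pool
 */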

/**
 * Adds a contiguous chunk of memory to the given memory pool. The chunk will
 * eventually be broken up into entries of the size held by the memory pool.
 *
 * Only the portions aligned to the entry size will be added to the pool.
 *
 * Returns true if at least a portion of the chunk was added to the pool, or
 * false if none of the buffer was usable in the pool.
 */
bool mpool_add_chunk(struct mpool *p, void *begin, size_t size)
{
	struct mpool_chunk *chunk;
	char *new_begin;
	char *new_end;

	/* Round begin address up, and end address down. */
	new_begin = (void *)align_up((char *)begin, p->entry_size);
	new_end = (void *)align_down((char *)begin + size, p->entry_size);

	/* Nothing to do if there isn't enough room for an entry. */
	if (new_begin >= new_end || new_end - new_begin < p->entry_size) {
		return false;
	}

	chunk = (struct mpool_chunk *)new_begin;
	chunk->limit = (struct mpool_chunk *)new_end;

	mpool_lock(p);
	chunk->next_chunk = p->chunk_list;
	p->chunk_list = chunk;
	mpool_unlock(p);

	return true;
}
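
/*
 * Worked example of the trimming behaviour (illustrative numbers): with an
 * entry size of 64 (0x40), a buffer at address 0x1010 with size 0x100 spans
 * [0x1010, 0x1110) and is trimmed to [0x1040, 0x1100), so 0xc0 bytes (three
 * entries) are added and the function returns true. A buffer too small to
 * hold one aligned entry is rejected with false.
 */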

/**
 * Allocates an entry from the given memory pool, if one is available. The
 * fallback will not be used even if there is one.
 */
static void *mpool_alloc_no_fallback(struct mpool *p)
{
	void *ret;
	struct mpool_chunk *chunk;
	struct mpool_chunk *new_chunk;

	/* Fetch an entry from the free list if one is available. */
	mpool_lock(p);
	if (p->entry_list != NULL) {
		struct mpool_entry *entry = p->entry_list;

		p->entry_list = entry->next;
		ret = entry;
		goto exit;
	}

	/* The free list was empty. Try a chunk instead. */
	chunk = p->chunk_list;
	if (chunk == NULL) {
		/* The chunk list is also empty; we're out of entries. */
		ret = NULL;
		goto exit;
	}

	new_chunk = (struct mpool_chunk *)((char *)chunk + p->entry_size);
	if (new_chunk >= chunk->limit) {
		p->chunk_list = chunk->next_chunk;
	} else {
		*new_chunk = *chunk;
		p->chunk_list = new_chunk;
	}

	ret = chunk;

exit:
	mpool_unlock(p);

	return ret;
}

/**
 * Allocates an entry from the given memory pool, if one is available. If there
 * isn't one available, try to allocate from the fallback if there is one.
 */
void *mpool_alloc(struct mpool *p)
{
	do {
		void *ret = mpool_alloc_no_fallback(p);

		if (ret != NULL) {
			return ret;
		}

		p = p->fallback;
	} while (p != NULL);

	return NULL;
}

/**
 * Frees an entry back into the memory pool, making it available for reuse.
 *
 * This is meant to be used for freeing single entries. To free multiple
 * entries, one must call mpool_add_chunk instead.
 */
void mpool_free(struct mpool *p, void *ptr)
{
	struct mpool_entry *e = ptr;

	/* Store the newly freed entry at the front of the free list. */
	mpool_lock(p);
	e->next = p->entry_list;
	p->entry_list = e;
	mpool_unlock(p);
}
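
/*
 * Sketch of the distinction drawn above (illustrative, assuming `pool` was
 * initialised earlier): a single entry from mpool_alloc goes back via
 * mpool_free, whereas a contiguous run from mpool_alloc_contiguous goes back
 * via mpool_add_chunk.
 *
 *	void *one = mpool_alloc(&pool);
 *	mpool_free(&pool, one);
 *
 *	void *many = mpool_alloc_contiguous(&pool, 4, 1);
 *	mpool_add_chunk(&pool, many, 4 * pool.entry_size);
 */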

/**
 * Allocates a number of contiguous and aligned entries. If a suitable
 * allocation could not be found, the fallback will not be used even if there
 * is one.
 */
void *mpool_alloc_contiguous_no_fallback(struct mpool *p, size_t count,
					 size_t align)
{
	struct mpool_chunk **prev;
	void *ret = NULL;

	align *= p->entry_size;

	mpool_lock(p);

	/*
	 * Go through the chunk list in search of one with enough room for the
	 * requested allocation.
	 */
	prev = &p->chunk_list;
	while (*prev != NULL) {
		char *start;
		struct mpool_chunk *new_chunk;
		struct mpool_chunk *chunk = *prev;

		/* Round start address up to the required alignment. */
		start = (void *)align_up((char *)chunk, align);

		/*
		 * Calculate where the new chunk would be if we consume the
		 * requested number of entries. Then check if this chunk is big
		 * enough to satisfy the request.
		 */
		new_chunk =
			(struct mpool_chunk *)(start + (count * p->entry_size));
		if (new_chunk <= chunk->limit) {
			/* Remove the consumed area. */
			if (new_chunk == chunk->limit) {
				*prev = chunk->next_chunk;
			} else {
				*new_chunk = *chunk;
				*prev = new_chunk;
			}

			/*
			 * Add back the space consumed by the alignment
			 * requirement, if it's big enough to fit an entry.
			 */
			if (start - (char *)chunk >= p->entry_size) {
				chunk->next_chunk = *prev;
				*prev = chunk;
				chunk->limit = (struct mpool_chunk *)start;
			}

			ret = (void *)start;
			break;
		}

		prev = &chunk->next_chunk;
	}

	mpool_unlock(p);

	return ret;
}

/**
 * Allocates a number of contiguous and aligned entries. This is a best-effort
 * operation and only succeeds if such entries can be found in the chunks list
 * or the chunks of the fallbacks (i.e., the entry list is never used to
 * satisfy these allocations).
 *
 * The alignment is specified as the number of entries, that is, if `align` is
 * 4, the alignment in bytes will be 4 * entry_size.
 *
 * The caller can eventually free the returned entries by calling
 * mpool_add_chunk.
 */
void *mpool_alloc_contiguous(struct mpool *p, size_t count, size_t align)
{
	do {
		void *ret = mpool_alloc_contiguous_no_fallback(p, count, align);

		if (ret != NULL) {
			return ret;
		}

		p = p->fallback;
	} while (p != NULL);

	return NULL;
}
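
/*
 * Usage sketch (illustrative values, assuming a 64-byte entry size): this
 * requests 4 contiguous entries (256 bytes) aligned to 4 * 64 = 256 bytes,
 * and returns the whole run to the pool when done.
 *
 *	void *block = mpool_alloc_contiguous(&pool, 4, 4);
 *	if (block != NULL) {
 *		...
 *		mpool_add_chunk(&pool, block, 4 * pool.entry_size);
 *	}
 */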