/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/mpool.h"

#include <stdbool.h>

struct mpool_chunk {
	struct mpool_chunk *next_chunk;
	struct mpool_chunk *limit;
};

struct mpool_entry {
	struct mpool_entry *next;
};

static bool mpool_locks_enabled = false;

/**
 * Enables the locks protecting memory pools. Before this function is called,
 * the locks are disabled, that is, acquiring/releasing them is a no-op.
 */
void mpool_enable_locks(void)
{
	mpool_locks_enabled = true;
}

/**
 * Acquires the lock protecting the given memory pool, if locks are enabled.
 */
static void mpool_lock(struct mpool *p)
{
	if (mpool_locks_enabled) {
		sl_lock(&p->lock);
	}
}

/**
 * Releases the lock protecting the given memory pool, if locks are enabled.
 */
static void mpool_unlock(struct mpool *p)
{
	if (mpool_locks_enabled) {
		sl_unlock(&p->lock);
	}
}

/**
 * Initialises the given memory pool with the given entry size, which must be
 * at least the size of two pointers.
 *
 * All entries stored in the memory pool will be aligned to at least the entry
 * size.
 */
void mpool_init(struct mpool *p, size_t entry_size)
{
	p->entry_size = entry_size;
	p->chunk_list = NULL;
	p->entry_list = NULL;
	p->fallback = NULL;
	sl_init(&p->lock);
}
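
/*
 * Illustrative sketch (not part of this file): a typical boot-time setup,
 * assuming a hypothetical `ppool` and a caller-provided `boot_heap` buffer,
 * might be:
 *
 *	static struct mpool ppool;
 *	alignas(4096) static char boot_heap[16 * 4096];
 *
 *	mpool_init(&ppool, 4096);
 *	mpool_add_chunk(&ppool, boot_heap, sizeof(boot_heap));
 *	mpool_enable_locks();
 */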

/**
 * Initialises the given memory pool by replicating the properties of `from`.
 * It also pulls the chunk and free lists from `from`, consuming all its
 * resources and making them available via the new memory pool.
 */
void mpool_init_from(struct mpool *p, struct mpool *from)
{
	mpool_init(p, from->entry_size);

	mpool_lock(from);
	p->chunk_list = from->chunk_list;
	p->entry_list = from->entry_list;
	p->fallback = from->fallback;

	from->chunk_list = NULL;
	from->entry_list = NULL;
	from->fallback = NULL;
	mpool_unlock(from);
}
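
/*
 * Illustrative sketch: `mpool_init_from` transfers ownership of whatever is
 * left in an existing pool. Assuming a hypothetical `boot_pool`:
 *
 *	struct mpool runtime_pool;
 *
 *	mpool_init_from(&runtime_pool, &boot_pool);
 *
 * `boot_pool` is now empty; its chunks, entries and fallback all belong to
 * `runtime_pool`.
 */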

/**
 * Initialises the given memory pool with a fallback memory pool to be used if
 * this pool runs out of memory.
 */
void mpool_init_with_fallback(struct mpool *p, struct mpool *fallback)
{
	mpool_init(p, fallback->entry_size);
	p->fallback = fallback;
}

/**
 * Finishes the given memory pool, giving all free memory to the fallback pool
 * if there is one.
 */
void mpool_fini(struct mpool *p)
{
	struct mpool_entry *entry;
	struct mpool_chunk *chunk;

	if (!p->fallback) {
		return;
	}

	mpool_lock(p);

	/* Merge the free list into the fallback. */
	entry = p->entry_list;
	while (entry != NULL) {
		void *ptr = entry;

		entry = entry->next;
		mpool_free(p->fallback, ptr);
	}

	/* Merge the chunk list into the fallback. */
	chunk = p->chunk_list;
	while (chunk != NULL) {
		void *ptr = chunk;
		size_t size = (uintptr_t)chunk->limit - (uintptr_t)chunk;

		chunk = chunk->next_chunk;
		mpool_add_chunk(p->fallback, ptr, size);
	}

	p->chunk_list = NULL;
	p->entry_list = NULL;
	p->fallback = NULL;

	mpool_unlock(p);
}
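
/*
 * Illustrative sketch of the fallback lifecycle, assuming a hypothetical
 * global `ppool`: a caller can stage allocations in a local pool and hand
 * any free memory back when it is done:
 *
 *	struct mpool local;
 *
 *	mpool_init_with_fallback(&local, &ppool);
 *	... allocate from `local`, spilling over into `ppool` as needed ...
 *	mpool_fini(&local);
 *
 * After mpool_fini, all free memory held by `local` is back in `ppool`.
 */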

/**
 * Adds a contiguous chunk of memory to the given memory pool. The chunk will
 * eventually be broken up into entries of the size held by the memory pool.
 *
 * Only the portions aligned to the entry size will be added to the pool.
 *
 * Returns true if at least a portion of the chunk was added to the pool, or
 * false if none of the buffer was usable in the pool.
 */
bool mpool_add_chunk(struct mpool *p, void *begin, size_t size)
{
	struct mpool_chunk *chunk;
	uintptr_t new_begin;
	uintptr_t new_end;

	/* Round begin address up, and end address down. */
	new_begin = ((uintptr_t)begin + p->entry_size - 1) / p->entry_size *
		    p->entry_size;
	new_end = ((uintptr_t)begin + size) / p->entry_size * p->entry_size;

	/* Nothing to do if there isn't enough room for an entry. */
	if (new_begin >= new_end || new_end - new_begin < p->entry_size) {
		return false;
	}

	chunk = (struct mpool_chunk *)new_begin;
	chunk->limit = (struct mpool_chunk *)new_end;

	mpool_lock(p);
	chunk->next_chunk = p->chunk_list;
	p->chunk_list = chunk;
	mpool_unlock(p);

	return true;
}
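
/*
 * Worked example of the rounding above (illustrative numbers): with
 * entry_size = 64, begin = 0x1010 and size = 0x200, new_begin rounds up to
 * 0x1040 and new_end rounds down to 0x1200. That leaves 0x1c0 bytes, i.e.
 * seven usable 64-byte entries; the 0x30 bytes before new_begin and the
 * 0x10 bytes after new_end are discarded.
 */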

/**
 * Allocates an entry from the given memory pool, if one is available. The
 * fallback will not be used even if there is one.
 */
static void *mpool_alloc_no_fallback(struct mpool *p)
{
	void *ret;
	struct mpool_chunk *chunk;
	struct mpool_chunk *new_chunk;

	/* Fetch an entry from the free list if one is available. */
	mpool_lock(p);
	if (p->entry_list != NULL) {
		struct mpool_entry *entry = p->entry_list;

		p->entry_list = entry->next;
		ret = entry;
		goto exit;
	}

	/* The free list is empty. Try to carve an entry from a chunk. */
	chunk = p->chunk_list;
	if (chunk == NULL) {
		/* The chunk list is also empty; we're out of entries. */
		ret = NULL;
		goto exit;
	}

	new_chunk = (struct mpool_chunk *)((uintptr_t)chunk + p->entry_size);
	if (new_chunk >= chunk->limit) {
		p->chunk_list = chunk->next_chunk;
	} else {
		*new_chunk = *chunk;
		p->chunk_list = new_chunk;
	}

	ret = chunk;

exit:
	mpool_unlock(p);

	return ret;
}
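
/*
 * Illustrative trace of the chunk split above: with entry_size = 64 and a
 * head chunk spanning [0x2000, 0x2100), the first allocation returns 0x2000
 * and rewrites the chunk header at 0x2040 (limit still 0x2100). The fourth
 * allocation returns 0x20c0, at which point new_chunk == limit and the
 * chunk is unlinked from the list.
 */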

/**
 * Allocates an entry from the given memory pool, if one is available. If there
 * isn't one available, try to allocate from the fallback if there is one.
 */
void *mpool_alloc(struct mpool *p)
{
	do {
		void *ret = mpool_alloc_no_fallback(p);

		if (ret != NULL) {
			return ret;
		}

		p = p->fallback;
	} while (p != NULL);

	return NULL;
}

/**
 * Frees an entry back into the memory pool, making it available for reuse.
 *
 * This is meant to be used for freeing single entries. To free multiple
 * entries, one must call mpool_add_chunk instead.
 */
void mpool_free(struct mpool *p, void *ptr)
{
	struct mpool_entry *e = ptr;

	/* Store the newly freed entry at the front of the free list. */
	mpool_lock(p);
	e->next = p->entry_list;
	p->entry_list = e;
	mpool_unlock(p);
}
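
/*
 * Illustrative usage, assuming a pool set up as in the earlier sketches:
 *
 *	void *entry = mpool_alloc(&ppool);
 *
 *	if (entry != NULL) {
 *		... use the entry ...
 *		mpool_free(&ppool, entry);
 *	}
 */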

/**
 * Allocates a number of contiguous and aligned entries. If a suitable
 * allocation could not be found, the fallback will not be used even if there
 * is one.
 */
void *mpool_alloc_contiguous_no_fallback(struct mpool *p, size_t count,
					 size_t align)
{
	struct mpool_chunk **prev;
	void *ret = NULL;

	align *= p->entry_size;

	mpool_lock(p);

	/*
	 * Go through the chunk list in search of one with enough room for the
	 * requested allocation.
	 */
	prev = &p->chunk_list;
	while (*prev != NULL) {
		uintptr_t start;
		struct mpool_chunk *new_chunk;
		struct mpool_chunk *chunk = *prev;

		/* Round start address up to the required alignment. */
		start = (((uintptr_t)chunk + align - 1) / align) * align;

		/*
		 * Calculate where the new chunk would be if we consume the
		 * requested number of entries. Then check if this chunk is big
		 * enough to satisfy the request.
		 */
		new_chunk =
			(struct mpool_chunk *)(start + (count * p->entry_size));
		if (new_chunk <= chunk->limit) {
			/* Remove the consumed area. */
			if (new_chunk == chunk->limit) {
				*prev = chunk->next_chunk;
			} else {
				*new_chunk = *chunk;
				*prev = new_chunk;
			}

			/*
			 * Add back the space consumed by the alignment
			 * requirement, if it's big enough to fit an entry.
			 */
			if (start - (uintptr_t)chunk >= p->entry_size) {
				chunk->next_chunk = *prev;
				*prev = chunk;
				chunk->limit = (struct mpool_chunk *)start;
			}

			ret = (void *)start;
			break;
		}

		prev = &chunk->next_chunk;
	}

	mpool_unlock(p);

	return ret;
}

/**
 * Allocates a number of contiguous and aligned entries. This is a best-effort
 * operation and only succeeds if such entries can be found in the chunks list
 * or the chunks of the fallbacks (i.e., the entry list is never used to
 * satisfy these allocations).
 *
 * The alignment is specified as the number of entries, that is, if `align` is
 * 4, the alignment in bytes will be 4 * entry_size.
 *
 * The caller can eventually free the returned entries by calling
 * mpool_add_chunk.
 */
void *mpool_alloc_contiguous(struct mpool *p, size_t count, size_t align)
{
	do {
		void *ret = mpool_alloc_contiguous_no_fallback(p, count, align);

		if (ret != NULL) {
			return ret;
		}

		p = p->fallback;
	} while (p != NULL);

	return NULL;
}
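
/*
 * Illustrative sketch: with entry_size = 4096, the call below asks for four
 * contiguous entries aligned to two entries (8 KiB). Since contiguous
 * allocations are carved from chunks rather than the entry list, the region
 * is handed back with mpool_add_chunk rather than mpool_free:
 *
 *	void *table = mpool_alloc_contiguous(&ppool, 4, 2);
 *
 *	if (table != NULL) {
 *		... use the four pages ...
 *		mpool_add_chunk(&ppool, table, 4 * 4096);
 *	}
 */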