/*
2 * Copyright 2018 Google LLC
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * https://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "hf/mpool.h"
18
19#include <stdbool.h>
20
/*
 * A contiguous run of free memory held by a pool. The header lives in-place
 * at the start of the free region itself (see mpool_add_chunk): the usable
 * range is [chunk, limit), and chunks form a singly-linked list through
 * next_chunk.
 */
struct mpool_chunk {
	struct mpool_chunk *next_chunk;
	/* One past the end of this chunk's memory. */
	struct mpool_chunk *limit;
};
25
/*
 * A single freed entry, stored in-place in the freed memory itself and linked
 * into the pool's free list by mpool_free.
 */
struct mpool_entry {
	struct mpool_entry *next;
};
29
/*
 * While false (the boot-time default), mpool_lock/mpool_unlock are no-ops;
 * mpool_enable_locks flips this to true, presumably once the system starts
 * running concurrently — confirm against callers.
 */
static bool mpool_locks_enabled = false;
31
/**
 * Enables the locks protecting memory pools. Before this function is called,
 * the locks are disabled, that is, acquiring/releasing them is a no-op.
 *
 * Note: nothing in this file ever disables the locks again, so this is a
 * one-way switch.
 */
void mpool_enable_locks(void)
{
	mpool_locks_enabled = true;
}
40
41/**
42 * Acquires the lock protecting the given memory pool, if locks are enabled.
43 */
44static void mpool_lock(struct mpool *p)
45{
46 if (mpool_locks_enabled) {
47 sl_lock(&p->lock);
48 }
49}
50
51/**
52 * Releases the lock protecting the given memory pool, if locks are enabled.
53 */
54static void mpool_unlock(struct mpool *p)
55{
56 if (mpool_locks_enabled) {
57 sl_unlock(&p->lock);
58 }
59}
60
61/**
62 * Initialises the given memory pool with the given entry size, which must be
63 * at least the size of two pointers.
64 *
65 * All entries stored in the memory pool will be aligned to at least the entry
66 * size.
67 */
68void mpool_init(struct mpool *p, size_t entry_size)
69{
70 p->entry_size = entry_size;
71 p->chunk_list = NULL;
72 p->entry_list = NULL;
73 sl_init(&p->lock);
74}
75
76/**
77 * Initialises the given memory pool by replicating the properties of `from`. It
78 * also pulls the chunk and free lists from `from`, consuming all its resources
79 * and making them available via the new memory pool.
80 */
81void mpool_init_from(struct mpool *p, struct mpool *from)
82{
83 mpool_init(p, from->entry_size);
84
85 mpool_lock(from);
86 p->chunk_list = from->chunk_list;
87 p->entry_list = from->entry_list;
88
89 from->chunk_list = NULL;
90 from->entry_list = NULL;
91 mpool_unlock(from);
92}
93
94/**
95 * Adds a contiguous chunk of memory to the given memory pool. The chunk will
96 * eventually be broken up into entries of the size held by the memory pool.
97 *
98 * Only the portions aligned to the entry size will be added to the pool.
99 *
100 * Returns true if at least a portion of the chunk was added to pool, or false
101 * if none of the buffer was usable in the pool.
102 */
103bool mpool_add_chunk(struct mpool *p, void *begin, size_t size)
104{
105 struct mpool_chunk *chunk;
106 uintptr_t new_begin;
107 uintptr_t new_end;
108
109 /* Round begin address up, and end address down. */
110 new_begin = ((uintptr_t)begin + p->entry_size - 1) / p->entry_size *
111 p->entry_size;
112 new_end = ((uintptr_t)begin + size) / p->entry_size * p->entry_size;
113
114 /* Nothing to do if there isn't enough room for an entry. */
115 if (new_begin >= new_end || new_end - new_begin < p->entry_size) {
116 return false;
117 }
118
119 chunk = (struct mpool_chunk *)new_begin;
120 chunk->limit = (struct mpool_chunk *)new_end;
121
122 mpool_lock(p);
123 chunk->next_chunk = p->chunk_list;
124 p->chunk_list = chunk;
125 mpool_unlock(p);
126
127 return true;
128}
129
130/**
131 * Allocates an entry from the given memory pool, if one is available.
132 */
133void *mpool_alloc(struct mpool *p)
134{
135 void *ret;
136 struct mpool_chunk *chunk;
137 struct mpool_chunk *new_chunk;
138
139 /* Fetch an entry from the free list if one is available. */
140 mpool_lock(p);
141 if (p->entry_list != NULL) {
142 struct mpool_entry *entry = p->entry_list;
143 p->entry_list = entry->next;
144 ret = entry;
145 goto exit;
146 }
147
148 chunk = p->chunk_list;
149 if (chunk == NULL) {
150 /* The chunk list is also empty, we're out of entries. */
151 ret = NULL;
152 goto exit;
153 }
154
155 new_chunk = (struct mpool_chunk *)((uintptr_t)chunk + p->entry_size);
156 if (new_chunk >= chunk->limit) {
157 p->chunk_list = chunk->next_chunk;
158 } else {
159 *new_chunk = *chunk;
160 p->chunk_list = new_chunk;
161 }
162
163 ret = chunk;
164
165exit:
166 mpool_unlock(p);
167 return ret;
168}
169
170/**
171 * Frees an entry back into the memory pool, making it available for reuse.
172 *
173 * This is meant to be used for freeing single entries. To free multiple
174 * entries, one must call mpool_add_chunk instead.
175 */
176void mpool_free(struct mpool *p, void *ptr)
177{
178 struct mpool_entry *e = ptr;
179
180 /* Store the newly freed entry in the front of the free list. */
181 mpool_lock(p);
182 e->next = p->entry_list;
183 p->entry_list = e;
184 mpool_unlock(p);
185}
186
/**
 * Allocates a number of contiguous and aligned entries. This is a best-effort
 * operation and only succeeds if such entries can be found in chunks (i.e., the
 * entry list is never used to satisfy these allocations).
 *
 * The alignment is specified as the number of entries, that is, if `align` is
 * 4, the alignment in bytes will be 4 * entry_size.
 *
 * The caller can eventually free the returned entries by calling
 * mpool_add_chunk.
 *
 * Returns NULL if no chunk can accommodate `count` aligned entries.
 */
void *mpool_alloc_contiguous(struct mpool *p, size_t count, size_t align)
{
	struct mpool_chunk **prev;
	void *ret = NULL;

	/* Convert the alignment from a count of entries to bytes. */
	align *= p->entry_size;

	mpool_lock(p);

	/*
	 * Go through the chunk list in search of one with enough room for the
	 * requested allocation
	 */
	prev = &p->chunk_list;
	while (*prev != NULL) {
		uintptr_t start;
		struct mpool_chunk *new_chunk;
		struct mpool_chunk *chunk = *prev;

		/* Round start address up to the required alignment. */
		start = ((uintptr_t)chunk + align - 1) / align * align;

		/*
		 * Calculate where the new chunk would be if we consume the
		 * requested number of entries. Then check if this chunk is big
		 * enough to satisfy the request.
		 */
		new_chunk =
			(struct mpool_chunk *)(start + count * p->entry_size);
		if (new_chunk <= chunk->limit) {
			/* Remove the consumed area. */
			if (new_chunk == chunk->limit) {
				/* Entire tail consumed: unlink the chunk. */
				*prev = chunk->next_chunk;
			} else {
				/*
				 * A tail remains: copy the chunk header (next
				 * pointer and limit) just past the allocated
				 * region and relink in place. This copy must
				 * happen before chunk->next_chunk is reused
				 * below.
				 */
				*new_chunk = *chunk;
				*prev = new_chunk;
			}

			/*
			 * Add back the space consumed by the alignment
			 * requirement, if it's big enough to fit an entry.
			 * The original header at `chunk` is safe to reuse
			 * here because its contents were either copied to
			 * new_chunk or the chunk was fully unlinked above.
			 */
			if (start - (uintptr_t)chunk >= p->entry_size) {
				chunk->next_chunk = *prev;
				*prev = chunk;
				chunk->limit = (struct mpool_chunk *)start;
			}

			ret = (void *)start;
			break;
		}

		prev = &chunk->next_chunk;
	}

	mpool_unlock(p);

	return ret;
}