blob: 9a0a9f17b30f0c59fc4c104f74f955a0fc663cb4 [file] [log] [blame]
/*
 * SPDX-License-Identifier: Apache-2.0
 * SPDX-FileCopyrightText: Copyright The Mbed TLS Contributors
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18
19#include <arch_helpers.h>
20#include <assert.h>
21#include <cpuid.h>
22#include <debug.h>
23#include <errno.h>
24#include <mbedtls/memory_buffer_alloc.h>
25#include <mbedtls/platform.h>
26#include <memory_alloc.h>
27#include <sizes.h>
28#include <string.h>
29
30#define MAGIC1 UL(0xFF00AA55)
31#define MAGIC2 UL(0xEE119966)
32#define MAX_BT 20
33
34#if defined(MBEDTLS_MEMORY_DEBUG)
35#error MBEDTLS_MEMORY_DEBUG is not supported by this allocator.
36#endif
37
38#if defined(MBEDTLS_MEMORY_BUFFER_ALLOC_C)
39/*
40 * If MBEDTLS_MEMORY_BUFFER_ALLOC_C is defined then the allocator from mbedTLS
41 * is going to be used, which is not desired.
42 */
43#error MBEDTLS_MEMORY_BUFFER_ALLOC_C is defined
44#endif /* MBEDTLS_MEMORY_BUFFER_ALLOC_C */
45
46#if defined(MBEDTLS_MEMORY_BACKTRACE)
47#error MBEDTLS_MEMORY_BACKTRACE is not supported by this allocator.
48#endif /* MBEDTLS_MEMORY_BACKTRACE */
49
50#if defined(MBEDTLS_THREADING_C)
51/*
52 * This allocator doesn't support multithreading. On the other hand it
53 * handles multiple heaps
54 */
55#error MBEDTLS_THREADING_C is not supported by this allocator.
56#endif /* MBEDTLS_THREADING_C */
57
58#if defined(MBEDTLS_SELF_TEST)
59#error MBEDTLS_SELF_TEST is not supported by this allocator.
60#endif /* MBEDTLS_SELF_TEST */
61
62/* Array of heaps per CPU */
63static struct buffer_alloc_ctx *ctx_per_cpu[MAX_CPUS];
64
65static inline struct buffer_alloc_ctx *get_heap_ctx(void)
66{
67 struct buffer_alloc_ctx *ctx;
68 unsigned int cpu_id = my_cpuid();
69
70 assert(cpu_id < MAX_CPUS);
71
72 ctx = ctx_per_cpu[cpu_id];
73 /* Programming error if heap is not assigned */
74 if (ctx == NULL) {
75 ERROR(" No heap assigned to this CPU %u\n", cpu_id);
76 panic();
77 }
78
79 return ctx;
80}
81
82static int verify_header(struct memory_header_s *hdr)
83{
84 if (hdr->magic1 != MAGIC1) {
85 return 1;
86 }
87
88 if (hdr->magic2 != MAGIC2) {
89 return 1;
90 }
91
92 if (hdr->alloc > 1UL) {
93 return 1;
94 }
95
Shruti Gupta9e966b82024-03-21 13:45:24 +000096 if ((hdr->prev != NULL) && (hdr->prev == hdr->next)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +000097 return 1;
98 }
99
Shruti Gupta9e966b82024-03-21 13:45:24 +0000100 if ((hdr->prev_free != NULL) && (hdr->prev_free == hdr->next_free)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +0000101 return 1;
102 }
103
104 return 0;
105}
106
107static int verify_chain(struct buffer_alloc_ctx *heap)
108{
109 struct memory_header_s *prv = heap->first;
110 struct memory_header_s *cur;
111
Shruti Gupta9e966b82024-03-21 13:45:24 +0000112 if ((prv == NULL) || (verify_header(prv) != 0)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +0000113 return 1;
114 }
115
116 if (heap->first->prev != NULL) {
117 return 1;
118 }
119
120 cur = heap->first->next;
121
122 while (cur != NULL) {
123 if (verify_header(cur) != 0) {
124 return 1;
125 }
126
127 if (cur->prev != prv) {
128 return 1;
129 }
130
131 prv = cur;
132 cur = cur->next;
133 }
134
135 return 0;
136}
137
138static void *buffer_alloc_calloc_with_heap(struct buffer_alloc_ctx *heap,
139 size_t n,
140 size_t size)
141{
142 struct memory_header_s *new;
143 struct memory_header_s *cur = heap->first_free;
144 unsigned char *p;
145 void *ret;
Shruti Gupta9e966b82024-03-21 13:45:24 +0000146 size_t original_len;
147 size_t len;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000148
Shruti Gupta9e966b82024-03-21 13:45:24 +0000149 if ((heap->buf == NULL) || (heap->first == NULL)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +0000150 return NULL;
151 }
152
Shruti Gupta9e966b82024-03-21 13:45:24 +0000153 original_len = n * size;
154 len = original_len;
Soby Mathewb4c6df42022-11-09 11:13:29 +0000155
Shruti Gupta9e966b82024-03-21 13:45:24 +0000156 if ((n == 0UL) || (size == 0UL) || ((len / n) != size)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +0000157 return NULL;
Shruti Gupta9e966b82024-03-21 13:45:24 +0000158 } else if (len > ((size_t)-MBEDTLS_MEMORY_ALIGN_MULTIPLE)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +0000159 return NULL;
160 }
161
162 if ((len % MBEDTLS_MEMORY_ALIGN_MULTIPLE) != 0) {
163 len -= len % MBEDTLS_MEMORY_ALIGN_MULTIPLE;
164 len += MBEDTLS_MEMORY_ALIGN_MULTIPLE;
165 }
166
167 /* Find block that fits */
168 while (cur != NULL) {
169 if (cur->size >= len) {
170 break;
171 }
172 cur = cur->next_free;
173 }
174
175 if (cur == NULL) {
176 return NULL;
177 }
178
179 if (cur->alloc != 0UL) {
180 assert(false);
181 }
182
183 /* Found location, split block if > memory_header + 4 room left */
184 if ((cur->size - len) <
185 (sizeof(struct memory_header_s) + MBEDTLS_MEMORY_ALIGN_MULTIPLE)) {
186 cur->alloc = 1UL;
187
188 /* Remove from free_list */
189 if (cur->prev_free != NULL) {
190 cur->prev_free->next_free = cur->next_free;
191 } else {
192 heap->first_free = cur->next_free;
193 }
194
195 if (cur->next_free != NULL) {
196 cur->next_free->prev_free = cur->prev_free;
197 }
198
199 cur->prev_free = NULL;
200 cur->next_free = NULL;
201
Shruti Gupta9e966b82024-03-21 13:45:24 +0000202 if ((heap->verify & MBEDTLS_MEMORY_VERIFY_ALLOC) != 0) {
Soby Mathewb4c6df42022-11-09 11:13:29 +0000203 assert(verify_chain(heap) == 0);
204 }
205
206 ret = (unsigned char *) cur + sizeof(struct memory_header_s);
Shruti Gupta9e966b82024-03-21 13:45:24 +0000207 (void)memset(ret, 0, original_len);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000208
209 return ret;
210 }
211
212 p = ((unsigned char *) cur) + sizeof(struct memory_header_s) + len;
213 new = (struct memory_header_s *) p;
214
215 new->size = cur->size - len - sizeof(struct memory_header_s);
216 new->alloc = 0;
217 new->prev = cur;
218 new->next = cur->next;
219 new->magic1 = MAGIC1;
220 new->magic2 = MAGIC2;
221
222 if (new->next != NULL) {
223 new->next->prev = new;
224 }
225
226 /* Replace cur with new in free_list */
227 new->prev_free = cur->prev_free;
228 new->next_free = cur->next_free;
229 if (new->prev_free != NULL) {
230 new->prev_free->next_free = new;
231 } else {
232 heap->first_free = new;
233 }
234
235 if (new->next_free != NULL) {
236 new->next_free->prev_free = new;
237 }
238
239 cur->alloc = 1;
240 cur->size = len;
241 cur->next = new;
242 cur->prev_free = NULL;
243 cur->next_free = NULL;
244
245 if ((heap->verify & MBEDTLS_MEMORY_VERIFY_ALLOC) != 0) {
246 assert(verify_chain(heap) == 0);
247 }
248
249 ret = (unsigned char *) cur + sizeof(struct memory_header_s);
Shruti Gupta9e966b82024-03-21 13:45:24 +0000250 (void)memset(ret, 0, original_len);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000251
252 return ret;
253}
254
/* calloc entry point: allocate from the calling CPU's assigned heap. */
void *buffer_alloc_calloc(size_t n, size_t size)
{
	struct buffer_alloc_ctx *heap = get_heap_ctx();

	assert(heap != NULL);
	return buffer_alloc_calloc_with_heap(heap, n, size);
}
262
263static void buffer_alloc_free_with_heap(struct buffer_alloc_ctx *heap,
264 void *ptr)
265{
266 struct memory_header_s *hdr;
267 struct memory_header_s *old = NULL;
268 unsigned char *p = (unsigned char *) ptr;
269
Shruti Gupta9e966b82024-03-21 13:45:24 +0000270 if ((ptr == NULL) || (heap->buf == NULL) || (heap->first == NULL)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +0000271 return;
272 }
273
Shruti Gupta9e966b82024-03-21 13:45:24 +0000274 if ((p < heap->buf) || (p >= (heap->buf + heap->len))) {
Soby Mathewb4c6df42022-11-09 11:13:29 +0000275 assert(0);
276 }
277
278 p -= sizeof(struct memory_header_s);
279 hdr = (struct memory_header_s *) p;
280
281 assert(verify_header(hdr) == 0);
282
283 if (hdr->alloc != 1) {
284 assert(0);
285 }
286
287 hdr->alloc = 0;
288
289 /* Regroup with block before */
Shruti Gupta9e966b82024-03-21 13:45:24 +0000290 if ((hdr->prev != NULL) && (hdr->prev->alloc == 0UL)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +0000291 hdr->prev->size += sizeof(struct memory_header_s) + hdr->size;
292 hdr->prev->next = hdr->next;
293 old = hdr;
294 hdr = hdr->prev;
295
296 if (hdr->next != NULL) {
297 hdr->next->prev = hdr;
298 }
299
Shruti Gupta9e966b82024-03-21 13:45:24 +0000300 (void)memset(old, 0, sizeof(struct memory_header_s));
Soby Mathewb4c6df42022-11-09 11:13:29 +0000301 }
302
303 /* Regroup with block after */
Shruti Gupta9e966b82024-03-21 13:45:24 +0000304 if ((hdr->next != NULL) && (hdr->next->alloc == 0UL)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +0000305 hdr->size += sizeof(struct memory_header_s) + hdr->next->size;
306 old = hdr->next;
307 hdr->next = hdr->next->next;
308
Shruti Gupta9e966b82024-03-21 13:45:24 +0000309 if ((hdr->prev_free != NULL) || (hdr->next_free != NULL)) {
Soby Mathewb4c6df42022-11-09 11:13:29 +0000310 if (hdr->prev_free != NULL) {
311 hdr->prev_free->next_free = hdr->next_free;
312 } else {
313 heap->first_free = hdr->next_free;
314 }
315 if (hdr->next_free != NULL) {
316 hdr->next_free->prev_free = hdr->prev_free;
317 }
318 }
319
320 hdr->prev_free = old->prev_free;
321 hdr->next_free = old->next_free;
322
323 if (hdr->prev_free != NULL) {
324 hdr->prev_free->next_free = hdr;
325 } else {
326 heap->first_free = hdr;
327 }
328
329 if (hdr->next_free != NULL) {
330 hdr->next_free->prev_free = hdr;
331 }
332
333 if (hdr->next != NULL) {
334 hdr->next->prev = hdr;
335 }
336
Shruti Gupta9e966b82024-03-21 13:45:24 +0000337 (void)memset(old, 0, sizeof(struct memory_header_s));
Soby Mathewb4c6df42022-11-09 11:13:29 +0000338 }
339
340 /*
341 * Prepend to free_list if we have not merged
342 * (Does not have to stay in same order as prev / next list)
343 */
344 if (old == NULL) {
345 hdr->next_free = heap->first_free;
346 if (heap->first_free != NULL) {
347 heap->first_free->prev_free = hdr;
348 }
349 heap->first_free = hdr;
350 }
351
Shruti Gupta9e966b82024-03-21 13:45:24 +0000352 if ((heap->verify & MBEDTLS_MEMORY_VERIFY_FREE) != 0) {
Soby Mathewb4c6df42022-11-09 11:13:29 +0000353 assert(verify_chain(heap));
354 }
355}
356
357void buffer_alloc_free(void *ptr)
358{
359 struct buffer_alloc_ctx *heap = get_heap_ctx();
360
361 assert(heap);
362 buffer_alloc_free_with_heap(heap, ptr);
363}
364
365int buffer_alloc_ctx_assign(struct buffer_alloc_ctx *ctx)
366{
367 unsigned int cpuid = my_cpuid();
368
369 assert(cpuid < MAX_CPUS);
370
371 if (ctx == NULL) {
372 return -EINVAL;
373 }
374
375 if (ctx_per_cpu[cpuid] != NULL) {
376 /* multiple assign */
377 return -EINVAL;
378 }
379
380 ctx_per_cpu[cpuid] = ctx;
381
382 return 0;
383}
384
385void buffer_alloc_ctx_unassign(void)
386{
387 unsigned int cpuid = my_cpuid();
388
389 assert(cpuid < MAX_CPUS);
390
391 /* multiple unassign */
392 assert(ctx_per_cpu[cpuid] != NULL);
393
394 ctx_per_cpu[cpuid] = NULL;
395}
396
Chuyue Luo8ba15972023-12-15 11:12:58 +0000397/* NOTE: This function is not currently expected to be called. */
Soby Mathewb4c6df42022-11-09 11:13:29 +0000398void mbedtls_memory_buffer_set_verify(int verify)
399{
400 struct buffer_alloc_ctx *heap = get_heap_ctx();
401
Soby Mathewb4c6df42022-11-09 11:13:29 +0000402 assert(heap);
403 heap->verify = verify;
404}
405
406int mbedtls_memory_buffer_alloc_verify(void)
407{
408 struct buffer_alloc_ctx *heap = get_heap_ctx();
409
410 assert(heap);
411 return verify_chain(heap);
412}
413
414void mbedtls_memory_buffer_alloc_init(unsigned char *buf, size_t len)
415{
416 /* The heap structure is obtained from the REC
417 * while the buffer is passed in the init function.
418 * This way the interface can remain the same.
419 */
420 struct buffer_alloc_ctx *heap = get_heap_ctx();
421
422 assert(heap);
423
Shruti Gupta9e966b82024-03-21 13:45:24 +0000424 (void)memset(heap, 0, sizeof(struct buffer_alloc_ctx));
Soby Mathewb4c6df42022-11-09 11:13:29 +0000425
426 if (len < sizeof(struct memory_header_s) +
427 MBEDTLS_MEMORY_ALIGN_MULTIPLE) {
428 return;
Shruti Gupta9e966b82024-03-21 13:45:24 +0000429 } else if (((size_t)buf % MBEDTLS_MEMORY_ALIGN_MULTIPLE) != 0U) {
Soby Mathewb4c6df42022-11-09 11:13:29 +0000430 /* Adjust len first since buf is used in the computation */
431 len -= MBEDTLS_MEMORY_ALIGN_MULTIPLE
432 - ((size_t)buf % MBEDTLS_MEMORY_ALIGN_MULTIPLE);
433 buf += MBEDTLS_MEMORY_ALIGN_MULTIPLE
434 - ((size_t)buf % MBEDTLS_MEMORY_ALIGN_MULTIPLE);
435 }
436
Shruti Gupta9e966b82024-03-21 13:45:24 +0000437 (void)memset(buf, 0, len);
Soby Mathewb4c6df42022-11-09 11:13:29 +0000438
439 heap->buf = buf;
440 heap->len = len;
441
442 heap->first = (struct memory_header_s *)buf;
443 heap->first->size = len - sizeof(struct memory_header_s);
444 heap->first->magic1 = MAGIC1;
445 heap->first->magic2 = MAGIC2;
446 heap->first_free = heap->first;
447}
448
449void mbedtls_memory_buffer_alloc_free(void)
450{
451 struct buffer_alloc_ctx *heap = get_heap_ctx();
452
453 assert(heap);
Shruti Gupta9e966b82024-03-21 13:45:24 +0000454 (void)memset(heap, 0, sizeof(struct buffer_alloc_ctx));
Soby Mathewb4c6df42022-11-09 11:13:29 +0000455}