blob: b61301922e4a526ede6e1ed737be86ce546ca65a [file] [log] [blame]
Dave Rodgmanfbc23222022-11-24 18:07:37 +00001/**
2 * \file alignment.h
3 *
4 * \brief Utility code for dealing with unaligned memory accesses
5 */
6/*
7 * Copyright The Mbed TLS Contributors
Dave Rodgman16799db2023-11-02 19:47:20 +00008 * SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
Dave Rodgmanfbc23222022-11-24 18:07:37 +00009 */
10
11#ifndef MBEDTLS_LIBRARY_ALIGNMENT_H
12#define MBEDTLS_LIBRARY_ALIGNMENT_H
13
14#include <stdint.h>
Dave Rodgman96d61d12022-11-24 19:33:22 +000015#include <string.h>
Dave Rodgmanf7f1f742022-11-28 14:52:45 +000016#include <stdlib.h>
Dave Rodgmanfbc23222022-11-24 18:07:37 +000017
Dave Rodgman7d8c99a2024-01-19 14:02:58 +000018#if defined(__GNUC__) && !defined(__ARMCC_VERSION) && !defined(__clang__) \
19 && !defined(__llvm__) && !defined(__INTEL_COMPILER)
20/* Defined if the compiler really is gcc and not clang, etc */
21#define MBEDTLS_COMPILER_IS_GCC
22#define MBEDTLS_GCC_VERSION \
23 (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
24#endif
25
Dave Rodgmanb9cd19b2022-12-30 21:32:03 +000026/*
27 * Define MBEDTLS_EFFICIENT_UNALIGNED_ACCESS for architectures where unaligned memory
Dave Rodgman7f376fa2023-01-05 12:25:15 +000028 * accesses are known to be efficient.
29 *
30 * All functions defined here will behave correctly regardless, but might be less
31 * efficient when this is not defined.
Dave Rodgmanb9cd19b2022-12-30 21:32:03 +000032 */
33#if defined(__ARM_FEATURE_UNALIGNED) \
Dave Rodgmanc5cc7272023-09-15 11:41:17 +010034 || defined(MBEDTLS_ARCH_IS_X86) || defined(MBEDTLS_ARCH_IS_X64) \
Dave Rodgman0a487172023-09-15 11:52:06 +010035 || defined(MBEDTLS_PLATFORM_IS_WINDOWS_ON_ARM64)
Dave Rodgmanb9cd19b2022-12-30 21:32:03 +000036/*
37 * __ARM_FEATURE_UNALIGNED is defined where appropriate by armcc, gcc 7, clang 9
Dave Rodgman7f376fa2023-01-05 12:25:15 +000038 * (and later versions) for Arm v7 and later; all x86 platforms should have
39 * efficient unaligned access.
Dave Rodgman78fc0bd2023-08-08 10:36:15 +010040 *
41 * https://learn.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions?view=msvc-170#alignment
42 * specifies that on Windows-on-Arm64, unaligned access is safe (except for uncached
43 * device memory).
Dave Rodgmanb9cd19b2022-12-30 21:32:03 +000044 */
45#define MBEDTLS_EFFICIENT_UNALIGNED_ACCESS
46#endif
47
Dave Rodgmanc5812642024-01-19 14:04:28 +000048#if defined(__IAR_SYSTEMS_ICC__) && \
49 (defined(MBEDTLS_ARCH_IS_ARM64) || defined(MBEDTLS_ARCH_IS_ARM32) \
50 || defined(__ICCRX__) || defined(__ICCRL78__) || defined(__ICCRISCV__))
51#pragma language=save
52#pragma language=extended
53#define MBEDTLS_POP_IAR_LANGUAGE_PRAGMA
54/* IAR recommend this technique for accessing unaligned data in
55 * https://www.iar.com/knowledge/support/technical-notes/compiler/accessing-unaligned-data
56 * This results in a single load / store instruction (if unaligned access is supported).
57 * According to that document, this is only supported on certain architectures.
58 */
59 #define UINT_UNALIGNED
60typedef uint16_t __packed mbedtls_uint16_unaligned_t;
61typedef uint32_t __packed mbedtls_uint32_unaligned_t;
62typedef uint64_t __packed mbedtls_uint64_unaligned_t;
63#elif defined(MBEDTLS_COMPILER_IS_GCC) && (MBEDTLS_GCC_VERSION >= 40504) && \
64 ((MBEDTLS_GCC_VERSION < 90300) || (!defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)))
65/*
66 * Old versions of gcc, depending on how the target is specified, may generate a branch to memcpy
67 * for calls like `memcpy(dest, src, 4)` rather than generating some LDR or LDRB instructions
68 * (similar for stores).
69 * Recent versions where unaligned access is not enabled also do this.
70 *
71 * For performance (and code size, in some cases), we want to avoid the branch and just generate
72 * some inline load/store instructions since the access is small and constant-size.
73 *
74 * The manual states:
75 * "The aligned attribute specifies a minimum alignment for the variable or structure field,
76 * measured in bytes."
77 * https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html
78 *
79 * Tested with several versions of GCC from 4.5.0 up to 9.3.0
80 * We don't enable for older than 4.5.0 as this has not been tested.
81 */
82 #define UINT_UNALIGNED
83typedef uint16_t __attribute__((__aligned__(1))) mbedtls_uint16_unaligned_t;
84typedef uint32_t __attribute__((__aligned__(1))) mbedtls_uint32_unaligned_t;
85typedef uint64_t __attribute__((__aligned__(1))) mbedtls_uint64_unaligned_t;
86 #endif
87
Dave Rodgman55b5dd22024-01-19 14:06:52 +000088/*
89 * We try to force mbedtls_(get|put)_unaligned_uintXX to be always inline, because this results
90 * in code that is both smaller and faster. IAR and gcc both benefit from this when optimising
91 * for size.
92 */
93
/**
 * Read the unsigned 16 bits integer from the given address, which need not
 * be aligned.
 *
 * \param p pointer to 2 bytes of data
 * \return Data at the given address
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline uint16_t mbedtls_get_unaligned_uint16(const void *p)
{
    uint16_t r;
#if defined(UINT_UNALIGNED)
    /* Read through a packed/under-aligned typedef: a single unaligned load.
     * p is const-qualified, so keep the qualifier rather than casting it away. */
    const mbedtls_uint16_unaligned_t *p16 = (const mbedtls_uint16_unaligned_t *) p;
    r = *p16;
#else
    /* memcpy of a small constant size is lowered to plain loads/stores by
     * compilers that support efficient unaligned access. */
    memcpy(&r, p, sizeof(r));
#endif
    return r;
}
117
/**
 * Store a 16-bit unsigned integer at an address that may be unaligned.
 *
 * \param p pointer to 2 bytes of writable memory
 * \param x value to store
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline void mbedtls_put_unaligned_uint16(void *p, uint16_t x)
{
#if defined(UINT_UNALIGNED)
    /* Packed typedef lets the compiler emit one unaligned store. */
    mbedtls_uint16_unaligned_t *dest = (mbedtls_uint16_unaligned_t *) p;
    *dest = x;
#else
    /* Small fixed-size memcpy; optimised to direct stores where possible. */
    memcpy(p, &x, sizeof(uint16_t));
#endif
}
139
/**
 * Read the unsigned 32 bits integer from the given address, which need not
 * be aligned.
 *
 * \param p pointer to 4 bytes of data
 * \return Data at the given address
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline uint32_t mbedtls_get_unaligned_uint32(const void *p)
{
    uint32_t r;
#if defined(UINT_UNALIGNED)
    /* Read through a packed/under-aligned typedef: a single unaligned load.
     * p is const-qualified, so keep the qualifier rather than casting it away. */
    const mbedtls_uint32_unaligned_t *p32 = (const mbedtls_uint32_unaligned_t *) p;
    r = *p32;
#else
    /* memcpy of a small constant size is lowered to plain loads/stores by
     * compilers that support efficient unaligned access. */
    memcpy(&r, p, sizeof(r));
#endif
    return r;
}
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000163
Dave Rodgman96d61d12022-11-24 19:33:22 +0000164/**
165 * Write the unsigned 32 bits integer to the given address, which need not
166 * be aligned.
167 *
168 * \param p pointer to 4 bytes of data
169 * \param x data to write
170 */
Dave Rodgman55b5dd22024-01-19 14:06:52 +0000171#if defined(__IAR_SYSTEMS_ICC__)
172#pragma inline = forced
173#elif defined(__GNUC__)
174__attribute__((always_inline))
175#endif
176static inline void mbedtls_put_unaligned_uint32(void *p, uint32_t x)
Dave Rodgman96d61d12022-11-24 19:33:22 +0000177{
Dave Rodgmanc5812642024-01-19 14:04:28 +0000178#if defined(UINT_UNALIGNED)
179 mbedtls_uint32_unaligned_t *p32 = (mbedtls_uint32_unaligned_t *) p;
180 *p32 = x;
181#else
Gilles Peskine449bd832023-01-11 14:50:10 +0100182 memcpy(p, &x, sizeof(x));
Dave Rodgmanc5812642024-01-19 14:04:28 +0000183#endif
Dave Rodgman96d61d12022-11-24 19:33:22 +0000184}
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000185
/**
 * Read the unsigned 64 bits integer from the given address, which need not
 * be aligned.
 *
 * \param p pointer to 8 bytes of data
 * \return Data at the given address
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline uint64_t mbedtls_get_unaligned_uint64(const void *p)
{
    uint64_t r;
#if defined(UINT_UNALIGNED)
    /* Read through a packed/under-aligned typedef: a single unaligned load.
     * p is const-qualified, so keep the qualifier rather than casting it away. */
    const mbedtls_uint64_unaligned_t *p64 = (const mbedtls_uint64_unaligned_t *) p;
    r = *p64;
#else
    /* memcpy of a small constant size is lowered to plain loads/stores by
     * compilers that support efficient unaligned access. */
    memcpy(&r, p, sizeof(r));
#endif
    return r;
}
209
/**
 * Store a 64-bit unsigned integer at an address that may be unaligned.
 *
 * \param p pointer to 8 bytes of writable memory
 * \param x value to store
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline void mbedtls_put_unaligned_uint64(void *p, uint64_t x)
{
#if defined(UINT_UNALIGNED)
    /* Packed typedef lets the compiler emit one unaligned store. */
    mbedtls_uint64_unaligned_t *dest = (mbedtls_uint64_unaligned_t *) p;
    *dest = x;
#else
    /* Small fixed-size memcpy; optimised to direct stores where possible. */
    memcpy(p, &x, sizeof(uint64_t));
#endif
}
231
Dave Rodgmanc5812642024-01-19 14:04:28 +0000232#if defined(MBEDTLS_POP_IAR_LANGUAGE_PRAGMA)
233#pragma language=restore
234#endif
235
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000236/** Byte Reading Macros
237 *
238 * Given a multi-byte integer \p x, MBEDTLS_BYTE_n retrieves the n-th
239 * byte from x, where byte 0 is the least significant byte.
240 */
Gilles Peskine449bd832023-01-11 14:50:10 +0100241#define MBEDTLS_BYTE_0(x) ((uint8_t) ((x) & 0xff))
Dave Rodgman914c6322023-03-01 09:30:14 +0000242#define MBEDTLS_BYTE_1(x) ((uint8_t) (((x) >> 8) & 0xff))
Gilles Peskine449bd832023-01-11 14:50:10 +0100243#define MBEDTLS_BYTE_2(x) ((uint8_t) (((x) >> 16) & 0xff))
244#define MBEDTLS_BYTE_3(x) ((uint8_t) (((x) >> 24) & 0xff))
245#define MBEDTLS_BYTE_4(x) ((uint8_t) (((x) >> 32) & 0xff))
246#define MBEDTLS_BYTE_5(x) ((uint8_t) (((x) >> 40) & 0xff))
247#define MBEDTLS_BYTE_6(x) ((uint8_t) (((x) >> 48) & 0xff))
248#define MBEDTLS_BYTE_7(x) ((uint8_t) (((x) >> 56) & 0xff))
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000249
Dave Rodgmanf7f1f742022-11-28 14:52:45 +0000250/*
251 * Detect GCC built-in byteswap routines
252 */
253#if defined(__GNUC__) && defined(__GNUC_PREREQ)
Gilles Peskine449bd832023-01-11 14:50:10 +0100254#if __GNUC_PREREQ(4, 8)
Dave Rodgmanf7f1f742022-11-28 14:52:45 +0000255#define MBEDTLS_BSWAP16 __builtin_bswap16
256#endif /* __GNUC_PREREQ(4,8) */
Gilles Peskine449bd832023-01-11 14:50:10 +0100257#if __GNUC_PREREQ(4, 3)
Dave Rodgmanf7f1f742022-11-28 14:52:45 +0000258#define MBEDTLS_BSWAP32 __builtin_bswap32
259#define MBEDTLS_BSWAP64 __builtin_bswap64
260#endif /* __GNUC_PREREQ(4,3) */
261#endif /* defined(__GNUC__) && defined(__GNUC_PREREQ) */
262
263/*
264 * Detect Clang built-in byteswap routines
265 */
266#if defined(__clang__) && defined(__has_builtin)
Dave Rodgmane47899d2023-02-28 17:39:03 +0000267#if __has_builtin(__builtin_bswap16) && !defined(MBEDTLS_BSWAP16)
Dave Rodgmanf7f1f742022-11-28 14:52:45 +0000268#define MBEDTLS_BSWAP16 __builtin_bswap16
269#endif /* __has_builtin(__builtin_bswap16) */
Dave Rodgmane47899d2023-02-28 17:39:03 +0000270#if __has_builtin(__builtin_bswap32) && !defined(MBEDTLS_BSWAP32)
Dave Rodgmanf7f1f742022-11-28 14:52:45 +0000271#define MBEDTLS_BSWAP32 __builtin_bswap32
272#endif /* __has_builtin(__builtin_bswap32) */
Dave Rodgmane47899d2023-02-28 17:39:03 +0000273#if __has_builtin(__builtin_bswap64) && !defined(MBEDTLS_BSWAP64)
Dave Rodgmanf7f1f742022-11-28 14:52:45 +0000274#define MBEDTLS_BSWAP64 __builtin_bswap64
275#endif /* __has_builtin(__builtin_bswap64) */
276#endif /* defined(__clang__) && defined(__has_builtin) */
277
278/*
279 * Detect MSVC built-in byteswap routines
280 */
281#if defined(_MSC_VER)
Dave Rodgmane47899d2023-02-28 17:39:03 +0000282#if !defined(MBEDTLS_BSWAP16)
Dave Rodgmanf7f1f742022-11-28 14:52:45 +0000283#define MBEDTLS_BSWAP16 _byteswap_ushort
Dave Rodgmane47899d2023-02-28 17:39:03 +0000284#endif
285#if !defined(MBEDTLS_BSWAP32)
Dave Rodgmanf7f1f742022-11-28 14:52:45 +0000286#define MBEDTLS_BSWAP32 _byteswap_ulong
Dave Rodgmane47899d2023-02-28 17:39:03 +0000287#endif
288#if !defined(MBEDTLS_BSWAP64)
Dave Rodgmanf7f1f742022-11-28 14:52:45 +0000289#define MBEDTLS_BSWAP64 _byteswap_uint64
Dave Rodgmane47899d2023-02-28 17:39:03 +0000290#endif
Dave Rodgmanf7f1f742022-11-28 14:52:45 +0000291#endif /* defined(_MSC_VER) */
292
Dave Rodgman2dae4b32022-11-30 12:07:36 +0000293/* Detect armcc built-in byteswap routine */
Dave Rodgmane47899d2023-02-28 17:39:03 +0000294#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 410000) && !defined(MBEDTLS_BSWAP32)
Tom Cosgrovef2b5a132023-04-26 17:00:12 +0100295#if defined(__ARM_ACLE) /* ARM Compiler 6 - earlier versions don't need a header */
296#include <arm_acle.h>
297#endif
Dave Rodgman2dae4b32022-11-30 12:07:36 +0000298#define MBEDTLS_BSWAP32 __rev
299#endif
300
Dave Rodgman650674b2023-12-05 12:16:48 +0000301/* Detect IAR built-in byteswap routine */
302#if defined(__IAR_SYSTEMS_ICC__)
303#if defined(__ARM_ACLE)
304#include <arm_acle.h>
305#define MBEDTLS_BSWAP16(x) ((uint16_t) __rev16((uint32_t) (x)))
306#define MBEDTLS_BSWAP32 __rev
307#define MBEDTLS_BSWAP64 __revll
308#endif
309#endif
310
Dave Rodgmanf7f1f742022-11-28 14:52:45 +0000311/*
312 * Where compiler built-ins are not present, fall back to C code that the
313 * compiler may be able to detect and transform into the relevant bswap or
314 * similar instruction.
315 */
#if !defined(MBEDTLS_BSWAP16)
/* Portable fallback: swap the two bytes of a 16-bit value. Modern compilers
 * recognise this pattern and emit a single byte-reverse instruction. */
static inline uint16_t mbedtls_bswap16(uint16_t x)
{
    return (uint16_t) (((x & 0x00ff) << 8) | ((x & 0xff00) >> 8));
}
#define MBEDTLS_BSWAP16 mbedtls_bswap16
#endif /* !defined(MBEDTLS_BSWAP16) */
Dave Rodgman6298b242022-11-28 14:51:49 +0000325
Dave Rodgmanf7f1f742022-11-28 14:52:45 +0000326#if !defined(MBEDTLS_BSWAP32)
Gilles Peskine449bd832023-01-11 14:50:10 +0100327static inline uint32_t mbedtls_bswap32(uint32_t x)
328{
Dave Rodgman6298b242022-11-28 14:51:49 +0000329 return
Gilles Peskine449bd832023-01-11 14:50:10 +0100330 (x & 0x000000ff) << 24 |
331 (x & 0x0000ff00) << 8 |
332 (x & 0x00ff0000) >> 8 |
333 (x & 0xff000000) >> 24;
Dave Rodgman6298b242022-11-28 14:51:49 +0000334}
335#define MBEDTLS_BSWAP32 mbedtls_bswap32
Dave Rodgmanf7f1f742022-11-28 14:52:45 +0000336#endif /* !defined(MBEDTLS_BSWAP32) */
Dave Rodgman6298b242022-11-28 14:51:49 +0000337
Dave Rodgmanf7f1f742022-11-28 14:52:45 +0000338#if !defined(MBEDTLS_BSWAP64)
Gilles Peskine449bd832023-01-11 14:50:10 +0100339static inline uint64_t mbedtls_bswap64(uint64_t x)
340{
Dave Rodgman6298b242022-11-28 14:51:49 +0000341 return
Tom Cosgrovebbe166e2023-03-08 13:23:24 +0000342 (x & 0x00000000000000ffULL) << 56 |
343 (x & 0x000000000000ff00ULL) << 40 |
344 (x & 0x0000000000ff0000ULL) << 24 |
345 (x & 0x00000000ff000000ULL) << 8 |
346 (x & 0x000000ff00000000ULL) >> 8 |
347 (x & 0x0000ff0000000000ULL) >> 24 |
348 (x & 0x00ff000000000000ULL) >> 40 |
349 (x & 0xff00000000000000ULL) >> 56;
Dave Rodgman6298b242022-11-28 14:51:49 +0000350}
351#define MBEDTLS_BSWAP64 mbedtls_bswap64
Dave Rodgmanf7f1f742022-11-28 14:52:45 +0000352#endif /* !defined(MBEDTLS_BSWAP64) */
Dave Rodgman6298b242022-11-28 14:51:49 +0000353
Dave Rodgmane5c42592022-11-28 14:47:46 +0000354#if !defined(__BYTE_ORDER__)
Dave Rodgmanf3c04f32023-12-05 12:06:11 +0000355
356#if defined(__LITTLE_ENDIAN__)
357/* IAR defines __xxx_ENDIAN__, but not __BYTE_ORDER__ */
358#define MBEDTLS_IS_BIG_ENDIAN 0
359#elif defined(__BIG_ENDIAN__)
360#define MBEDTLS_IS_BIG_ENDIAN 1
361#else
Dave Rodgmane5c42592022-11-28 14:47:46 +0000362static const uint16_t mbedtls_byte_order_detector = { 0x100 };
363#define MBEDTLS_IS_BIG_ENDIAN (*((unsigned char *) (&mbedtls_byte_order_detector)) == 0x01)
Dave Rodgmanf3c04f32023-12-05 12:06:11 +0000364#endif
365
Dave Rodgmane5c42592022-11-28 14:47:46 +0000366#else
Dave Rodgmanf3c04f32023-12-05 12:06:11 +0000367
368#if (__BYTE_ORDER__) == (__ORDER_BIG_ENDIAN__)
369#define MBEDTLS_IS_BIG_ENDIAN 1
370#else
371#define MBEDTLS_IS_BIG_ENDIAN 0
372#endif
373
Dave Rodgmane5c42592022-11-28 14:47:46 +0000374#endif /* !defined(__BYTE_ORDER__) */
375
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000376/**
377 * Get the unsigned 32 bits integer corresponding to four bytes in
378 * big-endian order (MSB first).
379 *
380 * \param data Base address of the memory to get the four bytes from.
381 * \param offset Offset from \p data of the first and most significant
382 * byte of the four bytes to build the 32 bits unsigned
383 * integer from.
384 */
Dave Rodgman914c6322023-03-01 09:30:14 +0000385#define MBEDTLS_GET_UINT32_BE(data, offset) \
386 ((MBEDTLS_IS_BIG_ENDIAN) \
Dave Rodgmana5110b02022-11-28 14:48:45 +0000387 ? mbedtls_get_unaligned_uint32((data) + (offset)) \
388 : MBEDTLS_BSWAP32(mbedtls_get_unaligned_uint32((data) + (offset))) \
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000389 )
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000390
391/**
392 * Put in memory a 32 bits unsigned integer in big-endian order.
393 *
394 * \param n 32 bits unsigned integer to put in memory.
395 * \param data Base address of the memory where to put the 32
396 * bits unsigned integer in.
397 * \param offset Offset from \p data where to put the most significant
398 * byte of the 32 bits unsigned integer \p n.
399 */
Dave Rodgman914c6322023-03-01 09:30:14 +0000400#define MBEDTLS_PUT_UINT32_BE(n, data, offset) \
Gilles Peskine449bd832023-01-11 14:50:10 +0100401 { \
Dave Rodgman914c6322023-03-01 09:30:14 +0000402 if (MBEDTLS_IS_BIG_ENDIAN) \
Gilles Peskine449bd832023-01-11 14:50:10 +0100403 { \
Dave Rodgman914c6322023-03-01 09:30:14 +0000404 mbedtls_put_unaligned_uint32((data) + (offset), (uint32_t) (n)); \
Gilles Peskine449bd832023-01-11 14:50:10 +0100405 } \
406 else \
407 { \
408 mbedtls_put_unaligned_uint32((data) + (offset), MBEDTLS_BSWAP32((uint32_t) (n))); \
409 } \
410 }
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000411
412/**
413 * Get the unsigned 32 bits integer corresponding to four bytes in
414 * little-endian order (LSB first).
415 *
416 * \param data Base address of the memory to get the four bytes from.
417 * \param offset Offset from \p data of the first and least significant
418 * byte of the four bytes to build the 32 bits unsigned
419 * integer from.
420 */
Dave Rodgman914c6322023-03-01 09:30:14 +0000421#define MBEDTLS_GET_UINT32_LE(data, offset) \
422 ((MBEDTLS_IS_BIG_ENDIAN) \
Dave Rodgmana5110b02022-11-28 14:48:45 +0000423 ? MBEDTLS_BSWAP32(mbedtls_get_unaligned_uint32((data) + (offset))) \
424 : mbedtls_get_unaligned_uint32((data) + (offset)) \
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000425 )
Dave Rodgmana5110b02022-11-28 14:48:45 +0000426
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000427
428/**
429 * Put in memory a 32 bits unsigned integer in little-endian order.
430 *
431 * \param n 32 bits unsigned integer to put in memory.
432 * \param data Base address of the memory where to put the 32
433 * bits unsigned integer in.
434 * \param offset Offset from \p data where to put the least significant
435 * byte of the 32 bits unsigned integer \p n.
436 */
Dave Rodgman914c6322023-03-01 09:30:14 +0000437#define MBEDTLS_PUT_UINT32_LE(n, data, offset) \
Gilles Peskine449bd832023-01-11 14:50:10 +0100438 { \
Dave Rodgman914c6322023-03-01 09:30:14 +0000439 if (MBEDTLS_IS_BIG_ENDIAN) \
Gilles Peskine449bd832023-01-11 14:50:10 +0100440 { \
441 mbedtls_put_unaligned_uint32((data) + (offset), MBEDTLS_BSWAP32((uint32_t) (n))); \
442 } \
443 else \
444 { \
Dave Rodgman914c6322023-03-01 09:30:14 +0000445 mbedtls_put_unaligned_uint32((data) + (offset), ((uint32_t) (n))); \
Gilles Peskine449bd832023-01-11 14:50:10 +0100446 } \
447 }
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000448
449/**
450 * Get the unsigned 16 bits integer corresponding to two bytes in
451 * little-endian order (LSB first).
452 *
453 * \param data Base address of the memory to get the two bytes from.
454 * \param offset Offset from \p data of the first and least significant
455 * byte of the two bytes to build the 16 bits unsigned
456 * integer from.
457 */
Dave Rodgman914c6322023-03-01 09:30:14 +0000458#define MBEDTLS_GET_UINT16_LE(data, offset) \
459 ((MBEDTLS_IS_BIG_ENDIAN) \
Dave Rodgmana5110b02022-11-28 14:48:45 +0000460 ? MBEDTLS_BSWAP16(mbedtls_get_unaligned_uint16((data) + (offset))) \
461 : mbedtls_get_unaligned_uint16((data) + (offset)) \
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000462 )
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000463
464/**
465 * Put in memory a 16 bits unsigned integer in little-endian order.
466 *
467 * \param n 16 bits unsigned integer to put in memory.
468 * \param data Base address of the memory where to put the 16
469 * bits unsigned integer in.
470 * \param offset Offset from \p data where to put the least significant
471 * byte of the 16 bits unsigned integer \p n.
472 */
Dave Rodgman914c6322023-03-01 09:30:14 +0000473#define MBEDTLS_PUT_UINT16_LE(n, data, offset) \
Gilles Peskine449bd832023-01-11 14:50:10 +0100474 { \
Dave Rodgman914c6322023-03-01 09:30:14 +0000475 if (MBEDTLS_IS_BIG_ENDIAN) \
Gilles Peskine449bd832023-01-11 14:50:10 +0100476 { \
477 mbedtls_put_unaligned_uint16((data) + (offset), MBEDTLS_BSWAP16((uint16_t) (n))); \
478 } \
479 else \
480 { \
Dave Rodgman914c6322023-03-01 09:30:14 +0000481 mbedtls_put_unaligned_uint16((data) + (offset), (uint16_t) (n)); \
Gilles Peskine449bd832023-01-11 14:50:10 +0100482 } \
483 }
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000484
485/**
486 * Get the unsigned 16 bits integer corresponding to two bytes in
487 * big-endian order (MSB first).
488 *
489 * \param data Base address of the memory to get the two bytes from.
490 * \param offset Offset from \p data of the first and most significant
491 * byte of the two bytes to build the 16 bits unsigned
492 * integer from.
493 */
Dave Rodgman914c6322023-03-01 09:30:14 +0000494#define MBEDTLS_GET_UINT16_BE(data, offset) \
495 ((MBEDTLS_IS_BIG_ENDIAN) \
Dave Rodgmana5110b02022-11-28 14:48:45 +0000496 ? mbedtls_get_unaligned_uint16((data) + (offset)) \
497 : MBEDTLS_BSWAP16(mbedtls_get_unaligned_uint16((data) + (offset))) \
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000498 )
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000499
500/**
501 * Put in memory a 16 bits unsigned integer in big-endian order.
502 *
503 * \param n 16 bits unsigned integer to put in memory.
504 * \param data Base address of the memory where to put the 16
505 * bits unsigned integer in.
506 * \param offset Offset from \p data where to put the most significant
507 * byte of the 16 bits unsigned integer \p n.
508 */
Dave Rodgman914c6322023-03-01 09:30:14 +0000509#define MBEDTLS_PUT_UINT16_BE(n, data, offset) \
Gilles Peskine449bd832023-01-11 14:50:10 +0100510 { \
Dave Rodgman914c6322023-03-01 09:30:14 +0000511 if (MBEDTLS_IS_BIG_ENDIAN) \
Gilles Peskine449bd832023-01-11 14:50:10 +0100512 { \
Dave Rodgman914c6322023-03-01 09:30:14 +0000513 mbedtls_put_unaligned_uint16((data) + (offset), (uint16_t) (n)); \
Gilles Peskine449bd832023-01-11 14:50:10 +0100514 } \
515 else \
516 { \
517 mbedtls_put_unaligned_uint16((data) + (offset), MBEDTLS_BSWAP16((uint16_t) (n))); \
518 } \
519 }
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000520
521/**
522 * Get the unsigned 24 bits integer corresponding to three bytes in
523 * big-endian order (MSB first).
524 *
525 * \param data Base address of the memory to get the three bytes from.
526 * \param offset Offset from \p data of the first and most significant
527 * byte of the three bytes to build the 24 bits unsigned
528 * integer from.
529 */
Dave Rodgman914c6322023-03-01 09:30:14 +0000530#define MBEDTLS_GET_UINT24_BE(data, offset) \
531 ( \
532 ((uint32_t) (data)[(offset)] << 16) \
533 | ((uint32_t) (data)[(offset) + 1] << 8) \
534 | ((uint32_t) (data)[(offset) + 2]) \
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000535 )
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000536
537/**
538 * Put in memory a 24 bits unsigned integer in big-endian order.
539 *
540 * \param n 24 bits unsigned integer to put in memory.
541 * \param data Base address of the memory where to put the 24
542 * bits unsigned integer in.
543 * \param offset Offset from \p data where to put the most significant
544 * byte of the 24 bits unsigned integer \p n.
545 */
Gilles Peskine449bd832023-01-11 14:50:10 +0100546#define MBEDTLS_PUT_UINT24_BE(n, data, offset) \
Dave Rodgman914c6322023-03-01 09:30:14 +0000547 { \
548 (data)[(offset)] = MBEDTLS_BYTE_2(n); \
Gilles Peskine449bd832023-01-11 14:50:10 +0100549 (data)[(offset) + 1] = MBEDTLS_BYTE_1(n); \
550 (data)[(offset) + 2] = MBEDTLS_BYTE_0(n); \
551 }
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000552
553/**
554 * Get the unsigned 24 bits integer corresponding to three bytes in
555 * little-endian order (LSB first).
556 *
557 * \param data Base address of the memory to get the three bytes from.
558 * \param offset Offset from \p data of the first and least significant
559 * byte of the three bytes to build the 24 bits unsigned
560 * integer from.
561 */
Dave Rodgman914c6322023-03-01 09:30:14 +0000562#define MBEDTLS_GET_UINT24_LE(data, offset) \
563 ( \
564 ((uint32_t) (data)[(offset)]) \
Gilles Peskine449bd832023-01-11 14:50:10 +0100565 | ((uint32_t) (data)[(offset) + 1] << 8) \
566 | ((uint32_t) (data)[(offset) + 2] << 16) \
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000567 )
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000568
569/**
570 * Put in memory a 24 bits unsigned integer in little-endian order.
571 *
572 * \param n 24 bits unsigned integer to put in memory.
573 * \param data Base address of the memory where to put the 24
574 * bits unsigned integer in.
575 * \param offset Offset from \p data where to put the least significant
576 * byte of the 24 bits unsigned integer \p n.
577 */
Gilles Peskine449bd832023-01-11 14:50:10 +0100578#define MBEDTLS_PUT_UINT24_LE(n, data, offset) \
Dave Rodgman914c6322023-03-01 09:30:14 +0000579 { \
580 (data)[(offset)] = MBEDTLS_BYTE_0(n); \
Gilles Peskine449bd832023-01-11 14:50:10 +0100581 (data)[(offset) + 1] = MBEDTLS_BYTE_1(n); \
582 (data)[(offset) + 2] = MBEDTLS_BYTE_2(n); \
583 }
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000584
585/**
586 * Get the unsigned 64 bits integer corresponding to eight bytes in
587 * big-endian order (MSB first).
588 *
589 * \param data Base address of the memory to get the eight bytes from.
590 * \param offset Offset from \p data of the first and most significant
591 * byte of the eight bytes to build the 64 bits unsigned
592 * integer from.
593 */
Dave Rodgman914c6322023-03-01 09:30:14 +0000594#define MBEDTLS_GET_UINT64_BE(data, offset) \
595 ((MBEDTLS_IS_BIG_ENDIAN) \
Dave Rodgmana5110b02022-11-28 14:48:45 +0000596 ? mbedtls_get_unaligned_uint64((data) + (offset)) \
597 : MBEDTLS_BSWAP64(mbedtls_get_unaligned_uint64((data) + (offset))) \
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000598 )
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000599
600/**
601 * Put in memory a 64 bits unsigned integer in big-endian order.
602 *
603 * \param n 64 bits unsigned integer to put in memory.
604 * \param data Base address of the memory where to put the 64
605 * bits unsigned integer in.
606 * \param offset Offset from \p data where to put the most significant
607 * byte of the 64 bits unsigned integer \p n.
608 */
Dave Rodgman914c6322023-03-01 09:30:14 +0000609#define MBEDTLS_PUT_UINT64_BE(n, data, offset) \
Gilles Peskine449bd832023-01-11 14:50:10 +0100610 { \
Dave Rodgman914c6322023-03-01 09:30:14 +0000611 if (MBEDTLS_IS_BIG_ENDIAN) \
Gilles Peskine449bd832023-01-11 14:50:10 +0100612 { \
Dave Rodgman914c6322023-03-01 09:30:14 +0000613 mbedtls_put_unaligned_uint64((data) + (offset), (uint64_t) (n)); \
Gilles Peskine449bd832023-01-11 14:50:10 +0100614 } \
615 else \
616 { \
617 mbedtls_put_unaligned_uint64((data) + (offset), MBEDTLS_BSWAP64((uint64_t) (n))); \
618 } \
619 }
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000620
621/**
622 * Get the unsigned 64 bits integer corresponding to eight bytes in
623 * little-endian order (LSB first).
624 *
625 * \param data Base address of the memory to get the eight bytes from.
626 * \param offset Offset from \p data of the first and least significant
627 * byte of the eight bytes to build the 64 bits unsigned
628 * integer from.
629 */
Dave Rodgman914c6322023-03-01 09:30:14 +0000630#define MBEDTLS_GET_UINT64_LE(data, offset) \
631 ((MBEDTLS_IS_BIG_ENDIAN) \
Dave Rodgmana5110b02022-11-28 14:48:45 +0000632 ? MBEDTLS_BSWAP64(mbedtls_get_unaligned_uint64((data) + (offset))) \
633 : mbedtls_get_unaligned_uint64((data) + (offset)) \
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000634 )
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000635
636/**
637 * Put in memory a 64 bits unsigned integer in little-endian order.
638 *
639 * \param n 64 bits unsigned integer to put in memory.
640 * \param data Base address of the memory where to put the 64
641 * bits unsigned integer in.
642 * \param offset Offset from \p data where to put the least significant
643 * byte of the 64 bits unsigned integer \p n.
644 */
Dave Rodgman914c6322023-03-01 09:30:14 +0000645#define MBEDTLS_PUT_UINT64_LE(n, data, offset) \
Gilles Peskine449bd832023-01-11 14:50:10 +0100646 { \
Dave Rodgman914c6322023-03-01 09:30:14 +0000647 if (MBEDTLS_IS_BIG_ENDIAN) \
Gilles Peskine449bd832023-01-11 14:50:10 +0100648 { \
649 mbedtls_put_unaligned_uint64((data) + (offset), MBEDTLS_BSWAP64((uint64_t) (n))); \
650 } \
651 else \
652 { \
Dave Rodgman914c6322023-03-01 09:30:14 +0000653 mbedtls_put_unaligned_uint64((data) + (offset), (uint64_t) (n)); \
Gilles Peskine449bd832023-01-11 14:50:10 +0100654 } \
655 }
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000656
657#endif /* MBEDTLS_LIBRARY_ALIGNMENT_H */