blob: 8db550fa536a0b7f68cf97d5609567bf39f255a5 [file] [log] [blame]
/**
 * \file alignment.h
 *
 * \brief Utility code for dealing with unaligned memory accesses
 */
/*
 * Copyright The Mbed TLS Contributors
 * SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
 */
10
11#ifndef MBEDTLS_LIBRARY_ALIGNMENT_H
12#define MBEDTLS_LIBRARY_ALIGNMENT_H
13
14#include <stdint.h>
Dave Rodgman96d61d12022-11-24 19:33:22 +000015#include <string.h>
Dave Rodgmanf7f1f742022-11-28 14:52:45 +000016#include <stdlib.h>
Dave Rodgmanfbc23222022-11-24 18:07:37 +000017
/*
 * Define MBEDTLS_EFFICIENT_UNALIGNED_ACCESS for architectures where unaligned memory
 * accesses are known to be efficient.
 *
 * All functions defined here will behave correctly regardless, but might be less
 * efficient when this is not defined.
 */
#if defined(__ARM_FEATURE_UNALIGNED) \
    || defined(MBEDTLS_ARCH_IS_X86) || defined(MBEDTLS_ARCH_IS_X64) \
    || defined(MBEDTLS_PLATFORM_IS_WINDOWS_ON_ARM64)
/*
 * __ARM_FEATURE_UNALIGNED is defined where appropriate by armcc, gcc 7, clang 9
 * (and later versions) for Arm v7 and later; all x86 platforms should have
 * efficient unaligned access.
 *
 * https://learn.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions?view=msvc-170#alignment
 * specifies that on Windows-on-Arm64, unaligned access is safe (except for uncached
 * device memory).
 */
#define MBEDTLS_EFFICIENT_UNALIGNED_ACCESS
#endif
39
/*
 * Select a mechanism for single-instruction unaligned access, where available:
 * - UINT_UNALIGNED: IAR __packed typedefs (read/write via a packed scalar type).
 * - UINT_UNALIGNED_UNION: GCC packed-union member access.
 * If neither is defined below, the access functions fall back to memcpy().
 */
#if defined(__IAR_SYSTEMS_ICC__) && \
    (defined(MBEDTLS_ARCH_IS_ARM64) || defined(MBEDTLS_ARCH_IS_ARM32) \
    || defined(__ICCRX__) || defined(__ICCRL78__) || defined(__ICCRISCV__))
#pragma language=save
#pragma language=extended
#define MBEDTLS_POP_IAR_LANGUAGE_PRAGMA
/* IAR recommend this technique for accessing unaligned data in
 * https://www.iar.com/knowledge/support/technical-notes/compiler/accessing-unaligned-data
 * This results in a single load / store instruction (if unaligned access is supported).
 * According to that document, this is only supported on certain architectures.
 */
 #define UINT_UNALIGNED
typedef uint16_t __packed mbedtls_uint16_unaligned_t;
typedef uint32_t __packed mbedtls_uint32_unaligned_t;
typedef uint64_t __packed mbedtls_uint64_unaligned_t;
#elif defined(MBEDTLS_COMPILER_IS_GCC) && (MBEDTLS_GCC_VERSION >= 40504) && \
    ((MBEDTLS_GCC_VERSION < 90300) || (!defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)))
/*
 * Old versions of gcc, depending on how the target is specified, may generate a branch to memcpy
 * for calls like `memcpy(dest, src, 4)` rather than generating some LDR or LDRB instructions
 * (similar for stores).
 * Recent versions where unaligned access is not enabled also do this.
 *
 * For performance (and code size, in some cases), we want to avoid the branch and just generate
 * some inline load/store instructions since the access is small and constant-size.
 *
 * The manual states:
 * "The aligned attribute specifies a minimum alignment for the variable or structure field,
 * measured in bytes."
 * https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html
 *
 * Tested with several versions of GCC from 4.5.0 up to 9.3.0
 * We don't enable for older than 4.5.0 as this has not been tested.
 */
 #define UINT_UNALIGNED_UNION
typedef union { uint16_t x; } __attribute__((packed)) mbedtls_uint16_unaligned_t;
typedef union { uint32_t x; } __attribute__((packed)) mbedtls_uint32_unaligned_t;
typedef union { uint64_t x; } __attribute__((packed)) mbedtls_uint64_unaligned_t;
 #endif
79
/*
 * We try to force mbedtls_(get|put)_unaligned_uintXX to be always inline, because this results
 * in code that is both smaller and faster. IAR and gcc both benefit from this when optimising
 * for size.
 */
85
/**
 * Read the unsigned 16 bits integer from the given address, which need not
 * be aligned.
 *
 * \param p pointer to 2 bytes of data
 * \return Data at the given address
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline uint16_t mbedtls_get_unaligned_uint16(const void *p)
{
    uint16_t r;
#if defined(UINT_UNALIGNED)
    /* IAR: read through a __packed typedef, which generates a single
     * unaligned load where the architecture supports it. */
    mbedtls_uint16_unaligned_t *p16 = (mbedtls_uint16_unaligned_t *) p;
    r = *p16;
#elif defined(UINT_UNALIGNED_UNION)
    /* Old or size-optimising GCC: read through a packed union member to
     * avoid a branch to memcpy. */
    mbedtls_uint16_unaligned_t *p16 = (mbedtls_uint16_unaligned_t *) p;
    r = p16->x;
#else
    /* Portable fallback: memcpy is well-defined for any alignment and is
     * typically inlined to a plain load by modern compilers. */
    memcpy(&r, p, sizeof(r));
#endif
    return r;
}
112
/**
 * Write the unsigned 16 bits integer to the given address, which need not
 * be aligned.
 *
 * \param p pointer to 2 bytes of data
 * \param x data to write
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline void mbedtls_put_unaligned_uint16(void *p, uint16_t x)
{
#if defined(UINT_UNALIGNED)
    /* IAR: write through a __packed typedef (single unaligned store where
     * supported). */
    mbedtls_uint16_unaligned_t *p16 = (mbedtls_uint16_unaligned_t *) p;
    *p16 = x;
#elif defined(UINT_UNALIGNED_UNION)
    /* Old or size-optimising GCC: write through a packed union member to
     * avoid a branch to memcpy. */
    mbedtls_uint16_unaligned_t *p16 = (mbedtls_uint16_unaligned_t *) p;
    p16->x = x;
#else
    /* Portable fallback: well-defined for any alignment. */
    memcpy(p, &x, sizeof(x));
#endif
}
137
/**
 * Read the unsigned 32 bits integer from the given address, which need not
 * be aligned.
 *
 * \param p pointer to 4 bytes of data
 * \return Data at the given address
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline uint32_t mbedtls_get_unaligned_uint32(const void *p)
{
    uint32_t r;
#if defined(UINT_UNALIGNED)
    /* IAR: read through a __packed typedef, which generates a single
     * unaligned load where the architecture supports it. */
    mbedtls_uint32_unaligned_t *p32 = (mbedtls_uint32_unaligned_t *) p;
    r = *p32;
#elif defined(UINT_UNALIGNED_UNION)
    /* Old or size-optimising GCC: read through a packed union member to
     * avoid a branch to memcpy. */
    mbedtls_uint32_unaligned_t *p32 = (mbedtls_uint32_unaligned_t *) p;
    r = p32->x;
#else
    /* Portable fallback: memcpy is well-defined for any alignment and is
     * typically inlined to a plain load by modern compilers. */
    memcpy(&r, p, sizeof(r));
#endif
    return r;
}
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000164
Dave Rodgman96d61d12022-11-24 19:33:22 +0000165/**
166 * Write the unsigned 32 bits integer to the given address, which need not
167 * be aligned.
168 *
169 * \param p pointer to 4 bytes of data
170 * \param x data to write
171 */
Dave Rodgman55b5dd22024-01-19 14:06:52 +0000172#if defined(__IAR_SYSTEMS_ICC__)
173#pragma inline = forced
174#elif defined(__GNUC__)
175__attribute__((always_inline))
176#endif
177static inline void mbedtls_put_unaligned_uint32(void *p, uint32_t x)
Dave Rodgman96d61d12022-11-24 19:33:22 +0000178{
Dave Rodgmanc5812642024-01-19 14:04:28 +0000179#if defined(UINT_UNALIGNED)
180 mbedtls_uint32_unaligned_t *p32 = (mbedtls_uint32_unaligned_t *) p;
181 *p32 = x;
Dave Rodgmanb327a1e2024-02-06 11:21:26 +0000182#elif defined(UINT_UNALIGNED_UNION)
183 mbedtls_uint32_unaligned_t *p32 = (mbedtls_uint32_unaligned_t *) p;
184 p32->x = x;
Dave Rodgmanc5812642024-01-19 14:04:28 +0000185#else
Gilles Peskine449bd832023-01-11 14:50:10 +0100186 memcpy(p, &x, sizeof(x));
Dave Rodgmanc5812642024-01-19 14:04:28 +0000187#endif
Dave Rodgman96d61d12022-11-24 19:33:22 +0000188}
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000189
/**
 * Read the unsigned 64 bits integer from the given address, which need not
 * be aligned.
 *
 * \param p pointer to 8 bytes of data
 * \return Data at the given address
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline uint64_t mbedtls_get_unaligned_uint64(const void *p)
{
    uint64_t r;
#if defined(UINT_UNALIGNED)
    /* IAR: read through a __packed typedef, which generates a single
     * unaligned load where the architecture supports it. */
    mbedtls_uint64_unaligned_t *p64 = (mbedtls_uint64_unaligned_t *) p;
    r = *p64;
#elif defined(UINT_UNALIGNED_UNION)
    /* Old or size-optimising GCC: read through a packed union member to
     * avoid a branch to memcpy. */
    mbedtls_uint64_unaligned_t *p64 = (mbedtls_uint64_unaligned_t *) p;
    r = p64->x;
#else
    /* Portable fallback: memcpy is well-defined for any alignment and is
     * typically inlined to a plain load by modern compilers. */
    memcpy(&r, p, sizeof(r));
#endif
    return r;
}
216
/**
 * Write the unsigned 64 bits integer to the given address, which need not
 * be aligned.
 *
 * \param p pointer to 8 bytes of data
 * \param x data to write
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline void mbedtls_put_unaligned_uint64(void *p, uint64_t x)
{
#if defined(UINT_UNALIGNED)
    /* IAR: write through a __packed typedef (single unaligned store where
     * supported). */
    mbedtls_uint64_unaligned_t *p64 = (mbedtls_uint64_unaligned_t *) p;
    *p64 = x;
#elif defined(UINT_UNALIGNED_UNION)
    /* Old or size-optimising GCC: write through a packed union member to
     * avoid a branch to memcpy. */
    mbedtls_uint64_unaligned_t *p64 = (mbedtls_uint64_unaligned_t *) p;
    p64->x = x;
#else
    /* Portable fallback: well-defined for any alignment. */
    memcpy(p, &x, sizeof(x));
#endif
}
241
#if defined(MBEDTLS_POP_IAR_LANGUAGE_PRAGMA)
/* Restore the IAR language-extensions state saved before the __packed
 * typedef definitions above. */
#pragma language=restore
#endif
245
/** Byte Reading Macros
 *
 * Given a multi-byte integer \p x, MBEDTLS_BYTE_n retrieves the n-th
 * byte from x, where byte 0 is the least significant byte.
 */
#define MBEDTLS_BYTE_0(x) ((uint8_t) ((x) & 0xff))
#define MBEDTLS_BYTE_1(x) ((uint8_t) (((x) >> 8) & 0xff))
#define MBEDTLS_BYTE_2(x) ((uint8_t) (((x) >> 16) & 0xff))
#define MBEDTLS_BYTE_3(x) ((uint8_t) (((x) >> 24) & 0xff))
#define MBEDTLS_BYTE_4(x) ((uint8_t) (((x) >> 32) & 0xff))
#define MBEDTLS_BYTE_5(x) ((uint8_t) (((x) >> 40) & 0xff))
#define MBEDTLS_BYTE_6(x) ((uint8_t) (((x) >> 48) & 0xff))
#define MBEDTLS_BYTE_7(x) ((uint8_t) (((x) >> 56) & 0xff))
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000259
/*
 * Detect GCC built-in byteswap routines
 */
#if defined(__GNUC__) && defined(__GNUC_PREREQ)
#if __GNUC_PREREQ(4, 8)
#define MBEDTLS_BSWAP16 __builtin_bswap16
#endif /* __GNUC_PREREQ(4,8) */
#if __GNUC_PREREQ(4, 3)
#define MBEDTLS_BSWAP32 __builtin_bswap32
#define MBEDTLS_BSWAP64 __builtin_bswap64
#endif /* __GNUC_PREREQ(4,3) */
#endif /* defined(__GNUC__) && defined(__GNUC_PREREQ) */

/*
 * Detect Clang built-in byteswap routines
 */
#if defined(__clang__) && defined(__has_builtin)
#if __has_builtin(__builtin_bswap16) && !defined(MBEDTLS_BSWAP16)
#define MBEDTLS_BSWAP16 __builtin_bswap16
#endif /* __has_builtin(__builtin_bswap16) */
#if __has_builtin(__builtin_bswap32) && !defined(MBEDTLS_BSWAP32)
#define MBEDTLS_BSWAP32 __builtin_bswap32
#endif /* __has_builtin(__builtin_bswap32) */
#if __has_builtin(__builtin_bswap64) && !defined(MBEDTLS_BSWAP64)
#define MBEDTLS_BSWAP64 __builtin_bswap64
#endif /* __has_builtin(__builtin_bswap64) */
#endif /* defined(__clang__) && defined(__has_builtin) */

/*
 * Detect MSVC built-in byteswap routines
 */
#if defined(_MSC_VER)
#if !defined(MBEDTLS_BSWAP16)
#define MBEDTLS_BSWAP16 _byteswap_ushort
#endif
#if !defined(MBEDTLS_BSWAP32)
#define MBEDTLS_BSWAP32 _byteswap_ulong
#endif
#if !defined(MBEDTLS_BSWAP64)
#define MBEDTLS_BSWAP64 _byteswap_uint64
#endif
#endif /* defined(_MSC_VER) */

/* Detect armcc built-in byteswap routine */
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 410000) && !defined(MBEDTLS_BSWAP32)
#if defined(__ARM_ACLE)  /* ARM Compiler 6 - earlier versions don't need a header */
#include <arm_acle.h>
#endif
#define MBEDTLS_BSWAP32 __rev
#endif

/* Detect IAR built-in byteswap routine */
#if defined(__IAR_SYSTEMS_ICC__)
#if defined(__ARM_ACLE)
#include <arm_acle.h>
/* __rev16 operates on 32 bits; narrow the result back to 16 bits. */
#define MBEDTLS_BSWAP16(x) ((uint16_t) __rev16((uint32_t) (x)))
#define MBEDTLS_BSWAP32 __rev
#define MBEDTLS_BSWAP64 __revll
#endif
#endif
320
/*
 * Where compiler built-ins are not present, fall back to C code that the
 * compiler may be able to detect and transform into the relevant bswap or
 * similar instruction.
 */
#if !defined(MBEDTLS_BSWAP16)
/* Portable 16-bit byte swap, used when no compiler built-in was detected
 * above. Simple enough for compilers to lower to a rev16-style instruction. */
static inline uint16_t mbedtls_bswap16(uint16_t x)
{
    /* Operands promote to int; the cast discards anything shifted above
     * bit 15, leaving the two bytes exchanged. */
    return (uint16_t) ((x << 8) | (x >> 8));
}
#define MBEDTLS_BSWAP16 mbedtls_bswap16
#endif /* !defined(MBEDTLS_BSWAP16) */
Dave Rodgman6298b242022-11-28 14:51:49 +0000335
#if !defined(MBEDTLS_BSWAP32)
/* Portable 32-bit byte swap, used when no compiler built-in was detected
 * above. Compilers commonly recognise this pattern and emit a single
 * bswap/rev instruction. */
static inline uint32_t mbedtls_bswap32(uint32_t x)
{
    /* Swap adjacent bytes within each 16-bit half, then exchange the
     * two halves with a 16-bit rotate. */
    uint32_t t = ((x & 0x00ff00ffU) << 8) | ((x & 0xff00ff00U) >> 8);
    return (t << 16) | (t >> 16);
}
#define MBEDTLS_BSWAP32 mbedtls_bswap32
#endif /* !defined(MBEDTLS_BSWAP32) */
Dave Rodgman6298b242022-11-28 14:51:49 +0000347
#if !defined(MBEDTLS_BSWAP64)
/* Portable 64-bit byte swap, used when no compiler built-in was detected
 * above. */
static inline uint64_t mbedtls_bswap64(uint64_t x)
{
    /* Swap bytes within each 16-bit lane, then 16-bit lanes within each
     * 32-bit half, then exchange the two halves with a 32-bit rotate. */
    x = ((x & 0x00ff00ff00ff00ffULL) << 8) | ((x & 0xff00ff00ff00ff00ULL) >> 8);
    x = ((x & 0x0000ffff0000ffffULL) << 16) | ((x & 0xffff0000ffff0000ULL) >> 16);
    return (x << 32) | (x >> 32);
}
#define MBEDTLS_BSWAP64 mbedtls_bswap64
#endif /* !defined(MBEDTLS_BSWAP64) */
Dave Rodgman6298b242022-11-28 14:51:49 +0000363
#if !defined(__BYTE_ORDER__)

#if defined(__LITTLE_ENDIAN__)
/* IAR defines __xxx_ENDIAN__, but not __BYTE_ORDER__ */
#define MBEDTLS_IS_BIG_ENDIAN 0
#elif defined(__BIG_ENDIAN__)
#define MBEDTLS_IS_BIG_ENDIAN 1
#else
/* Last-resort detection: store 0x100 in a 16-bit object and inspect its
 * first byte, which is 0x01 only on a big-endian target. Compilers
 * typically constant-fold this expression. */
static const uint16_t mbedtls_byte_order_detector = { 0x100 };
#define MBEDTLS_IS_BIG_ENDIAN (*((unsigned char *) (&mbedtls_byte_order_detector)) == 0x01)
#endif

#else

#if (__BYTE_ORDER__) == (__ORDER_BIG_ENDIAN__)
#define MBEDTLS_IS_BIG_ENDIAN 1
#else
#define MBEDTLS_IS_BIG_ENDIAN 0
#endif

#endif /* !defined(__BYTE_ORDER__) */
385
/**
 * Get the unsigned 32 bits integer corresponding to four bytes in
 * big-endian order (MSB first).
 *
 * \param data Base address of the memory to get the four bytes from.
 * \param offset Offset from \p data of the first and most significant
 *               byte of the four bytes to build the 32 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT32_BE(data, offset)                                \
    ((MBEDTLS_IS_BIG_ENDIAN)                                               \
        ? mbedtls_get_unaligned_uint32((data) + (offset))                  \
        : MBEDTLS_BSWAP32(mbedtls_get_unaligned_uint32((data) + (offset))) \
    )

/**
 * Put in memory a 32 bits unsigned integer in big-endian order.
 *
 * \param n 32 bits unsigned integer to put in memory.
 * \param data Base address of the memory where to put the 32
 *             bits unsigned integer in.
 * \param offset Offset from \p data where to put the most significant
 *               byte of the 32 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT32_BE(n, data, offset)                                                \
    {                                                                                         \
        if (MBEDTLS_IS_BIG_ENDIAN)                                                            \
        {                                                                                     \
            mbedtls_put_unaligned_uint32((data) + (offset), (uint32_t) (n));                  \
        }                                                                                     \
        else                                                                                  \
        {                                                                                     \
            mbedtls_put_unaligned_uint32((data) + (offset), MBEDTLS_BSWAP32((uint32_t) (n))); \
        }                                                                                     \
    }

/**
 * Get the unsigned 32 bits integer corresponding to four bytes in
 * little-endian order (LSB first).
 *
 * \param data Base address of the memory to get the four bytes from.
 * \param offset Offset from \p data of the first and least significant
 *               byte of the four bytes to build the 32 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT32_LE(data, offset)                                \
    ((MBEDTLS_IS_BIG_ENDIAN)                                               \
        ? MBEDTLS_BSWAP32(mbedtls_get_unaligned_uint32((data) + (offset))) \
        : mbedtls_get_unaligned_uint32((data) + (offset))                  \
    )

/**
 * Put in memory a 32 bits unsigned integer in little-endian order.
 *
 * \param n 32 bits unsigned integer to put in memory.
 * \param data Base address of the memory where to put the 32
 *             bits unsigned integer in.
 * \param offset Offset from \p data where to put the least significant
 *               byte of the 32 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT32_LE(n, data, offset)                                                \
    {                                                                                         \
        if (MBEDTLS_IS_BIG_ENDIAN)                                                            \
        {                                                                                     \
            mbedtls_put_unaligned_uint32((data) + (offset), MBEDTLS_BSWAP32((uint32_t) (n))); \
        }                                                                                     \
        else                                                                                  \
        {                                                                                     \
            mbedtls_put_unaligned_uint32((data) + (offset), ((uint32_t) (n)));                \
        }                                                                                     \
    }
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000458
/**
 * Get the unsigned 16 bits integer corresponding to two bytes in
 * little-endian order (LSB first).
 *
 * \param data Base address of the memory to get the two bytes from.
 * \param offset Offset from \p data of the first and least significant
 *               byte of the two bytes to build the 16 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT16_LE(data, offset)                                \
    ((MBEDTLS_IS_BIG_ENDIAN)                                               \
        ? MBEDTLS_BSWAP16(mbedtls_get_unaligned_uint16((data) + (offset))) \
        : mbedtls_get_unaligned_uint16((data) + (offset))                  \
    )

/**
 * Put in memory a 16 bits unsigned integer in little-endian order.
 *
 * \param n 16 bits unsigned integer to put in memory.
 * \param data Base address of the memory where to put the 16
 *             bits unsigned integer in.
 * \param offset Offset from \p data where to put the least significant
 *               byte of the 16 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT16_LE(n, data, offset)                                                \
    {                                                                                         \
        if (MBEDTLS_IS_BIG_ENDIAN)                                                            \
        {                                                                                     \
            mbedtls_put_unaligned_uint16((data) + (offset), MBEDTLS_BSWAP16((uint16_t) (n))); \
        }                                                                                     \
        else                                                                                  \
        {                                                                                     \
            mbedtls_put_unaligned_uint16((data) + (offset), (uint16_t) (n));                  \
        }                                                                                     \
    }

/**
 * Get the unsigned 16 bits integer corresponding to two bytes in
 * big-endian order (MSB first).
 *
 * \param data Base address of the memory to get the two bytes from.
 * \param offset Offset from \p data of the first and most significant
 *               byte of the two bytes to build the 16 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT16_BE(data, offset)                                \
    ((MBEDTLS_IS_BIG_ENDIAN)                                               \
        ? mbedtls_get_unaligned_uint16((data) + (offset))                  \
        : MBEDTLS_BSWAP16(mbedtls_get_unaligned_uint16((data) + (offset))) \
    )

/**
 * Put in memory a 16 bits unsigned integer in big-endian order.
 *
 * \param n 16 bits unsigned integer to put in memory.
 * \param data Base address of the memory where to put the 16
 *             bits unsigned integer in.
 * \param offset Offset from \p data where to put the most significant
 *               byte of the 16 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT16_BE(n, data, offset)                                                \
    {                                                                                         \
        if (MBEDTLS_IS_BIG_ENDIAN)                                                            \
        {                                                                                     \
            mbedtls_put_unaligned_uint16((data) + (offset), (uint16_t) (n));                  \
        }                                                                                     \
        else                                                                                  \
        {                                                                                     \
            mbedtls_put_unaligned_uint16((data) + (offset), MBEDTLS_BSWAP16((uint16_t) (n))); \
        }                                                                                     \
    }
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000530
/**
 * Get the unsigned 24 bits integer corresponding to three bytes in
 * big-endian order (MSB first).
 *
 * Note: 24-bit values are assembled byte-by-byte since there is no native
 * 24-bit unaligned access; endianness of the host does not matter here.
 *
 * \param data Base address of the memory to get the three bytes from.
 * \param offset Offset from \p data of the first and most significant
 *               byte of the three bytes to build the 24 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT24_BE(data, offset)      \
    (                                            \
        ((uint32_t) (data)[(offset)] << 16)      \
        | ((uint32_t) (data)[(offset) + 1] << 8) \
        | ((uint32_t) (data)[(offset) + 2])      \
    )

/**
 * Put in memory a 24 bits unsigned integer in big-endian order.
 *
 * \param n 24 bits unsigned integer to put in memory.
 * \param data Base address of the memory where to put the 24
 *             bits unsigned integer in.
 * \param offset Offset from \p data where to put the most significant
 *               byte of the 24 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT24_BE(n, data, offset)    \
    {                                             \
        (data)[(offset)] = MBEDTLS_BYTE_2(n);     \
        (data)[(offset) + 1] = MBEDTLS_BYTE_1(n); \
        (data)[(offset) + 2] = MBEDTLS_BYTE_0(n); \
    }

/**
 * Get the unsigned 24 bits integer corresponding to three bytes in
 * little-endian order (LSB first).
 *
 * \param data Base address of the memory to get the three bytes from.
 * \param offset Offset from \p data of the first and least significant
 *               byte of the three bytes to build the 24 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT24_LE(data, offset)       \
    (                                             \
        ((uint32_t) (data)[(offset)])             \
        | ((uint32_t) (data)[(offset) + 1] << 8)  \
        | ((uint32_t) (data)[(offset) + 2] << 16) \
    )

/**
 * Put in memory a 24 bits unsigned integer in little-endian order.
 *
 * \param n 24 bits unsigned integer to put in memory.
 * \param data Base address of the memory where to put the 24
 *             bits unsigned integer in.
 * \param offset Offset from \p data where to put the least significant
 *               byte of the 24 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT24_LE(n, data, offset)    \
    {                                             \
        (data)[(offset)] = MBEDTLS_BYTE_0(n);     \
        (data)[(offset) + 1] = MBEDTLS_BYTE_1(n); \
        (data)[(offset) + 2] = MBEDTLS_BYTE_2(n); \
    }
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000594
/**
 * Get the unsigned 64 bits integer corresponding to eight bytes in
 * big-endian order (MSB first).
 *
 * \param data Base address of the memory to get the eight bytes from.
 * \param offset Offset from \p data of the first and most significant
 *               byte of the eight bytes to build the 64 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT64_BE(data, offset)                                \
    ((MBEDTLS_IS_BIG_ENDIAN)                                               \
        ? mbedtls_get_unaligned_uint64((data) + (offset))                  \
        : MBEDTLS_BSWAP64(mbedtls_get_unaligned_uint64((data) + (offset))) \
    )

/**
 * Put in memory a 64 bits unsigned integer in big-endian order.
 *
 * \param n 64 bits unsigned integer to put in memory.
 * \param data Base address of the memory where to put the 64
 *             bits unsigned integer in.
 * \param offset Offset from \p data where to put the most significant
 *               byte of the 64 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT64_BE(n, data, offset)                                                \
    {                                                                                         \
        if (MBEDTLS_IS_BIG_ENDIAN)                                                            \
        {                                                                                     \
            mbedtls_put_unaligned_uint64((data) + (offset), (uint64_t) (n));                  \
        }                                                                                     \
        else                                                                                  \
        {                                                                                     \
            mbedtls_put_unaligned_uint64((data) + (offset), MBEDTLS_BSWAP64((uint64_t) (n))); \
        }                                                                                     \
    }

/**
 * Get the unsigned 64 bits integer corresponding to eight bytes in
 * little-endian order (LSB first).
 *
 * \param data Base address of the memory to get the eight bytes from.
 * \param offset Offset from \p data of the first and least significant
 *               byte of the eight bytes to build the 64 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT64_LE(data, offset)                                \
    ((MBEDTLS_IS_BIG_ENDIAN)                                               \
        ? MBEDTLS_BSWAP64(mbedtls_get_unaligned_uint64((data) + (offset))) \
        : mbedtls_get_unaligned_uint64((data) + (offset))                  \
    )

/**
 * Put in memory a 64 bits unsigned integer in little-endian order.
 *
 * \param n 64 bits unsigned integer to put in memory.
 * \param data Base address of the memory where to put the 64
 *             bits unsigned integer in.
 * \param offset Offset from \p data where to put the least significant
 *               byte of the 64 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT64_LE(n, data, offset)                                                \
    {                                                                                         \
        if (MBEDTLS_IS_BIG_ENDIAN)                                                            \
        {                                                                                     \
            mbedtls_put_unaligned_uint64((data) + (offset), MBEDTLS_BSWAP64((uint64_t) (n))); \
        }                                                                                     \
        else                                                                                  \
        {                                                                                     \
            mbedtls_put_unaligned_uint64((data) + (offset), (uint64_t) (n));                  \
        }                                                                                     \
    }
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000666
667#endif /* MBEDTLS_LIBRARY_ALIGNMENT_H */