blob: fece47dc873a94a67ca1c42c8b54cd42e846b1c2 [file] [log] [blame]
Dave Rodgmanfbc23222022-11-24 18:07:37 +00001/**
2 * \file alignment.h
3 *
4 * \brief Utility code for dealing with unaligned memory accesses
5 */
6/*
7 * Copyright The Mbed TLS Contributors
Dave Rodgman16799db2023-11-02 19:47:20 +00008 * SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
Dave Rodgmanfbc23222022-11-24 18:07:37 +00009 */
10
11#ifndef MBEDTLS_LIBRARY_ALIGNMENT_H
12#define MBEDTLS_LIBRARY_ALIGNMENT_H
13
14#include <stdint.h>
Dave Rodgman96d61d12022-11-24 19:33:22 +000015#include <string.h>
Dave Rodgmanf7f1f742022-11-28 14:52:45 +000016#include <stdlib.h>
Dave Rodgmanfbc23222022-11-24 18:07:37 +000017
/*
 * Define MBEDTLS_EFFICIENT_UNALIGNED_ACCESS for architectures where unaligned memory
 * accesses are known to be efficient.
 *
 * All functions defined here will behave correctly regardless, but might be less
 * efficient when this is not defined.
 */
#if defined(__ARM_FEATURE_UNALIGNED) \
    || defined(MBEDTLS_ARCH_IS_X86) || defined(MBEDTLS_ARCH_IS_X64) \
    || defined(MBEDTLS_PLATFORM_IS_WINDOWS_ON_ARM64)
/*
 * __ARM_FEATURE_UNALIGNED is defined where appropriate by armcc, gcc 7, clang 9
 * (and later versions) for Arm v7 and later; all x86 platforms should have
 * efficient unaligned access.
 *
 * https://learn.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions?view=msvc-170#alignment
 * specifies that on Windows-on-Arm64, unaligned access is safe (except for uncached
 * device memory).
 */
#define MBEDTLS_EFFICIENT_UNALIGNED_ACCESS
#endif
39
/* Single-instruction unaligned access support.
 * When one of the cases below applies, a packed 16/32/64-bit type is defined so
 * that the get/put helpers further down compile to a single load/store. */
#if defined(__IAR_SYSTEMS_ICC__) && \
    (defined(MBEDTLS_ARCH_IS_ARM64) || defined(MBEDTLS_ARCH_IS_ARM32) \
    || defined(__ICCRX__) || defined(__ICCRL78__) || defined(__ICCRISCV__))
#pragma language=save
#pragma language=extended
#define MBEDTLS_POP_IAR_LANGUAGE_PRAGMA
/* IAR recommend this technique for accessing unaligned data in
 * https://www.iar.com/knowledge/support/technical-notes/compiler/accessing-unaligned-data
 * This results in a single load / store instruction (if unaligned access is supported).
 * According to that document, this is only supported on certain architectures.
 */
 #define UINT_UNALIGNED
typedef uint16_t __packed mbedtls_uint16_unaligned_t;
typedef uint32_t __packed mbedtls_uint32_unaligned_t;
typedef uint64_t __packed mbedtls_uint64_unaligned_t;
#elif defined(MBEDTLS_COMPILER_IS_GCC) && (MBEDTLS_GCC_VERSION >= 40504) && \
    ((MBEDTLS_GCC_VERSION < 60300) || (!defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)))
/*
 * gcc may generate a branch to memcpy for calls like `memcpy(dest, src, 4)` rather than
 * generating some LDR or LDRB instructions (similar for stores).
 *
 * This is architecture dependent: x86-64 seems fine even with old gcc; 32-bit Arm
 * is affected. To keep it simple, we enable for all architectures.
 *
 * For versions of gcc < 5.4.0 this issue always happens.
 * For gcc < 6.3.0, this issue happens at -O0
 * For all versions, this issue happens iff unaligned access is not supported.
 *
 * For gcc 4.x, this implementation will generate byte-by-byte loads even if unaligned access is
 * supported, which is correct but not optimal.
 *
 * For performance (and code size, in some cases), we want to avoid the branch and just generate
 * some inline load/store instructions since the access is small and constant-size.
 *
 * The manual states:
 * "The packed attribute specifies that a variable or structure field should have the smallest
 * possible alignment—one byte for a variable"
 * https://gcc.gnu.org/onlinedocs/gcc-4.5.4/gcc/Variable-Attributes.html
 *
 * Previous implementations used __attribute__((__aligned__(1)), but had issues with a gcc bug:
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94662
 *
 * Tested with several versions of GCC from 4.5.0 up to 13.2.0
 * We don't enable for older than 4.5.0 as this has not been tested.
 */
 #define UINT_UNALIGNED_STRUCT
typedef struct { uint16_t x; } __attribute__((packed)) mbedtls_uint16_unaligned_t;
typedef struct { uint32_t x; } __attribute__((packed)) mbedtls_uint32_unaligned_t;
typedef struct { uint64_t x; } __attribute__((packed)) mbedtls_uint64_unaligned_t;
 #endif
90
Dave Rodgman55b5dd22024-01-19 14:06:52 +000091/*
92 * We try to force mbedtls_(get|put)_unaligned_uintXX to be always inline, because this results
93 * in code that is both smaller and faster. IAR and gcc both benefit from this when optimising
94 * for size.
95 */
96
/**
 * Read an unsigned 16-bit integer from the given address, which need not
 * be aligned.
 *
 * \param p pointer to 2 bytes of data
 * \return Data at the given address
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline uint16_t mbedtls_get_unaligned_uint16(const void *p)
{
    uint16_t result;
#if defined(UINT_UNALIGNED)
    /* IAR: load through a __packed-typed pointer. */
    mbedtls_uint16_unaligned_t *src = (mbedtls_uint16_unaligned_t *) p;
    result = *src;
#elif defined(UINT_UNALIGNED_STRUCT)
    /* gcc: load via a packed single-member struct. */
    mbedtls_uint16_unaligned_t *src = (mbedtls_uint16_unaligned_t *) p;
    result = src->x;
#else
    /* Portable fallback; compilers usually lower this to a single load. */
    memcpy(&result, p, sizeof(result));
#endif
    return result;
}
123
/**
 * Write an unsigned 16-bit integer to the given address, which need not
 * be aligned.
 *
 * \param p pointer to 2 bytes of data
 * \param x data to write
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline void mbedtls_put_unaligned_uint16(void *p, uint16_t x)
{
#if defined(UINT_UNALIGNED)
    /* IAR: store through a __packed-typed pointer. */
    mbedtls_uint16_unaligned_t *dst = (mbedtls_uint16_unaligned_t *) p;
    *dst = x;
#elif defined(UINT_UNALIGNED_STRUCT)
    /* gcc: store via a packed single-member struct. */
    mbedtls_uint16_unaligned_t *dst = (mbedtls_uint16_unaligned_t *) p;
    dst->x = x;
#else
    /* Portable fallback; compilers usually lower this to a single store. */
    memcpy(p, &x, sizeof(x));
#endif
}
148
/**
 * Read an unsigned 32-bit integer from the given address, which need not
 * be aligned.
 *
 * \param p pointer to 4 bytes of data
 * \return Data at the given address
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline uint32_t mbedtls_get_unaligned_uint32(const void *p)
{
    uint32_t result;
#if defined(UINT_UNALIGNED)
    /* IAR: load through a __packed-typed pointer. */
    mbedtls_uint32_unaligned_t *src = (mbedtls_uint32_unaligned_t *) p;
    result = *src;
#elif defined(UINT_UNALIGNED_STRUCT)
    /* gcc: load via a packed single-member struct. */
    mbedtls_uint32_unaligned_t *src = (mbedtls_uint32_unaligned_t *) p;
    result = src->x;
#else
    /* Portable fallback; compilers usually lower this to a single load. */
    memcpy(&result, p, sizeof(result));
#endif
    return result;
}
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000175
/**
 * Write an unsigned 32-bit integer to the given address, which need not
 * be aligned.
 *
 * \param p pointer to 4 bytes of data
 * \param x data to write
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline void mbedtls_put_unaligned_uint32(void *p, uint32_t x)
{
#if defined(UINT_UNALIGNED)
    /* IAR: store through a __packed-typed pointer. */
    mbedtls_uint32_unaligned_t *dst = (mbedtls_uint32_unaligned_t *) p;
    *dst = x;
#elif defined(UINT_UNALIGNED_STRUCT)
    /* gcc: store via a packed single-member struct. */
    mbedtls_uint32_unaligned_t *dst = (mbedtls_uint32_unaligned_t *) p;
    dst->x = x;
#else
    /* Portable fallback; compilers usually lower this to a single store. */
    memcpy(p, &x, sizeof(x));
#endif
}
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000200
/**
 * Read an unsigned 64-bit integer from the given address, which need not
 * be aligned.
 *
 * \param p pointer to 8 bytes of data
 * \return Data at the given address
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline uint64_t mbedtls_get_unaligned_uint64(const void *p)
{
    uint64_t result;
#if defined(UINT_UNALIGNED)
    /* IAR: load through a __packed-typed pointer. */
    mbedtls_uint64_unaligned_t *src = (mbedtls_uint64_unaligned_t *) p;
    result = *src;
#elif defined(UINT_UNALIGNED_STRUCT)
    /* gcc: load via a packed single-member struct. */
    mbedtls_uint64_unaligned_t *src = (mbedtls_uint64_unaligned_t *) p;
    result = src->x;
#else
    /* Portable fallback; compilers usually lower this to a single load. */
    memcpy(&result, p, sizeof(result));
#endif
    return result;
}
227
/**
 * Write an unsigned 64-bit integer to the given address, which need not
 * be aligned.
 *
 * \param p pointer to 8 bytes of data
 * \param x data to write
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline void mbedtls_put_unaligned_uint64(void *p, uint64_t x)
{
#if defined(UINT_UNALIGNED)
    /* IAR: store through a __packed-typed pointer. */
    mbedtls_uint64_unaligned_t *dst = (mbedtls_uint64_unaligned_t *) p;
    *dst = x;
#elif defined(UINT_UNALIGNED_STRUCT)
    /* gcc: store via a packed single-member struct. */
    mbedtls_uint64_unaligned_t *dst = (mbedtls_uint64_unaligned_t *) p;
    dst->x = x;
#else
    /* Portable fallback; compilers usually lower this to a single store. */
    memcpy(p, &x, sizeof(x));
#endif
}
252
#if defined(MBEDTLS_POP_IAR_LANGUAGE_PRAGMA)
/* Restore the IAR language mode saved by the matching `#pragma language=save` above. */
#pragma language=restore
#endif
256
/** Byte Reading Macros
 *
 * Given a multi-byte integer \p x, MBEDTLS_BYTE_n retrieves the n-th
 * byte from x, where byte 0 is the least significant byte.
 */
#define MBEDTLS_BYTE_0(x) ((uint8_t) ((x) & 0xff))
#define MBEDTLS_BYTE_1(x) ((uint8_t) (((x) >> 8) & 0xff))
#define MBEDTLS_BYTE_2(x) ((uint8_t) (((x) >> 16) & 0xff))
#define MBEDTLS_BYTE_3(x) ((uint8_t) (((x) >> 24) & 0xff))
#define MBEDTLS_BYTE_4(x) ((uint8_t) (((x) >> 32) & 0xff))
#define MBEDTLS_BYTE_5(x) ((uint8_t) (((x) >> 40) & 0xff))
#define MBEDTLS_BYTE_6(x) ((uint8_t) (((x) >> 48) & 0xff))
#define MBEDTLS_BYTE_7(x) ((uint8_t) (((x) >> 56) & 0xff))
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000270
/*
 * Detect GCC built-in byteswap routines
 */
#if defined(__GNUC__) && defined(__GNUC_PREREQ)
#if __GNUC_PREREQ(4, 8)
#define MBEDTLS_BSWAP16 __builtin_bswap16
#endif /* __GNUC_PREREQ(4,8) */
#if __GNUC_PREREQ(4, 3)
#define MBEDTLS_BSWAP32 __builtin_bswap32
#define MBEDTLS_BSWAP64 __builtin_bswap64
#endif /* __GNUC_PREREQ(4,3) */
#endif /* defined(__GNUC__) && defined(__GNUC_PREREQ) */

/*
 * Detect Clang built-in byteswap routines
 */
#if defined(__clang__) && defined(__has_builtin)
#if __has_builtin(__builtin_bswap16) && !defined(MBEDTLS_BSWAP16)
#define MBEDTLS_BSWAP16 __builtin_bswap16
#endif /* __has_builtin(__builtin_bswap16) */
#if __has_builtin(__builtin_bswap32) && !defined(MBEDTLS_BSWAP32)
#define MBEDTLS_BSWAP32 __builtin_bswap32
#endif /* __has_builtin(__builtin_bswap32) */
#if __has_builtin(__builtin_bswap64) && !defined(MBEDTLS_BSWAP64)
#define MBEDTLS_BSWAP64 __builtin_bswap64
#endif /* __has_builtin(__builtin_bswap64) */
#endif /* defined(__clang__) && defined(__has_builtin) */

/*
 * Detect MSVC built-in byteswap routines
 */
#if defined(_MSC_VER)
#if !defined(MBEDTLS_BSWAP16)
#define MBEDTLS_BSWAP16 _byteswap_ushort
#endif
#if !defined(MBEDTLS_BSWAP32)
#define MBEDTLS_BSWAP32 _byteswap_ulong
#endif
#if !defined(MBEDTLS_BSWAP64)
#define MBEDTLS_BSWAP64 _byteswap_uint64
#endif
#endif /* defined(_MSC_VER) */

/* Detect armcc built-in byteswap routine */
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 410000) && !defined(MBEDTLS_BSWAP32)
#if defined(__ARM_ACLE)  /* ARM Compiler 6 - earlier versions don't need a header */
#include <arm_acle.h>
#endif
#define MBEDTLS_BSWAP32 __rev
#endif

/* Detect IAR built-in byteswap routine */
#if defined(__IAR_SYSTEMS_ICC__)
#if defined(__ARM_ACLE)
#include <arm_acle.h>
#define MBEDTLS_BSWAP16(x) ((uint16_t) __rev16((uint32_t) (x)))
#define MBEDTLS_BSWAP32 __rev
#define MBEDTLS_BSWAP64 __revll
#endif
#endif
331
Dave Rodgmanf7f1f742022-11-28 14:52:45 +0000332/*
333 * Where compiler built-ins are not present, fall back to C code that the
334 * compiler may be able to detect and transform into the relevant bswap or
335 * similar instruction.
336 */
#if !defined(MBEDTLS_BSWAP16)
/* Fallback 16-bit byte swap: exchange the two bytes of \p x. */
static inline uint16_t mbedtls_bswap16(uint16_t x)
{
    return (uint16_t) (((x & 0xff00) >> 8) | ((x & 0x00ff) << 8));
}
#define MBEDTLS_BSWAP16 mbedtls_bswap16
#endif /* !defined(MBEDTLS_BSWAP16) */
Dave Rodgman6298b242022-11-28 14:51:49 +0000346
#if !defined(MBEDTLS_BSWAP32)
/* Fallback 32-bit byte swap: mask after shifting (rather than before). */
static inline uint32_t mbedtls_bswap32(uint32_t x)
{
    uint32_t r = (x << 24) & 0xff000000;
    r |= (x << 8) & 0x00ff0000;
    r |= (x >> 8) & 0x0000ff00;
    r |= (x >> 24) & 0x000000ff;
    return r;
}
#define MBEDTLS_BSWAP32 mbedtls_bswap32
#endif /* !defined(MBEDTLS_BSWAP32) */
Dave Rodgman6298b242022-11-28 14:51:49 +0000358
#if !defined(MBEDTLS_BSWAP64)
/* Fallback 64-bit byte swap: swap the 32-bit halves, then the 16-bit
 * pairs within each half, then the bytes within each pair. */
static inline uint64_t mbedtls_bswap64(uint64_t x)
{
    x = (x >> 32) | (x << 32);
    x = ((x & 0xffff0000ffff0000ULL) >> 16) |
        ((x & 0x0000ffff0000ffffULL) << 16);
    x = ((x & 0xff00ff00ff00ff00ULL) >> 8) |
        ((x & 0x00ff00ff00ff00ffULL) << 8);
    return x;
}
#define MBEDTLS_BSWAP64 mbedtls_bswap64
#endif /* !defined(MBEDTLS_BSWAP64) */
Dave Rodgman6298b242022-11-28 14:51:49 +0000374
#if !defined(__BYTE_ORDER__)

#if defined(__LITTLE_ENDIAN__)
/* IAR defines __xxx_ENDIAN__, but not __BYTE_ORDER__ */
#define MBEDTLS_IS_BIG_ENDIAN 0
#elif defined(__BIG_ENDIAN__)
#define MBEDTLS_IS_BIG_ENDIAN 1
#else
/* Runtime detection: the first byte of a 16-bit 0x100 is 0x01 only on
 * big-endian targets. Compilers constant-fold this in practice. */
static const uint16_t mbedtls_byte_order_detector = { 0x100 };
#define MBEDTLS_IS_BIG_ENDIAN (*((unsigned char *) (&mbedtls_byte_order_detector)) == 0x01)
#endif

#else

#if (__BYTE_ORDER__) == (__ORDER_BIG_ENDIAN__)
#define MBEDTLS_IS_BIG_ENDIAN 1
#else
#define MBEDTLS_IS_BIG_ENDIAN 0
#endif

#endif /* !defined(__BYTE_ORDER__) */
396
/**
 * Get the unsigned 32 bits integer corresponding to four bytes in
 * big-endian order (MSB first).
 *
 * \param data   Base address of the memory to get the four bytes from.
 * \param offset Offset from \p data of the first and most significant
 *               byte of the four bytes to build the 32 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT32_BE(data, offset) \
    ((MBEDTLS_IS_BIG_ENDIAN) \
        ? mbedtls_get_unaligned_uint32((data) + (offset)) \
        : MBEDTLS_BSWAP32(mbedtls_get_unaligned_uint32((data) + (offset))) \
    )

/**
 * Put in memory a 32 bits unsigned integer in big-endian order.
 *
 * Note: expands to a compound statement, not an expression.
 *
 * \param n      32 bits unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 32
 *               bits unsigned integer in.
 * \param offset Offset from \p data where to put the most significant
 *               byte of the 32 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT32_BE(n, data, offset) \
    { \
        if (MBEDTLS_IS_BIG_ENDIAN) \
        { \
            mbedtls_put_unaligned_uint32((data) + (offset), (uint32_t) (n)); \
        } \
        else \
        { \
            mbedtls_put_unaligned_uint32((data) + (offset), MBEDTLS_BSWAP32((uint32_t) (n))); \
        } \
    }
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000432
/**
 * Get the unsigned 32 bits integer corresponding to four bytes in
 * little-endian order (LSB first).
 *
 * \param data   Base address of the memory to get the four bytes from.
 * \param offset Offset from \p data of the first and least significant
 *               byte of the four bytes to build the 32 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT32_LE(data, offset) \
    ((MBEDTLS_IS_BIG_ENDIAN) \
        ? MBEDTLS_BSWAP32(mbedtls_get_unaligned_uint32((data) + (offset))) \
        : mbedtls_get_unaligned_uint32((data) + (offset)) \
    )


/**
 * Put in memory a 32 bits unsigned integer in little-endian order.
 *
 * Note: expands to a compound statement, not an expression.
 *
 * \param n      32 bits unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 32
 *               bits unsigned integer in.
 * \param offset Offset from \p data where to put the least significant
 *               byte of the 32 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT32_LE(n, data, offset) \
    { \
        if (MBEDTLS_IS_BIG_ENDIAN) \
        { \
            mbedtls_put_unaligned_uint32((data) + (offset), MBEDTLS_BSWAP32((uint32_t) (n))); \
        } \
        else \
        { \
            mbedtls_put_unaligned_uint32((data) + (offset), ((uint32_t) (n))); \
        } \
    }
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000469
/**
 * Get the unsigned 16 bits integer corresponding to two bytes in
 * little-endian order (LSB first).
 *
 * \param data   Base address of the memory to get the two bytes from.
 * \param offset Offset from \p data of the first and least significant
 *               byte of the two bytes to build the 16 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT16_LE(data, offset) \
    ((MBEDTLS_IS_BIG_ENDIAN) \
        ? MBEDTLS_BSWAP16(mbedtls_get_unaligned_uint16((data) + (offset))) \
        : mbedtls_get_unaligned_uint16((data) + (offset)) \
    )

/**
 * Put in memory a 16 bits unsigned integer in little-endian order.
 *
 * Note: expands to a compound statement, not an expression.
 *
 * \param n      16 bits unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 16
 *               bits unsigned integer in.
 * \param offset Offset from \p data where to put the least significant
 *               byte of the 16 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT16_LE(n, data, offset) \
    { \
        if (MBEDTLS_IS_BIG_ENDIAN) \
        { \
            mbedtls_put_unaligned_uint16((data) + (offset), MBEDTLS_BSWAP16((uint16_t) (n))); \
        } \
        else \
        { \
            mbedtls_put_unaligned_uint16((data) + (offset), (uint16_t) (n)); \
        } \
    }
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000505
/**
 * Get the unsigned 16 bits integer corresponding to two bytes in
 * big-endian order (MSB first).
 *
 * \param data   Base address of the memory to get the two bytes from.
 * \param offset Offset from \p data of the first and most significant
 *               byte of the two bytes to build the 16 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT16_BE(data, offset) \
    ((MBEDTLS_IS_BIG_ENDIAN) \
        ? mbedtls_get_unaligned_uint16((data) + (offset)) \
        : MBEDTLS_BSWAP16(mbedtls_get_unaligned_uint16((data) + (offset))) \
    )

/**
 * Put in memory a 16 bits unsigned integer in big-endian order.
 *
 * Note: expands to a compound statement, not an expression.
 *
 * \param n      16 bits unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 16
 *               bits unsigned integer in.
 * \param offset Offset from \p data where to put the most significant
 *               byte of the 16 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT16_BE(n, data, offset) \
    { \
        if (MBEDTLS_IS_BIG_ENDIAN) \
        { \
            mbedtls_put_unaligned_uint16((data) + (offset), (uint16_t) (n)); \
        } \
        else \
        { \
            mbedtls_put_unaligned_uint16((data) + (offset), MBEDTLS_BSWAP16((uint16_t) (n))); \
        } \
    }
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000541
/**
 * Get the unsigned 24 bits integer corresponding to three bytes in
 * big-endian order (MSB first).
 *
 * 24-bit accesses are always assembled byte-by-byte (no unaligned helper).
 *
 * \param data   Base address of the memory to get the three bytes from.
 * \param offset Offset from \p data of the first and most significant
 *               byte of the three bytes to build the 24 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT24_BE(data, offset) \
    ( \
        ((uint32_t) (data)[(offset)] << 16) \
        | ((uint32_t) (data)[(offset) + 1] << 8) \
        | ((uint32_t) (data)[(offset) + 2]) \
    )

/**
 * Put in memory a 24 bits unsigned integer in big-endian order.
 *
 * \param n      24 bits unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 24
 *               bits unsigned integer in.
 * \param offset Offset from \p data where to put the most significant
 *               byte of the 24 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT24_BE(n, data, offset) \
    { \
        (data)[(offset)] = MBEDTLS_BYTE_2(n); \
        (data)[(offset) + 1] = MBEDTLS_BYTE_1(n); \
        (data)[(offset) + 2] = MBEDTLS_BYTE_0(n); \
    }

/**
 * Get the unsigned 24 bits integer corresponding to three bytes in
 * little-endian order (LSB first).
 *
 * \param data   Base address of the memory to get the three bytes from.
 * \param offset Offset from \p data of the first and least significant
 *               byte of the three bytes to build the 24 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT24_LE(data, offset) \
    ( \
        ((uint32_t) (data)[(offset)]) \
        | ((uint32_t) (data)[(offset) + 1] << 8) \
        | ((uint32_t) (data)[(offset) + 2] << 16) \
    )

/**
 * Put in memory a 24 bits unsigned integer in little-endian order.
 *
 * \param n      24 bits unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 24
 *               bits unsigned integer in.
 * \param offset Offset from \p data where to put the least significant
 *               byte of the 24 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT24_LE(n, data, offset) \
    { \
        (data)[(offset)] = MBEDTLS_BYTE_0(n); \
        (data)[(offset) + 1] = MBEDTLS_BYTE_1(n); \
        (data)[(offset) + 2] = MBEDTLS_BYTE_2(n); \
    }
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000605
/**
 * Get the unsigned 64 bits integer corresponding to eight bytes in
 * big-endian order (MSB first).
 *
 * \param data   Base address of the memory to get the eight bytes from.
 * \param offset Offset from \p data of the first and most significant
 *               byte of the eight bytes to build the 64 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT64_BE(data, offset) \
    ((MBEDTLS_IS_BIG_ENDIAN) \
        ? mbedtls_get_unaligned_uint64((data) + (offset)) \
        : MBEDTLS_BSWAP64(mbedtls_get_unaligned_uint64((data) + (offset))) \
    )

/**
 * Put in memory a 64 bits unsigned integer in big-endian order.
 *
 * Note: expands to a compound statement, not an expression.
 *
 * \param n      64 bits unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 64
 *               bits unsigned integer in.
 * \param offset Offset from \p data where to put the most significant
 *               byte of the 64 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT64_BE(n, data, offset) \
    { \
        if (MBEDTLS_IS_BIG_ENDIAN) \
        { \
            mbedtls_put_unaligned_uint64((data) + (offset), (uint64_t) (n)); \
        } \
        else \
        { \
            mbedtls_put_unaligned_uint64((data) + (offset), MBEDTLS_BSWAP64((uint64_t) (n))); \
        } \
    }

/**
 * Get the unsigned 64 bits integer corresponding to eight bytes in
 * little-endian order (LSB first).
 *
 * \param data   Base address of the memory to get the eight bytes from.
 * \param offset Offset from \p data of the first and least significant
 *               byte of the eight bytes to build the 64 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT64_LE(data, offset) \
    ((MBEDTLS_IS_BIG_ENDIAN) \
        ? MBEDTLS_BSWAP64(mbedtls_get_unaligned_uint64((data) + (offset))) \
        : mbedtls_get_unaligned_uint64((data) + (offset)) \
    )

/**
 * Put in memory a 64 bits unsigned integer in little-endian order.
 *
 * Note: expands to a compound statement, not an expression.
 *
 * \param n      64 bits unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 64
 *               bits unsigned integer in.
 * \param offset Offset from \p data where to put the least significant
 *               byte of the 64 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT64_LE(n, data, offset) \
    { \
        if (MBEDTLS_IS_BIG_ENDIAN) \
        { \
            mbedtls_put_unaligned_uint64((data) + (offset), MBEDTLS_BSWAP64((uint64_t) (n))); \
        } \
        else \
        { \
            mbedtls_put_unaligned_uint64((data) + (offset), (uint64_t) (n)); \
        } \
    }
Dave Rodgmanfbc23222022-11-24 18:07:37 +0000677
678#endif /* MBEDTLS_LIBRARY_ALIGNMENT_H */