/**
 * \file alignment.h
 *
 * \brief Utility code for dealing with unaligned memory accesses
 */
/*
 * Copyright The Mbed TLS Contributors
 * SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
 */

#ifndef MBEDTLS_LIBRARY_ALIGNMENT_H
#define MBEDTLS_LIBRARY_ALIGNMENT_H

#include <stdint.h>
#include <string.h>
#include <stdlib.h>

/*
 * Define MBEDTLS_EFFICIENT_UNALIGNED_ACCESS for architectures where unaligned memory
 * accesses are known to be efficient.
 *
 * All functions defined here will behave correctly regardless, but might be less
 * efficient when this is not defined.
 */
#if defined(__ARM_FEATURE_UNALIGNED) \
    || defined(MBEDTLS_ARCH_IS_X86) || defined(MBEDTLS_ARCH_IS_X64) \
    || defined(MBEDTLS_PLATFORM_IS_WINDOWS_ON_ARM64)
/*
 * __ARM_FEATURE_UNALIGNED is defined where appropriate by armcc, gcc 7, clang 9
 * (and later versions) for Arm v7 and later; all x86 platforms should have
 * efficient unaligned access.
 *
 * https://learn.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions?view=msvc-170#alignment
 * specifies that on Windows-on-Arm64, unaligned access is safe (except for uncached
 * device memory).
 */
#define MBEDTLS_EFFICIENT_UNALIGNED_ACCESS
#endif

#if defined(__IAR_SYSTEMS_ICC__) && \
    (defined(MBEDTLS_ARCH_IS_ARM64) || defined(MBEDTLS_ARCH_IS_ARM32) \
    || defined(__ICCRX__) || defined(__ICCRL78__) || defined(__ICCRISCV__))
#pragma language=save
#pragma language=extended
#define MBEDTLS_POP_IAR_LANGUAGE_PRAGMA
/* IAR recommend this technique for accessing unaligned data in
 * https://www.iar.com/knowledge/support/technical-notes/compiler/accessing-unaligned-data
 * This results in a single load / store instruction (if unaligned access is supported).
 * According to that document, this is only supported on certain architectures.
 */
 #define UINT_UNALIGNED
typedef uint16_t __packed mbedtls_uint16_unaligned_t;
typedef uint32_t __packed mbedtls_uint32_unaligned_t;
typedef uint64_t __packed mbedtls_uint64_unaligned_t;
#elif defined(MBEDTLS_COMPILER_IS_GCC) && (MBEDTLS_GCC_VERSION >= 40504) && \
    ((MBEDTLS_GCC_VERSION < 90300) || (!defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)))
/*
 * Old versions of gcc, depending on how the target is specified, may generate a branch to memcpy
 * for calls like `memcpy(dest, src, 4)` rather than generating some LDR or LDRB instructions
 * (similar for stores).
 * Recent versions where unaligned access is not enabled also do this.
 *
 * For performance (and code size, in some cases), we want to avoid the branch and just generate
 * some inline load/store instructions since the access is small and constant-size.
 *
 * The manual states:
 * "The aligned attribute specifies a minimum alignment for the variable or structure field,
 * measured in bytes."
 * https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html
 *
 * Tested with several versions of GCC from 4.5.0 up to 9.3.0.
 * We don't enable for older than 4.5.0 as this has not been tested.
 */
 #define UINT_UNALIGNED
typedef uint16_t __attribute__((__aligned__(1))) mbedtls_uint16_unaligned_t;
typedef uint32_t __attribute__((__aligned__(1))) mbedtls_uint32_unaligned_t;
typedef uint64_t __attribute__((__aligned__(1))) mbedtls_uint64_unaligned_t;
 #endif

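/*
 * For illustration: both sets of typedefs above mark the pointed-to integer as
 * having byte alignment, so dereferencing a pointer to one of the
 * mbedtls_uintXX_unaligned_t types is safe at any address, and the compiler can
 * emit a plain (unaligned) load or store for it. A minimal sketch, assuming
 * UINT_UNALIGNED is defined and `buf` is a hypothetical byte buffer:
 *
 *     uint8_t buf[8] = { 0 };
 *     // *(uint32_t *) (buf + 1) would be undefined behaviour on
 *     // strict-alignment targets, but this is well-defined:
 *     uint32_t v = *(const mbedtls_uint32_unaligned_t *) (buf + 1);
 */
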
/*
 * We try to force mbedtls_(get|put)_unaligned_uintXX to be always inline, because this results
 * in code that is both smaller and faster. IAR and gcc both benefit from this when optimising
 * for size.
 */

/**
 * Read an unsigned 16-bit integer from the given address, which need not
 * be aligned.
 *
 * \param p pointer to 2 bytes of data
 * \return Data at the given address
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline uint16_t mbedtls_get_unaligned_uint16(const void *p)
{
    uint16_t r;
#if defined(UINT_UNALIGNED)
    mbedtls_uint16_unaligned_t *p16 = (mbedtls_uint16_unaligned_t *) p;
    r = *p16;
#else
    memcpy(&r, p, sizeof(r));
#endif
    return r;
}

/**
 * Write an unsigned 16-bit integer to the given address, which need not
 * be aligned.
 *
 * \param p pointer to 2 bytes of data
 * \param x data to write
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline void mbedtls_put_unaligned_uint16(void *p, uint16_t x)
{
#if defined(UINT_UNALIGNED)
    mbedtls_uint16_unaligned_t *p16 = (mbedtls_uint16_unaligned_t *) p;
    *p16 = x;
#else
    memcpy(p, &x, sizeof(x));
#endif
}

/**
 * Read an unsigned 32-bit integer from the given address, which need not
 * be aligned.
 *
 * \param p pointer to 4 bytes of data
 * \return Data at the given address
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline uint32_t mbedtls_get_unaligned_uint32(const void *p)
{
    uint32_t r;
#if defined(UINT_UNALIGNED)
    mbedtls_uint32_unaligned_t *p32 = (mbedtls_uint32_unaligned_t *) p;
    r = *p32;
#else
    memcpy(&r, p, sizeof(r));
#endif
    return r;
}

/**
 * Write an unsigned 32-bit integer to the given address, which need not
 * be aligned.
 *
 * \param p pointer to 4 bytes of data
 * \param x data to write
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline void mbedtls_put_unaligned_uint32(void *p, uint32_t x)
{
#if defined(UINT_UNALIGNED)
    mbedtls_uint32_unaligned_t *p32 = (mbedtls_uint32_unaligned_t *) p;
    *p32 = x;
#else
    memcpy(p, &x, sizeof(x));
#endif
}

/**
 * Read an unsigned 64-bit integer from the given address, which need not
 * be aligned.
 *
 * \param p pointer to 8 bytes of data
 * \return Data at the given address
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline uint64_t mbedtls_get_unaligned_uint64(const void *p)
{
    uint64_t r;
#if defined(UINT_UNALIGNED)
    mbedtls_uint64_unaligned_t *p64 = (mbedtls_uint64_unaligned_t *) p;
    r = *p64;
#else
    memcpy(&r, p, sizeof(r));
#endif
    return r;
}

/**
 * Write an unsigned 64-bit integer to the given address, which need not
 * be aligned.
 *
 * \param p pointer to 8 bytes of data
 * \param x data to write
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline void mbedtls_put_unaligned_uint64(void *p, uint64_t x)
{
#if defined(UINT_UNALIGNED)
    mbedtls_uint64_unaligned_t *p64 = (mbedtls_uint64_unaligned_t *) p;
    *p64 = x;
#else
    memcpy(p, &x, sizeof(x));
#endif
}

#if defined(MBEDTLS_POP_IAR_LANGUAGE_PRAGMA)
#pragma language=restore
#endif

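/*
 * Usage sketch: the accessors above let callers walk a byte buffer one word at
 * a time without any alignment assumptions. A minimal, hypothetical helper
 * that XORs 4 bytes of `src` into `dst` (either pointer may be unaligned):
 *
 *     static inline void xor_word(unsigned char *dst, const unsigned char *src)
 *     {
 *         uint32_t d = mbedtls_get_unaligned_uint32(dst);
 *         uint32_t s = mbedtls_get_unaligned_uint32(src);
 *         mbedtls_put_unaligned_uint32(dst, d ^ s);
 *     }
 */
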
/** Byte Reading Macros
 *
 * Given a multi-byte integer \p x, MBEDTLS_BYTE_n retrieves the n-th
 * byte from x, where byte 0 is the least significant byte.
 */
#define MBEDTLS_BYTE_0(x) ((uint8_t) ((x) & 0xff))
#define MBEDTLS_BYTE_1(x) ((uint8_t) (((x) >> 8) & 0xff))
#define MBEDTLS_BYTE_2(x) ((uint8_t) (((x) >> 16) & 0xff))
#define MBEDTLS_BYTE_3(x) ((uint8_t) (((x) >> 24) & 0xff))
#define MBEDTLS_BYTE_4(x) ((uint8_t) (((x) >> 32) & 0xff))
#define MBEDTLS_BYTE_5(x) ((uint8_t) (((x) >> 40) & 0xff))
#define MBEDTLS_BYTE_6(x) ((uint8_t) (((x) >> 48) & 0xff))
#define MBEDTLS_BYTE_7(x) ((uint8_t) (((x) >> 56) & 0xff))

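/*
 * For illustration: with x = 0x0A0B0C0D, MBEDTLS_BYTE_0(x) == 0x0D,
 * MBEDTLS_BYTE_1(x) == 0x0C, MBEDTLS_BYTE_2(x) == 0x0B and
 * MBEDTLS_BYTE_3(x) == 0x0A. Byte 0 is the least significant byte of the
 * value, independent of the host's endianness.
 */
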
/*
 * Detect GCC built-in byteswap routines
 */
#if defined(__GNUC__) && defined(__GNUC_PREREQ)
#if __GNUC_PREREQ(4, 8)
#define MBEDTLS_BSWAP16 __builtin_bswap16
#endif /* __GNUC_PREREQ(4,8) */
#if __GNUC_PREREQ(4, 3)
#define MBEDTLS_BSWAP32 __builtin_bswap32
#define MBEDTLS_BSWAP64 __builtin_bswap64
#endif /* __GNUC_PREREQ(4,3) */
#endif /* defined(__GNUC__) && defined(__GNUC_PREREQ) */

/*
 * Detect Clang built-in byteswap routines
 */
#if defined(__clang__) && defined(__has_builtin)
#if __has_builtin(__builtin_bswap16) && !defined(MBEDTLS_BSWAP16)
#define MBEDTLS_BSWAP16 __builtin_bswap16
#endif /* __has_builtin(__builtin_bswap16) */
#if __has_builtin(__builtin_bswap32) && !defined(MBEDTLS_BSWAP32)
#define MBEDTLS_BSWAP32 __builtin_bswap32
#endif /* __has_builtin(__builtin_bswap32) */
#if __has_builtin(__builtin_bswap64) && !defined(MBEDTLS_BSWAP64)
#define MBEDTLS_BSWAP64 __builtin_bswap64
#endif /* __has_builtin(__builtin_bswap64) */
#endif /* defined(__clang__) && defined(__has_builtin) */

/*
 * Detect MSVC built-in byteswap routines
 */
#if defined(_MSC_VER)
#if !defined(MBEDTLS_BSWAP16)
#define MBEDTLS_BSWAP16 _byteswap_ushort
#endif
#if !defined(MBEDTLS_BSWAP32)
#define MBEDTLS_BSWAP32 _byteswap_ulong
#endif
#if !defined(MBEDTLS_BSWAP64)
#define MBEDTLS_BSWAP64 _byteswap_uint64
#endif
#endif /* defined(_MSC_VER) */

/* Detect armcc built-in byteswap routine */
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 410000) && !defined(MBEDTLS_BSWAP32)
#if defined(__ARM_ACLE)  /* ARM Compiler 6 - earlier versions don't need a header */
#include <arm_acle.h>
#endif
#define MBEDTLS_BSWAP32 __rev
#endif

/* Detect IAR built-in byteswap routine */
#if defined(__IAR_SYSTEMS_ICC__)
#if defined(__ARM_ACLE)
#include <arm_acle.h>
#define MBEDTLS_BSWAP16(x) ((uint16_t) __rev16((uint32_t) (x)))
#define MBEDTLS_BSWAP32 __rev
#define MBEDTLS_BSWAP64 __revll
#endif
#endif

/*
 * Where compiler built-ins are not present, fall back to C code that the
 * compiler may be able to detect and transform into the relevant bswap or
 * similar instruction.
 */
#if !defined(MBEDTLS_BSWAP16)
static inline uint16_t mbedtls_bswap16(uint16_t x)
{
    return
        (x & 0x00ff) << 8 |
        (x & 0xff00) >> 8;
}
#define MBEDTLS_BSWAP16 mbedtls_bswap16
#endif /* !defined(MBEDTLS_BSWAP16) */

#if !defined(MBEDTLS_BSWAP32)
static inline uint32_t mbedtls_bswap32(uint32_t x)
{
    return
        (x & 0x000000ff) << 24 |
        (x & 0x0000ff00) <<  8 |
        (x & 0x00ff0000) >>  8 |
        (x & 0xff000000) >> 24;
}
#define MBEDTLS_BSWAP32 mbedtls_bswap32
#endif /* !defined(MBEDTLS_BSWAP32) */

#if !defined(MBEDTLS_BSWAP64)
static inline uint64_t mbedtls_bswap64(uint64_t x)
{
    return
        (x & 0x00000000000000ffULL) << 56 |
        (x & 0x000000000000ff00ULL) << 40 |
        (x & 0x0000000000ff0000ULL) << 24 |
        (x & 0x00000000ff000000ULL) <<  8 |
        (x & 0x000000ff00000000ULL) >>  8 |
        (x & 0x0000ff0000000000ULL) >> 24 |
        (x & 0x00ff000000000000ULL) >> 40 |
        (x & 0xff00000000000000ULL) >> 56;
}
#define MBEDTLS_BSWAP64 mbedtls_bswap64
#endif /* !defined(MBEDTLS_BSWAP64) */

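/*
 * For illustration: whichever definition was selected above, these macros
 * reverse byte order, e.g. MBEDTLS_BSWAP16(0x0102) == 0x0201,
 * MBEDTLS_BSWAP32(0x01020304) == 0x04030201 and
 * MBEDTLS_BSWAP64(0x0102030405060708ULL) == 0x0807060504030201ULL.
 */
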
#if !defined(__BYTE_ORDER__)

#if defined(__LITTLE_ENDIAN__)
/* IAR defines __xxx_ENDIAN__, but not __BYTE_ORDER__ */
#define MBEDTLS_IS_BIG_ENDIAN 0
#elif defined(__BIG_ENDIAN__)
#define MBEDTLS_IS_BIG_ENDIAN 1
#else
static const uint16_t mbedtls_byte_order_detector = { 0x100 };
#define MBEDTLS_IS_BIG_ENDIAN (*((unsigned char *) (&mbedtls_byte_order_detector)) == 0x01)
#endif

#else

#if (__BYTE_ORDER__) == (__ORDER_BIG_ENDIAN__)
#define MBEDTLS_IS_BIG_ENDIAN 1
#else
#define MBEDTLS_IS_BIG_ENDIAN 0
#endif

#endif /* !defined(__BYTE_ORDER__) */

/**
 * Get the unsigned 32-bit integer corresponding to four bytes in
 * big-endian order (MSB first).
 *
 * \param data   Base address of the memory to get the four bytes from.
 * \param offset Offset from \p data of the first and most significant
 *               byte of the four bytes to build the 32-bit unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT32_BE(data, offset)                                    \
    ((MBEDTLS_IS_BIG_ENDIAN)                                                   \
        ? mbedtls_get_unaligned_uint32((data) + (offset))                      \
        : MBEDTLS_BSWAP32(mbedtls_get_unaligned_uint32((data) + (offset)))     \
    )

/**
 * Put in memory a 32-bit unsigned integer in big-endian order.
 *
 * \param n      32-bit unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 32-bit
 *               unsigned integer.
 * \param offset Offset from \p data where to put the most significant
 *               byte of the 32-bit unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT32_BE(n, data, offset)                                                   \
    {                                                                                            \
        if (MBEDTLS_IS_BIG_ENDIAN)                                                               \
        {                                                                                        \
            mbedtls_put_unaligned_uint32((data) + (offset), (uint32_t) (n));                     \
        }                                                                                        \
        else                                                                                     \
        {                                                                                        \
            mbedtls_put_unaligned_uint32((data) + (offset), MBEDTLS_BSWAP32((uint32_t) (n)));    \
        }                                                                                        \
    }

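/*
 * Usage sketch: reading and rewriting a big-endian 32-bit length field at the
 * start of a hypothetical wire-format buffer `hdr`:
 *
 *     unsigned char hdr[4] = { 0x00, 0x00, 0x01, 0x2C };
 *     uint32_t len = MBEDTLS_GET_UINT32_BE(hdr, 0);   // len == 300
 *     MBEDTLS_PUT_UINT32_BE(len + 1, hdr, 0);         // hdr is now 00 00 01 2D
 */
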
/**
 * Get the unsigned 32-bit integer corresponding to four bytes in
 * little-endian order (LSB first).
 *
 * \param data   Base address of the memory to get the four bytes from.
 * \param offset Offset from \p data of the first and least significant
 *               byte of the four bytes to build the 32-bit unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT32_LE(data, offset)                                    \
    ((MBEDTLS_IS_BIG_ENDIAN)                                                   \
        ? MBEDTLS_BSWAP32(mbedtls_get_unaligned_uint32((data) + (offset)))     \
        : mbedtls_get_unaligned_uint32((data) + (offset))                      \
    )

/**
 * Put in memory a 32-bit unsigned integer in little-endian order.
 *
 * \param n      32-bit unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 32-bit
 *               unsigned integer.
 * \param offset Offset from \p data where to put the least significant
 *               byte of the 32-bit unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT32_LE(n, data, offset)                                                   \
    {                                                                                            \
        if (MBEDTLS_IS_BIG_ENDIAN)                                                               \
        {                                                                                        \
            mbedtls_put_unaligned_uint32((data) + (offset), MBEDTLS_BSWAP32((uint32_t) (n)));    \
        }                                                                                        \
        else                                                                                     \
        {                                                                                        \
            mbedtls_put_unaligned_uint32((data) + (offset), ((uint32_t) (n)));                   \
        }                                                                                        \
    }

/**
 * Get the unsigned 16-bit integer corresponding to two bytes in
 * little-endian order (LSB first).
 *
 * \param data   Base address of the memory to get the two bytes from.
 * \param offset Offset from \p data of the first and least significant
 *               byte of the two bytes to build the 16-bit unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT16_LE(data, offset)                                    \
    ((MBEDTLS_IS_BIG_ENDIAN)                                                   \
        ? MBEDTLS_BSWAP16(mbedtls_get_unaligned_uint16((data) + (offset)))     \
        : mbedtls_get_unaligned_uint16((data) + (offset))                      \
    )

/**
 * Put in memory a 16-bit unsigned integer in little-endian order.
 *
 * \param n      16-bit unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 16-bit
 *               unsigned integer.
 * \param offset Offset from \p data where to put the least significant
 *               byte of the 16-bit unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT16_LE(n, data, offset)                                                   \
    {                                                                                            \
        if (MBEDTLS_IS_BIG_ENDIAN)                                                               \
        {                                                                                        \
            mbedtls_put_unaligned_uint16((data) + (offset), MBEDTLS_BSWAP16((uint16_t) (n)));    \
        }                                                                                        \
        else                                                                                     \
        {                                                                                        \
            mbedtls_put_unaligned_uint16((data) + (offset), (uint16_t) (n));                     \
        }                                                                                        \
    }

/**
 * Get the unsigned 16-bit integer corresponding to two bytes in
 * big-endian order (MSB first).
 *
 * \param data   Base address of the memory to get the two bytes from.
 * \param offset Offset from \p data of the first and most significant
 *               byte of the two bytes to build the 16-bit unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT16_BE(data, offset)                                    \
    ((MBEDTLS_IS_BIG_ENDIAN)                                                   \
        ? mbedtls_get_unaligned_uint16((data) + (offset))                      \
        : MBEDTLS_BSWAP16(mbedtls_get_unaligned_uint16((data) + (offset)))     \
    )

/**
 * Put in memory a 16-bit unsigned integer in big-endian order.
 *
 * \param n      16-bit unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 16-bit
 *               unsigned integer.
 * \param offset Offset from \p data where to put the most significant
 *               byte of the 16-bit unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT16_BE(n, data, offset)                                                   \
    {                                                                                            \
        if (MBEDTLS_IS_BIG_ENDIAN)                                                               \
        {                                                                                        \
            mbedtls_put_unaligned_uint16((data) + (offset), (uint16_t) (n));                     \
        }                                                                                        \
        else                                                                                     \
        {                                                                                        \
            mbedtls_put_unaligned_uint16((data) + (offset), MBEDTLS_BSWAP16((uint16_t) (n)));    \
        }                                                                                        \
    }

/**
 * Get the unsigned 24-bit integer corresponding to three bytes in
 * big-endian order (MSB first).
 *
 * \param data   Base address of the memory to get the three bytes from.
 * \param offset Offset from \p data of the first and most significant
 *               byte of the three bytes to build the 24-bit unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT24_BE(data, offset)            \
    (                                                  \
        ((uint32_t) (data)[(offset)] << 16)            \
        | ((uint32_t) (data)[(offset) + 1] << 8)       \
        | ((uint32_t) (data)[(offset) + 2])            \
    )

/**
 * Put in memory a 24-bit unsigned integer in big-endian order.
 *
 * \param n      24-bit unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 24-bit
 *               unsigned integer.
 * \param offset Offset from \p data where to put the most significant
 *               byte of the 24-bit unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT24_BE(n, data, offset)         \
    {                                                  \
        (data)[(offset)] = MBEDTLS_BYTE_2(n);          \
        (data)[(offset) + 1] = MBEDTLS_BYTE_1(n);      \
        (data)[(offset) + 2] = MBEDTLS_BYTE_0(n);      \
    }

/**
 * Get the unsigned 24-bit integer corresponding to three bytes in
 * little-endian order (LSB first).
 *
 * \param data   Base address of the memory to get the three bytes from.
 * \param offset Offset from \p data of the first and least significant
 *               byte of the three bytes to build the 24-bit unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT24_LE(data, offset)            \
    (                                                  \
        ((uint32_t) (data)[(offset)])                  \
        | ((uint32_t) (data)[(offset) + 1] << 8)       \
        | ((uint32_t) (data)[(offset) + 2] << 16)      \
    )

/**
 * Put in memory a 24-bit unsigned integer in little-endian order.
 *
 * \param n      24-bit unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 24-bit
 *               unsigned integer.
 * \param offset Offset from \p data where to put the least significant
 *               byte of the 24-bit unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT24_LE(n, data, offset)         \
    {                                                  \
        (data)[(offset)] = MBEDTLS_BYTE_0(n);          \
        (data)[(offset) + 1] = MBEDTLS_BYTE_1(n);      \
        (data)[(offset) + 2] = MBEDTLS_BYTE_2(n);      \
    }

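/*
 * Usage sketch: the 24-bit macros above work one byte at a time, so they need
 * neither unaligned-access nor byteswap support. For example, reading a 3-byte
 * big-endian length field (such as the length in a TLS handshake header) from
 * offset 1 of a hypothetical buffer `msg`:
 *
 *     unsigned char msg[4] = { 0x01, 0x00, 0x00, 0x2A };
 *     uint32_t body_len = MBEDTLS_GET_UINT24_BE(msg, 1);   // body_len == 42
 */
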
/**
 * Get the unsigned 64-bit integer corresponding to eight bytes in
 * big-endian order (MSB first).
 *
 * \param data   Base address of the memory to get the eight bytes from.
 * \param offset Offset from \p data of the first and most significant
 *               byte of the eight bytes to build the 64-bit unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT64_BE(data, offset)                                    \
    ((MBEDTLS_IS_BIG_ENDIAN)                                                   \
        ? mbedtls_get_unaligned_uint64((data) + (offset))                      \
        : MBEDTLS_BSWAP64(mbedtls_get_unaligned_uint64((data) + (offset)))     \
    )

/**
 * Put in memory a 64-bit unsigned integer in big-endian order.
 *
 * \param n      64-bit unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 64-bit
 *               unsigned integer.
 * \param offset Offset from \p data where to put the most significant
 *               byte of the 64-bit unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT64_BE(n, data, offset)                                                   \
    {                                                                                            \
        if (MBEDTLS_IS_BIG_ENDIAN)                                                               \
        {                                                                                        \
            mbedtls_put_unaligned_uint64((data) + (offset), (uint64_t) (n));                     \
        }                                                                                        \
        else                                                                                     \
        {                                                                                        \
            mbedtls_put_unaligned_uint64((data) + (offset), MBEDTLS_BSWAP64((uint64_t) (n)));    \
        }                                                                                        \
    }

/**
 * Get the unsigned 64-bit integer corresponding to eight bytes in
 * little-endian order (LSB first).
 *
 * \param data   Base address of the memory to get the eight bytes from.
 * \param offset Offset from \p data of the first and least significant
 *               byte of the eight bytes to build the 64-bit unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT64_LE(data, offset)                                    \
    ((MBEDTLS_IS_BIG_ENDIAN)                                                   \
        ? MBEDTLS_BSWAP64(mbedtls_get_unaligned_uint64((data) + (offset)))     \
        : mbedtls_get_unaligned_uint64((data) + (offset))                      \
    )

/**
 * Put in memory a 64-bit unsigned integer in little-endian order.
 *
 * \param n      64-bit unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 64-bit
 *               unsigned integer.
 * \param offset Offset from \p data where to put the least significant
 *               byte of the 64-bit unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT64_LE(n, data, offset)                                                   \
    {                                                                                            \
        if (MBEDTLS_IS_BIG_ENDIAN)                                                               \
        {                                                                                        \
            mbedtls_put_unaligned_uint64((data) + (offset), MBEDTLS_BSWAP64((uint64_t) (n)));    \
        }                                                                                        \
        else                                                                                     \
        {                                                                                        \
            mbedtls_put_unaligned_uint64((data) + (offset), (uint64_t) (n));                     \
        }                                                                                        \
    }

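/*
 * Usage sketch: the 64-bit macros follow the same pattern as the 16- and
 * 32-bit ones, e.g. loading big-endian message words the way a hash such as
 * SHA-512 consumes its input (assuming `input` points to at least 128 bytes):
 *
 *     uint64_t w[16];
 *     for (size_t i = 0; i < 16; i++) {
 *         w[i] = MBEDTLS_GET_UINT64_BE(input, i * 8);
 *     }
 */
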
#endif /* MBEDTLS_LIBRARY_ALIGNMENT_H */