/*
 *  Armv8-A Cryptographic Extension support functions for AArch64
 *
 *  Copyright The Mbed TLS Contributors
 *  SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
 */

#if defined(__clang__) && (__clang_major__ >= 4)

/* Ideally, we would simply use MBEDTLS_ARCH_IS_ARMV8_A in the following #if,
 * but that is defined by build_info.h, and we need this block to happen first. */
#if defined(__ARM_ARCH)
#if __ARM_ARCH >= 8
#define MBEDTLS_AESCE_ARCH_IS_ARMV8_A
#endif
#endif

#if defined(MBEDTLS_AESCE_ARCH_IS_ARMV8_A) && !defined(__ARM_FEATURE_CRYPTO)
/* TODO: Reconsider the above once https://reviews.llvm.org/D131064 is merged.
 *
 * The intrinsic declarations are guarded by predefined ACLE macros in clang:
 * these are normally only enabled by the -march option on the command line.
 * By defining the macros ourselves we gain access to those declarations without
 * requiring -march on the command line.
 *
 * `arm_neon.h` is included by common.h, so we put these defines
 * at the top of this file, before any includes.
 */
#define __ARM_FEATURE_CRYPTO 1
/* See: https://arm-software.github.io/acle/main/acle.html#cryptographic-extensions
 *
 * `__ARM_FEATURE_CRYPTO` is deprecated, but we need to continue to specify it
 * for older compilers.
 */
#define __ARM_FEATURE_AES 1
#define MBEDTLS_ENABLE_ARM_CRYPTO_EXTENSIONS_COMPILER_FLAG
#endif

#endif /* defined(__clang__) && (__clang_major__ >= 4) */

#include <string.h>
#include "common.h"

#if defined(MBEDTLS_AESCE_C)

#include "aesce.h"

#if defined(MBEDTLS_AESCE_HAVE_CODE)

/* Compiler version checks. */
#if defined(__clang__)
#   if defined(MBEDTLS_ARCH_IS_ARM32) && (__clang_major__ < 11)
#       error "Minimum version of Clang for MBEDTLS_AESCE_C on 32-bit Arm or Thumb is 11.0."
#   elif defined(MBEDTLS_ARCH_IS_ARM64) && (__clang_major__ < 4)
#       error "Minimum version of Clang for MBEDTLS_AESCE_C on aarch64 is 4.0."
#   endif
#elif defined(__GNUC__)
#   if __GNUC__ < 6
#       error "Minimum version of GCC for MBEDTLS_AESCE_C is 6.0."
#   endif
#elif defined(_MSC_VER)
/* TODO: We haven't verified MSVC from 1920 to 1928. If someone verifies those
 * versions, please update this check and the documentation of `MBEDTLS_AESCE_C`
 * in `mbedtls_config.h`. */
#   if _MSC_VER < 1929
#       error "Minimum version of MSVC for MBEDTLS_AESCE_C is 2019 version 16.11.2."
#   endif
#elif defined(__ARMCC_VERSION)
#   if defined(MBEDTLS_ARCH_IS_ARM32) && (__ARMCC_VERSION < 6200002)
/* TODO: We haven't verified armclang for 32-bit Arm/Thumb prior to 6.20.
 * If someone verifies those versions, please update this check and the
 * documentation of `MBEDTLS_AESCE_C` in `mbedtls_config.h`. */
#       error "Minimum version of armclang for MBEDTLS_AESCE_C on 32-bit Arm is 6.20."
#   elif defined(MBEDTLS_ARCH_IS_ARM64) && (__ARMCC_VERSION < 6060000)
#       error "Minimum version of armclang for MBEDTLS_AESCE_C on aarch64 is 6.6."
#   endif
#endif

#if !(defined(__ARM_FEATURE_CRYPTO) || defined(__ARM_FEATURE_AES)) || \
    defined(MBEDTLS_ENABLE_ARM_CRYPTO_EXTENSIONS_COMPILER_FLAG)
#   if defined(__ARMCOMPILER_VERSION)
#       if __ARMCOMPILER_VERSION <= 6090000
#           error "Must use minimum -march=armv8-a+crypto for MBEDTLS_AESCE_C"
#       else
#           pragma clang attribute push (__attribute__((target("aes"))), apply_to=function)
#           define MBEDTLS_POP_TARGET_PRAGMA
#       endif
#   elif defined(__clang__)
#       pragma clang attribute push (__attribute__((target("aes"))), apply_to=function)
#       define MBEDTLS_POP_TARGET_PRAGMA
#   elif defined(__GNUC__)
#       pragma GCC push_options
#       pragma GCC target ("+crypto")
#       define MBEDTLS_POP_TARGET_PRAGMA
#   elif defined(_MSC_VER)
#       error "Required feature (__ARM_FEATURE_AES) is not enabled."
#   endif
#endif /* !(__ARM_FEATURE_CRYPTO || __ARM_FEATURE_AES) ||
          MBEDTLS_ENABLE_ARM_CRYPTO_EXTENSIONS_COMPILER_FLAG */

#if defined(__linux__) && !defined(MBEDTLS_AES_USE_HARDWARE_ONLY)

#include <sys/auxv.h>
#if !defined(HWCAP_NEON)
#define HWCAP_NEON (1 << 12)
#endif
#if !defined(HWCAP2_AES)
#define HWCAP2_AES (1 << 0)
#endif
#if !defined(HWCAP_AES)
#define HWCAP_AES (1 << 3)
#endif
#if !defined(HWCAP_ASIMD)
#define HWCAP_ASIMD (1 << 1)
#endif

signed char mbedtls_aesce_has_support_result = -1;

#if !defined(MBEDTLS_AES_USE_HARDWARE_ONLY)
/*
 * AES instruction support detection routine
 */
int mbedtls_aesce_has_support_impl(void)
{
    /* To avoid many calls to getauxval, cache the result. This is
     * thread-safe, because we store the result in a char, so it cannot
     * be subject to non-atomic updates.
     * It is possible that we could end up setting the result more than
     * once, but that is harmless.
     */
    if (mbedtls_aesce_has_support_result == -1) {
#if defined(MBEDTLS_ARCH_IS_ARM32)
        unsigned long auxval  = getauxval(AT_HWCAP);
        unsigned long auxval2 = getauxval(AT_HWCAP2);
        if (((auxval & HWCAP_NEON) == HWCAP_NEON) &&
            ((auxval2 & HWCAP2_AES) == HWCAP2_AES)) {
            mbedtls_aesce_has_support_result = 1;
        } else {
            mbedtls_aesce_has_support_result = 0;
        }
#else
        unsigned long auxval = getauxval(AT_HWCAP);
        if ((auxval & (HWCAP_ASIMD | HWCAP_AES)) ==
            (HWCAP_ASIMD | HWCAP_AES)) {
            mbedtls_aesce_has_support_result = 1;
        } else {
            mbedtls_aesce_has_support_result = 0;
        }
#endif
    }
    return mbedtls_aesce_has_support_result;
}
#endif

#endif /* defined(__linux__) && !defined(MBEDTLS_AES_USE_HARDWARE_ONLY) */

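/*
 * Illustrative sketch (not part of the library): gating use of the AESCE
 * primitives below on the runtime check above. Real callers typically go
 * through the wrapper in aesce.h; the direct call is shown here only for
 * illustration.
 *
 *     if (mbedtls_aesce_has_support_impl()) {
 *         // hardware path: mbedtls_aesce_crypt_ecb(), ...
 *     } else {
 *         // portable C fallback in aes.c
 *     }
 */
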
/* Single round of AESCE encryption */
#define AESCE_ENCRYPT_ROUND                   \
    block = vaeseq_u8(block, vld1q_u8(keys)); \
    block = vaesmcq_u8(block);                \
    keys += 16
/* Two rounds of AESCE encryption */
#define AESCE_ENCRYPT_ROUND_X2        AESCE_ENCRYPT_ROUND; AESCE_ENCRYPT_ROUND
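
/* Note (informative): vaeseq_u8 performs AddRoundKey (XOR with the round
 * key), then SubBytes and ShiftRows; vaesmcq_u8 performs MixColumns. One
 * AESCE_ENCRYPT_ROUND is therefore a full AES round, with the key XOR at the
 * start of the round rather than at the end. */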

MBEDTLS_OPTIMIZE_FOR_PERFORMANCE
static uint8x16_t aesce_encrypt_block(uint8x16_t block,
                                      unsigned char *keys,
                                      int rounds)
{
    /* 10, 12 or 14 rounds. Unroll loop. */
    if (rounds == 10) {
        goto rounds_10;
    }
    if (rounds == 12) {
        goto rounds_12;
    }
    AESCE_ENCRYPT_ROUND_X2;
rounds_12:
    AESCE_ENCRYPT_ROUND_X2;
rounds_10:
    AESCE_ENCRYPT_ROUND_X2;
    AESCE_ENCRYPT_ROUND_X2;
    AESCE_ENCRYPT_ROUND_X2;
    AESCE_ENCRYPT_ROUND_X2;
    AESCE_ENCRYPT_ROUND;

    /* AES AddRoundKey for the previous round.
     * SubBytes, ShiftRows for the final round. */
    block = vaeseq_u8(block, vld1q_u8(keys));
    keys += 16;

    /* Final round: no MixColumns */

    /* Final AddRoundKey */
    block = veorq_u8(block, vld1q_u8(keys));

    return block;
}

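/* Round-count check (informative): entering at rounds_10 executes
 * 4 * AESCE_ENCRYPT_ROUND_X2 + AESCE_ENCRYPT_ROUND = 9 full rounds, and the
 * trailing vaeseq_u8/veorq_u8 pair supplies the MixColumns-free final round,
 * giving the 10 rounds of AES-128. Entry at rounds_12 or at the top of the
 * function adds one or two more X2 rounds for AES-192/AES-256. */
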
/* Single round of AESCE decryption
 *
 * AES AddRoundKey, inverse SubBytes, inverse ShiftRows
 *
 *      block = vaesdq_u8(block, vld1q_u8(keys));
 *
 * AES inverse MixColumns for the next round.
 *
 * This means that we switch the order of the inverse AddRoundKey and
 * inverse MixColumns operations. We have to do this as AddRoundKey is
 * done in an atomic instruction together with the inverses of SubBytes
 * and ShiftRows.
 *
 * It works because MixColumns is a linear operation over GF(2^8) and
 * AddRoundKey is an exclusive or, which is equivalent to addition over
 * GF(2^8). (The inverse of MixColumns needs to be applied to the
 * affected round keys separately, which has been done when the
 * decryption round keys were calculated.)
 *
 *      block = vaesimcq_u8(block);
 */
#define AESCE_DECRYPT_ROUND                   \
    block = vaesdq_u8(block, vld1q_u8(keys)); \
    block = vaesimcq_u8(block);               \
    keys += 16
/* Two rounds of AESCE decryption */
#define AESCE_DECRYPT_ROUND_X2        AESCE_DECRYPT_ROUND; AESCE_DECRYPT_ROUND
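
/* Why the reordering is sound (informative): vaesimcq_u8 (inverse
 * MixColumns) is linear over GF(2^8), so for any block b and round key rk
 *
 *     vaesimcq_u8(veorq_u8(b, rk)) == veorq_u8(vaesimcq_u8(b),
 *                                              vaesimcq_u8(rk));
 *
 * mbedtls_aesce_inverse_key() below applies vaesimcq_u8 to the middle round
 * keys in advance, which is what lets AESCE_DECRYPT_ROUND keep AddRoundKey
 * inside vaesdq_u8 and apply vaesimcq_u8 afterwards. */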

#if !defined(MBEDTLS_BLOCK_CIPHER_NO_DECRYPT)
static uint8x16_t aesce_decrypt_block(uint8x16_t block,
                                      unsigned char *keys,
                                      int rounds)
{
    /* 10, 12 or 14 rounds. Unroll loop. */
    if (rounds == 10) {
        goto rounds_10;
    }
    if (rounds == 12) {
        goto rounds_12;
    }
    AESCE_DECRYPT_ROUND_X2;
rounds_12:
    AESCE_DECRYPT_ROUND_X2;
rounds_10:
    AESCE_DECRYPT_ROUND_X2;
    AESCE_DECRYPT_ROUND_X2;
    AESCE_DECRYPT_ROUND_X2;
    AESCE_DECRYPT_ROUND_X2;
    AESCE_DECRYPT_ROUND;

    /* The inverses of AES AddRoundKey, SubBytes, ShiftRows finishing up the
     * last full round. */
    block = vaesdq_u8(block, vld1q_u8(keys));
    keys += 16;

    /* Inverse AddRoundKey for inverting the initial round key addition. */
    block = veorq_u8(block, vld1q_u8(keys));

    return block;
}
#endif

/*
 * AES-ECB block en(de)cryption
 */
int mbedtls_aesce_crypt_ecb(mbedtls_aes_context *ctx,
                            int mode,
                            const unsigned char input[16],
                            unsigned char output[16])
{
    uint8x16_t block = vld1q_u8(&input[0]);
    unsigned char *keys = (unsigned char *) (ctx->buf + ctx->rk_offset);

#if !defined(MBEDTLS_BLOCK_CIPHER_NO_DECRYPT)
    if (mode == MBEDTLS_AES_DECRYPT) {
        block = aesce_decrypt_block(block, keys, ctx->nr);
    } else
#else
    (void) mode;
#endif
    {
        block = aesce_encrypt_block(block, keys, ctx->nr);
    }
    vst1q_u8(&output[0], block);

    return 0;
}

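/*
 * Illustrative sketch (not part of the library): this function is normally
 * reached through the public AES API in aes.h, e.g.
 *
 *     mbedtls_aes_context ctx;
 *     unsigned char key[16] = { 0 }, in[16] = { 0 }, out[16];
 *     mbedtls_aes_init(&ctx);
 *     mbedtls_aes_setkey_enc(&ctx, key, 128);
 *     mbedtls_aes_crypt_ecb(&ctx, MBEDTLS_AES_ENCRYPT, in, out);
 *     mbedtls_aes_free(&ctx);
 *
 * which dispatches to mbedtls_aesce_crypt_ecb() when runtime detection
 * succeeds.
 */
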
/*
 * Compute decryption round keys from encryption round keys
 */
#if !defined(MBEDTLS_BLOCK_CIPHER_NO_DECRYPT)
void mbedtls_aesce_inverse_key(unsigned char *invkey,
                               const unsigned char *fwdkey,
                               int nr)
{
    int i, j;
    j = nr;
    vst1q_u8(invkey, vld1q_u8(fwdkey + j * 16));
    for (i = 1, j--; j > 0; i++, j--) {
        vst1q_u8(invkey + i * 16,
                 vaesimcq_u8(vld1q_u8(fwdkey + j * 16)));
    }
    vst1q_u8(invkey + i * 16, vld1q_u8(fwdkey + j * 16));
}
#endif

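/* Layout note (informative): for nr rounds the forward schedule holds nr + 1
 * round keys of 16 bytes each. The loop above emits them in reverse order,
 * applying vaesimcq_u8 to the nr - 1 middle keys only, since the first and
 * last round keys are used without the inverse-MixColumns correction. */
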
static inline uint32_t aes_rot_word(uint32_t word)
{
    return (word << (32 - 8)) | (word >> 8);
}

static inline uint32_t aes_sub_word(uint32_t in)
{
    uint8x16_t v = vreinterpretq_u8_u32(vdupq_n_u32(in));
    uint8x16_t zero = vdupq_n_u8(0);

    /* vaeseq_u8 does both SubBytes and ShiftRows. Taking the first row yields
     * the correct result, as ShiftRows doesn't change the first row. */
    v = vaeseq_u8(zero, v);
    return vgetq_lane_u32(vreinterpretq_u32_u8(v), 0);
}

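/* Sanity check (informative): the AES S-box maps 0x00 to 0x63, so
 * aes_sub_word(0x00000000) must return 0x63636363; aes_rot_word(0x01020304)
 * returns 0x04010203, a byte-wise rotation of the 32-bit word. */
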
/*
 * Key expansion function
 */
static void aesce_setkey_enc(unsigned char *rk,
                             const unsigned char *key,
                             const size_t key_bit_length)
{
    static uint8_t const rcon[] = { 0x01, 0x02, 0x04, 0x08, 0x10,
                                    0x20, 0x40, 0x80, 0x1b, 0x36 };
    /* See https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.197.pdf
     *   - Section 5, Nr = Nk + 6
     *   - Section 5.2, the length of round keys is Nb*(Nr+1)
     */
    const uint32_t key_len_in_words = key_bit_length / 32; /* Nk */
    const size_t round_key_len_in_words = 4;               /* Nb */
    const size_t rounds_needed = key_len_in_words + 6;     /* Nr */
    const size_t round_keys_len_in_words =
        round_key_len_in_words * (rounds_needed + 1);      /* Nb*(Nr+1) */
    const uint32_t *rko_end = (uint32_t *) rk + round_keys_len_in_words;

    memcpy(rk, key, key_len_in_words * 4);

    for (uint32_t *rki = (uint32_t *) rk;
         rki + key_len_in_words < rko_end;
         rki += key_len_in_words) {

        size_t iteration = (size_t) (rki - (uint32_t *) rk) / key_len_in_words;
        uint32_t *rko;
        rko = rki + key_len_in_words;
        rko[0] = aes_rot_word(aes_sub_word(rki[key_len_in_words - 1]));
        rko[0] ^= rcon[iteration] ^ rki[0];
        rko[1] = rko[0] ^ rki[1];
        rko[2] = rko[1] ^ rki[2];
        rko[3] = rko[2] ^ rki[3];
        if (rko + key_len_in_words > rko_end) {
            /* Do not write overflow words. */
            continue;
        }
#if !defined(MBEDTLS_AES_ONLY_128_BIT_KEY_LENGTH)
        switch (key_bit_length) {
            case 128:
                break;
            case 192:
                rko[4] = rko[3] ^ rki[4];
                rko[5] = rko[4] ^ rki[5];
                break;
            case 256:
                rko[4] = aes_sub_word(rko[3]) ^ rki[4];
                rko[5] = rko[4] ^ rki[5];
                rko[6] = rko[5] ^ rki[6];
                rko[7] = rko[6] ^ rki[7];
                break;
        }
#endif /* !MBEDTLS_AES_ONLY_128_BIT_KEY_LENGTH */
    }
}

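/* Worked example (informative): for a 128-bit key, the first loop iteration
 * above computes, in FIPS-197 notation,
 *
 *     w[4] = SubWord(RotWord(w[3])) ^ Rcon[1] ^ w[0]
 *     w[5] = w[4] ^ w[1],  w[6] = w[5] ^ w[2],  w[7] = w[6] ^ w[3]
 *
 * where rcon[0] == 0x01 above plays the role of Rcon[1]. */
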
/*
 * Key expansion, wrapper
 */
int mbedtls_aesce_setkey_enc(unsigned char *rk,
                             const unsigned char *key,
                             size_t bits)
{
    switch (bits) {
        case 128:
        case 192:
        case 256:
            aesce_setkey_enc(rk, key, bits);
            break;
        default:
            return MBEDTLS_ERR_AES_INVALID_KEY_LENGTH;
    }

    return 0;
}

#if defined(MBEDTLS_GCM_C)

#if defined(MBEDTLS_ARCH_IS_ARM32)

#if defined(__clang__)
/* On clang for A32/T32, work around some missing intrinsics and types that are
 * listed in
 * [ACLE](https://arm-software.github.io/acle/neon_intrinsics/advsimd.html#polynomial-1).
 * These are only required for GCM.
 */
#define vreinterpretq_u64_p64(a) ((uint64x2_t) a)

typedef uint8x16_t poly128_t;

static inline poly128_t vmull_p64(poly64_t a, poly64_t b)
{
    poly128_t r;
    asm ("vmull.p64 %[r], %[a], %[b]" : [r] "=w" (r) : [a] "w" (a), [b] "w" (b) :);
    return r;
}

/* This is set to cause some more missing intrinsics to be defined below */
#define COMMON_MISSING_INTRINSICS

static inline poly128_t vmull_high_p64(poly64x2_t a, poly64x2_t b)
{
    return vmull_p64((poly64_t) (vget_high_u64((uint64x2_t) a)),
                     (poly64_t) (vget_high_u64((uint64x2_t) b)));
}

#endif /* defined(__clang__) */

static inline uint8x16_t vrbitq_u8(uint8x16_t x)
{
    /* There is no vrbitq_u8 instruction in A32/T32, so provide
     * an equivalent non-Neon implementation. Reverse the bit order in each
     * byte with 4x rbit, rev. */
    asm ("ldm  %[p], { r2-r5 } \n\t"
         "rbit r2, r2          \n\t"
         "rev  r2, r2          \n\t"
         "rbit r3, r3          \n\t"
         "rev  r3, r3          \n\t"
         "rbit r4, r4          \n\t"
         "rev  r4, r4          \n\t"
         "rbit r5, r5          \n\t"
         "rev  r5, r5          \n\t"
         "stm  %[p], { r2-r5 } \n\t"
         :
         /* Output: 16 bytes of memory pointed to by &x */
         "+m" (*(uint8_t(*)[16]) &x)
         :
         [p] "r" (&x)
         :
         "r2", "r3", "r4", "r5"
         );
    return x;
}

#endif /* defined(MBEDTLS_ARCH_IS_ARM32) */

#if defined(MBEDTLS_COMPILER_IS_GCC) && __GNUC__ == 5
/* Some intrinsics are not available for GCC 5.X. */
#define COMMON_MISSING_INTRINSICS
#endif /* MBEDTLS_COMPILER_IS_GCC && __GNUC__ == 5 */

#if defined(COMMON_MISSING_INTRINSICS)

/* Missing intrinsics common to both GCC 5, and Clang on 32-bit */

#define vreinterpretq_p64_u8(a)  ((poly64x2_t) a)
#define vreinterpretq_u8_p128(a) ((uint8x16_t) a)

static inline poly64x1_t vget_low_p64(poly64x2_t a)
{
    uint64x1_t r = vget_low_u64(vreinterpretq_u64_p64(a));
    return (poly64x1_t) r;
}

#endif /* COMMON_MISSING_INTRINSICS */

/* vmull_p64/vmull_high_p64 wrappers.
 *
 * Older compilers miss some intrinsic functions for `poly*_t`. We use
 * uint8x16_t and uint8x16x3_t as input/output parameters.
 */
#if defined(MBEDTLS_COMPILER_IS_GCC)
/* GCC reports an incompatible-type error without the cast: it treats poly64_t
 * and poly64x1_t as distinct types, unlike MSVC and Clang. */
#define MBEDTLS_VMULL_P64(a, b) vmull_p64((poly64_t) a, (poly64_t) b)
#else
/* MSVC reports `error C2440: 'type cast'` with the cast, and Clang accepts the
 * call with or without it (it treats poly64_t and poly64x1_t as the same
 * type), so no cast is used here. */
#define MBEDTLS_VMULL_P64(a, b) vmull_p64(a, b)
#endif /* MBEDTLS_COMPILER_IS_GCC */

static inline uint8x16_t pmull_low(uint8x16_t a, uint8x16_t b)
{
    return vreinterpretq_u8_p128(
        MBEDTLS_VMULL_P64(
            (poly64_t) vget_low_p64(vreinterpretq_p64_u8(a)),
            (poly64_t) vget_low_p64(vreinterpretq_p64_u8(b))
            ));
}

static inline uint8x16_t pmull_high(uint8x16_t a, uint8x16_t b)
{
    return vreinterpretq_u8_p128(
        vmull_high_p64(vreinterpretq_p64_u8(a),
                       vreinterpretq_p64_u8(b)));
}

/* GHASH does 128b polynomial multiplication on blocks in GF(2^128) defined by
 * `x^128 + x^7 + x^2 + x + 1`.
 *
 * Arm64 only has 64b->128b polynomial multipliers, so we need four 64b
 * multiplies to generate one 128b multiplication.
 *
 * `poly_mult_128` executes the polynomial multiplication and outputs 256b,
 * represented by three 128b values for code-size reasons.
 *
 * Output layout:
 * |            |             |             |
 * |------------|-------------|-------------|
 * | ret.val[0] | h3:h2:00:00 | high 128b   |
 * | ret.val[1] |   :m2:m1:00 | middle 128b |
 * | ret.val[2] |   :  :l1:l0 | low 128b    |
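 *
 * Derivation (informative): writing a = a1:a0 and b = b1:b0 as 64-bit
 * halves, schoolbook multiplication gives
 *
 *     a(x) * b(x) = a1*b1*x^128 + (a1*b0 + a0*b1)*x^64 + a0*b0
 *
 * which is exactly h (high), m (middle, offset 64 bits) and l (low) below.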
530 */
Jerry Yudf87a122023-01-10 18:17:15 +0800531static inline uint8x16x3_t poly_mult_128(uint8x16_t a, uint8x16_t b)
532{
533 uint8x16x3_t ret;
Jerry Yu8f810602023-03-14 17:28:52 +0800534 uint8x16_t h, m, l; /* retval high/middle/low */
Jerry Yu1ac7f6b2023-03-07 15:44:59 +0800535 uint8x16_t c, d, e;
536
537 h = pmull_high(a, b); /* h3:h2:00:00 = a1*b1 */
538 l = pmull_low(a, b); /* : :l1:l0 = a0*b0 */
539 c = vextq_u8(b, b, 8); /* :c1:c0 = b0:b1 */
540 d = pmull_high(a, c); /* :d2:d1:00 = a1*b0 */
541 e = pmull_low(a, c); /* :e2:e1:00 = a0*b1 */
542 m = veorq_u8(d, e); /* :m2:m1:00 = d + e */
543
544 ret.val[0] = h;
545 ret.val[1] = m;
546 ret.val[2] = l;
Jerry Yudf87a122023-01-10 18:17:15 +0800547 return ret;
548}
549
/*
 * Modulo reduction.
 *
 * See: https://www.researchgate.net/publication/285612706_Implementing_GCM_on_ARMv8
 *
 * Section 4.3
 *
 * Modular reduction is slightly more complex. Write the GCM modulus as f(z) =
 * z^128 + r(z), where r(z) = z^7 + z^2 + z + 1. The well-known approach is to
 * consider that z^128 ≡ r(z) (mod z^128 + r(z)), allowing us to write the
 * 256-bit operand to be reduced as a(z) = h(z)z^128 + l(z) ≡ h(z)r(z) + l(z).
 * That is, we simply multiply the higher part of the operand by r(z) and add
 * it to l(z). If the result is still larger than 128 bits, we reduce again.
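 *
 * Note (informative): r(z) = z^7 + z^2 + z + 1 corresponds to the byte 0x87,
 * which is why MODULO below is derived from vdupq_n_u8(0x87): the right
 * shift by 64 - 8 bits leaves a single 0x87 in the low byte of each 64-bit
 * lane.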
 */
static inline uint8x16_t poly_mult_reduce(uint8x16x3_t input)
{
    uint8x16_t const ZERO = vdupq_n_u8(0);

    uint64x2_t r = vreinterpretq_u64_u8(vdupq_n_u8(0x87));
#if defined(__GNUC__)
    /* Use `asm` as an optimisation barrier to prevent MODULO from being
     * loaded from memory. This applies to GNUC-compatible compilers.
     */
    asm volatile ("" : "+w" (r));
#endif
    uint8x16_t const MODULO = vreinterpretq_u8_u64(vshrq_n_u64(r, 64 - 8));
    uint8x16_t h, m, l; /* input high/middle/low 128b */
    uint8x16_t c, d, e, f, g, n, o;
    h = input.val[0];          /* h3:h2:00:00                       */
    m = input.val[1];          /*   :m2:m1:00                       */
    l = input.val[2];          /*   :  :l1:l0                       */
    c = pmull_high(h, MODULO); /*   :c2:c1:00 = reduction of h3     */
    d = pmull_low(h, MODULO);  /*   :  :d1:d0 = reduction of h2     */
    e = veorq_u8(c, m);        /*   :e2:e1:00 = m2:m1:00 + c2:c1:00 */
    f = pmull_high(e, MODULO); /*   :  :f1:f0 = reduction of e2     */
    g = vextq_u8(ZERO, e, 8);  /*   :  :g1:00 = e1:00               */
    n = veorq_u8(d, l);        /*   :  :n1:n0 = d1:d0 + l1:l0       */
    o = veorq_u8(n, f);        /*      o1:o0  = f1:f0 + n1:n0       */
    return veorq_u8(o, g);     /*             = o1:o0 + g1:00       */
}


/*
 * GCM multiplication: c = a times b in GF(2^128)
 */
void mbedtls_aesce_gcm_mult(unsigned char c[16],
                            const unsigned char a[16],
                            const unsigned char b[16])
{
    uint8x16_t va, vb, vc;
    va = vrbitq_u8(vld1q_u8(&a[0]));
    vb = vrbitq_u8(vld1q_u8(&b[0]));
    vc = vrbitq_u8(poly_mult_reduce(poly_mult_128(va, vb)));
    vst1q_u8(&c[0], vc);
}

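/*
 * Illustrative sketch (not part of the library): GHASH uses this as
 * Y <- (Y ^ X) * H with hash subkey H, so a caller (e.g. gcm.c) would do
 * something like
 *
 *     // y ^= next 16-byte block of input ...
 *     mbedtls_aesce_gcm_mult(y, y, h);   // y = y * H in GF(2^128)
 *
 * The vrbitq_u8 calls above reverse the bit order within each byte to convert
 * between GHASH's bit-reflected representation of GF(2^128) and the
 * convention assumed by the polynomial-multiply instructions.
 */
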
#endif /* MBEDTLS_GCM_C */

#if defined(MBEDTLS_POP_TARGET_PRAGMA)
#if defined(__clang__)
#pragma clang attribute pop
#elif defined(__GNUC__)
#pragma GCC pop_options
#endif
#undef MBEDTLS_POP_TARGET_PRAGMA
#endif

#endif /* MBEDTLS_AESCE_HAVE_CODE */

#endif /* MBEDTLS_AESCE_C */