/*
 *  Armv8-A Cryptographic Extension support functions for Aarch64
 *
 *  Copyright The Mbed TLS Contributors
 *  SPDX-License-Identifier: Apache-2.0
 *
 *  Licensed under the Apache License, Version 2.0 (the "License"); you may
 *  not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 *  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#if defined(__clang__) && (__clang_major__ >= 4)

/* Ideally, we would simply use MBEDTLS_ARCH_IS_ARMV8 in the following #if,
 * but that is defined by build_info.h, and we need this block to happen first. */
#if defined(__ARM_ARCH)
#if __ARM_ARCH >= 8
#define MBEDTLS_AESCE_ARCH_IS_ARMV8
#endif
#endif

#if defined(MBEDTLS_AESCE_ARCH_IS_ARMV8) && !defined(__ARM_FEATURE_CRYPTO)
/* TODO: Re-consider the above once https://reviews.llvm.org/D131064 is merged.
 *
 * The intrinsic declarations are guarded by predefined ACLE macros in clang:
 * these are normally only enabled by the -march option on the command line.
 * By defining the macros ourselves we gain access to those declarations without
 * requiring -march on the command line.
 *
 * `arm_neon.h` could be included by any header file, so we put these defines
 * at the top of this file, before any includes.
 */
#define __ARM_FEATURE_CRYPTO 1
/* See: https://arm-software.github.io/acle/main/acle.html#cryptographic-extensions
 *
 * `__ARM_FEATURE_CRYPTO` is deprecated, but we need to continue to specify it
 * for older compilers.
 */
#define __ARM_FEATURE_AES 1
#define MBEDTLS_ENABLE_ARM_CRYPTO_EXTENSIONS_COMPILER_FLAG
#endif

#endif /* defined(__clang__) && (__clang_major__ >= 4) */

#include <string.h>
#include "common.h"

#if defined(MBEDTLS_AESCE_C)

#include "aesce.h"

#if defined(MBEDTLS_ARCH_IS_ARMV8)

/* Compiler version checks. */
#if defined(__clang__)
#   if __clang_major__ < 4
#       error "Minimum version of Clang for MBEDTLS_AESCE_C is 4.0."
#   endif
#elif defined(__GNUC__)
#   if __GNUC__ < 6
#       error "Minimum version of GCC for MBEDTLS_AESCE_C is 6.0."
#   endif
#elif defined(_MSC_VER)
/* TODO: We haven't verified MSVC from 1920 to 1928. If someone verifies that,
 * please update this and the documentation of `MBEDTLS_AESCE_C` in
 * `mbedtls_config.h`. */
#   if _MSC_VER < 1929
#       error "Minimum version of MSVC for MBEDTLS_AESCE_C is 2019 version 16.11.2."
#   endif
#endif

#ifdef __ARM_NEON
#include <arm_neon.h>

#if defined(MBEDTLS_ARCH_IS_ARM32)
#if defined(__clang__)
/* On clang for A32/T32, work around some missing intrinsics and types */

#ifndef vreinterpretq_p64_u8
#define vreinterpretq_p64_u8 (poly64x2_t)
#endif
#ifndef vreinterpretq_u8_p128
#define vreinterpretq_u8_p128 (uint8x16_t)
#endif
#ifndef vreinterpretq_u64_p64
#define vreinterpretq_u64_p64 (uint64x2_t)
#endif

typedef uint8x16_t poly128_t;

static inline poly128_t vmull_p64(poly64_t a, poly64_t b)
{
    poly128_t r;
    asm ("vmull.p64 %[r], %[a], %[b]": [r] "=w" (r) : [a] "w" (a), [b] "w" (b) :);
    return r;
}

static inline poly64x1_t vget_low_p64(poly64x2_t a)
{
    return (poly64x1_t) vget_low_u64(vreinterpretq_u64_p64(a));
}

static inline poly128_t vmull_high_p64(poly64x2_t a, poly64x2_t b)
{
    return vmull_p64((poly64_t) (vget_high_u64((uint64x2_t) a)),
                     (poly64_t) (vget_high_u64((uint64x2_t) b)));
}

#endif /* defined(__clang__) */

static inline uint8x16_t vrbitq_u8(uint8x16_t x)
{
    /* There is no vrbitq_u8 instruction in A32/T32, so provide
     * an equivalent non-Neon implementation. Reverse bit order in each
     * byte with 4x rbit, rev. */
    asm ("ldm %[p], { r2-r5 } \n\t"
         "rbit r2, r2 \n\t"
         "rev r2, r2 \n\t"
         "rbit r3, r3 \n\t"
         "rev r3, r3 \n\t"
         "rbit r4, r4 \n\t"
         "rev r4, r4 \n\t"
         "rbit r5, r5 \n\t"
         "rev r5, r5 \n\t"
         "stm %[p], { r2-r5 } \n\t"
         :
         /* Output: 16 bytes of memory pointed to by &x */
         "+m" (*(uint8_t(*)[16]) &x)
         :
         [p] "r" (&x)
         :
         "r2", "r3", "r4", "r5"
         );
    return x;
}

#endif /* defined(MBEDTLS_ARCH_IS_ARM32) */

#else
#error "Target does not support NEON instructions"
#endif

#if !(defined(__ARM_FEATURE_CRYPTO) || defined(__ARM_FEATURE_AES)) || \
    defined(MBEDTLS_ENABLE_ARM_CRYPTO_EXTENSIONS_COMPILER_FLAG)
#   if defined(__ARMCOMPILER_VERSION)
#       if __ARMCOMPILER_VERSION <= 6090000
#           error "Must use minimum -march=armv8-a+crypto for MBEDTLS_AESCE_C"
#       else
#           pragma clang attribute push (__attribute__((target("aes"))), apply_to=function)
#           define MBEDTLS_POP_TARGET_PRAGMA
#       endif
#   elif defined(__clang__)
#       pragma clang attribute push (__attribute__((target("aes"))), apply_to=function)
#       define MBEDTLS_POP_TARGET_PRAGMA
#   elif defined(__GNUC__)
#       pragma GCC push_options
#       pragma GCC target ("+crypto")
#       define MBEDTLS_POP_TARGET_PRAGMA
#   elif defined(_MSC_VER)
#       error "Required feature(__ARM_FEATURE_AES) is not enabled."
#   endif
#endif /* !(__ARM_FEATURE_CRYPTO || __ARM_FEATURE_AES) ||
          MBEDTLS_ENABLE_ARM_CRYPTO_EXTENSIONS_COMPILER_FLAG */

#if defined(__linux__) && !defined(MBEDTLS_AES_USE_HARDWARE_ONLY)

#include <asm/hwcap.h>
#include <sys/auxv.h>

signed char mbedtls_aesce_has_support_result = -1;

#if !defined(MBEDTLS_AES_USE_HARDWARE_ONLY)
/*
 * AES instruction support detection routine
 */
int mbedtls_aesce_has_support_impl(void)
{
    /* To avoid many calls to getauxval, cache the result. This is
     * thread-safe, because we store the result in a char, so it cannot
     * be vulnerable to non-atomic updates.
     * It is possible that we could end up setting the result more than
     * once, but that is harmless.
     */
    if (mbedtls_aesce_has_support_result == -1) {
        unsigned long auxval = getauxval(AT_HWCAP);
        if ((auxval & (HWCAP_ASIMD | HWCAP_AES)) ==
            (HWCAP_ASIMD | HWCAP_AES)) {
            mbedtls_aesce_has_support_result = 1;
        } else {
            mbedtls_aesce_has_support_result = 0;
        }
    }
    return mbedtls_aesce_has_support_result;
}
#endif

#endif /* defined(__linux__) && !defined(MBEDTLS_AES_USE_HARDWARE_ONLY) */

/* Single round of AESCE encryption */
#define AESCE_ENCRYPT_ROUND                   \
    block = vaeseq_u8(block, vld1q_u8(keys)); \
    block = vaesmcq_u8(block);                \
    keys += 16
/* Two rounds of AESCE encryption */
#define AESCE_ENCRYPT_ROUND_X2 AESCE_ENCRYPT_ROUND; AESCE_ENCRYPT_ROUND
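/* Note: vaeseq_u8() performs AddRoundKey + SubBytes + ShiftRows and
 * vaesmcq_u8() performs MixColumns, so AESCE_ENCRYPT_ROUND is one full AES
 * round. For 10/12/14 rounds (128/192/256-bit keys) the unrolled code below
 * runs 9/11/13 of these, followed by a final round without MixColumns. */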

MBEDTLS_OPTIMIZE_FOR_PERFORMANCE
static uint8x16_t aesce_encrypt_block(uint8x16_t block,
                                      unsigned char *keys,
                                      int rounds)
{
    /* 10, 12 or 14 rounds. Unroll loop. */
    if (rounds == 10) {
        goto rounds_10;
    }
    if (rounds == 12) {
        goto rounds_12;
    }
    AESCE_ENCRYPT_ROUND_X2;
rounds_12:
    AESCE_ENCRYPT_ROUND_X2;
rounds_10:
    AESCE_ENCRYPT_ROUND_X2;
    AESCE_ENCRYPT_ROUND_X2;
    AESCE_ENCRYPT_ROUND_X2;
    AESCE_ENCRYPT_ROUND_X2;
    AESCE_ENCRYPT_ROUND;

    /* AES AddRoundKey for the previous round.
     * SubBytes, ShiftRows for the final round. */
    block = vaeseq_u8(block, vld1q_u8(keys));
    keys += 16;

    /* Final round: no MixColumns */

    /* Final AddRoundKey */
    block = veorq_u8(block, vld1q_u8(keys));

    return block;
}

/* Single round of AESCE decryption
 *
 * AES AddRoundKey, SubBytes, ShiftRows
 *
 *      block = vaesdq_u8(block, vld1q_u8(keys));
 *
 * AES inverse MixColumns for the next round.
 *
 * This means that we switch the order of the inverse AddRoundKey and
 * inverse MixColumns operations. We have to do this as AddRoundKey is
 * done in an atomic instruction together with the inverses of SubBytes
 * and ShiftRows.
 *
 * It works because MixColumns is a linear operation over GF(2^8) and
 * AddRoundKey is an exclusive or, which is equivalent to addition over
 * GF(2^8). (The inverse of MixColumns needs to be applied to the
 * affected round keys separately, which has been done when the
 * decryption round keys were calculated.)
 *
 *      block = vaesimcq_u8(block);
 */
#define AESCE_DECRYPT_ROUND                   \
    block = vaesdq_u8(block, vld1q_u8(keys)); \
    block = vaesimcq_u8(block);               \
    keys += 16
/* Two rounds of AESCE decryption */
#define AESCE_DECRYPT_ROUND_X2 AESCE_DECRYPT_ROUND; AESCE_DECRYPT_ROUND

static uint8x16_t aesce_decrypt_block(uint8x16_t block,
                                      unsigned char *keys,
                                      int rounds)
{
    /* 10, 12 or 14 rounds. Unroll loop. */
    if (rounds == 10) {
        goto rounds_10;
    }
    if (rounds == 12) {
        goto rounds_12;
    }
    AESCE_DECRYPT_ROUND_X2;
rounds_12:
    AESCE_DECRYPT_ROUND_X2;
rounds_10:
    AESCE_DECRYPT_ROUND_X2;
    AESCE_DECRYPT_ROUND_X2;
    AESCE_DECRYPT_ROUND_X2;
    AESCE_DECRYPT_ROUND_X2;
    AESCE_DECRYPT_ROUND;

    /* The inverses of AES AddRoundKey, SubBytes, ShiftRows finishing up the
     * last full round. */
    block = vaesdq_u8(block, vld1q_u8(keys));
    keys += 16;

    /* Inverse AddRoundKey for inverting the initial round key addition. */
    block = veorq_u8(block, vld1q_u8(keys));

    return block;
}

/*
 * AES-ECB block en(de)cryption
 */
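/* Note: callers are expected to have checked that the CPU really supports the
 * AES instructions (e.g. via the runtime detection above on Linux, when
 * MBEDTLS_AES_USE_HARDWARE_ONLY is not set); on a core without the
 * Cryptographic Extension, the vaeseq_u8()/vaesdq_u8() instructions used here
 * would be undefined instructions. */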
int mbedtls_aesce_crypt_ecb(mbedtls_aes_context *ctx,
                            int mode,
                            const unsigned char input[16],
                            unsigned char output[16])
{
    uint8x16_t block = vld1q_u8(&input[0]);
    unsigned char *keys = (unsigned char *) (ctx->buf + ctx->rk_offset);

    if (mode == MBEDTLS_AES_ENCRYPT) {
        block = aesce_encrypt_block(block, keys, ctx->nr);
    } else {
        block = aesce_decrypt_block(block, keys, ctx->nr);
    }
    vst1q_u8(&output[0], block);

    return 0;
}

/*
 * Compute decryption round keys from encryption round keys
 */
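/* This follows the "equivalent inverse cipher" construction from FIPS-197:
 * the decryption round keys are the encryption round keys taken in reverse
 * order, with the inverse MixColumns (vaesimcq_u8) applied to every round key
 * except the first and the last. */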
void mbedtls_aesce_inverse_key(unsigned char *invkey,
                               const unsigned char *fwdkey,
                               int nr)
{
    int i, j;
    j = nr;
    vst1q_u8(invkey, vld1q_u8(fwdkey + j * 16));
    for (i = 1, j--; j > 0; i++, j--) {
        vst1q_u8(invkey + i * 16,
                 vaesimcq_u8(vld1q_u8(fwdkey + j * 16)));
    }
    vst1q_u8(invkey + i * 16, vld1q_u8(fwdkey + j * 16));

}

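/* aes_rot_word() below rotates a 32-bit word right by 8 bits, e.g.
 * 0x0A0B0C0D -> 0x0D0A0B0C, which corresponds to the FIPS-197 RotWord() byte
 * rotation when round-key words are held in little-endian byte order. */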
static inline uint32_t aes_rot_word(uint32_t word)
{
    return (word << (32 - 8)) | (word >> 8);
}

static inline uint32_t aes_sub_word(uint32_t in)
{
    uint8x16_t v = vreinterpretq_u8_u32(vdupq_n_u32(in));
    uint8x16_t zero = vdupq_n_u8(0);

    /* vaeseq_u8 does both SubBytes and ShiftRows. Taking the first row yields
     * the correct result as ShiftRows doesn't change the first row. */
    v = vaeseq_u8(zero, v);
    return vgetq_lane_u32(vreinterpretq_u32_u8(v), 0);
}

/*
 * Key expansion function
 */
static void aesce_setkey_enc(unsigned char *rk,
                             const unsigned char *key,
                             const size_t key_bit_length)
{
    static uint8_t const rcon[] = { 0x01, 0x02, 0x04, 0x08, 0x10,
                                    0x20, 0x40, 0x80, 0x1b, 0x36 };
    /* See https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.197.pdf
     * - Section 5, Nr = Nk + 6
     * - Section 5.2, the length of round keys is Nb*(Nr+1)
     */
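    /* The loop below implements the Section 5.2 recurrence, one block of Nk
     * words per iteration: w[i] = w[i-Nk] ^ temp, where
     *   temp = SubWord(RotWord(w[i-1])) ^ Rcon[i/Nk]  if i mod Nk == 0,
     *   temp = SubWord(w[i-1])                        if Nk > 6 and i mod Nk == 4,
     *   temp = w[i-1]                                 otherwise. */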
    const uint32_t key_len_in_words = key_bit_length / 32;   /* Nk */
    const size_t round_key_len_in_words = 4;                  /* Nb */
    const size_t rounds_needed = key_len_in_words + 6;        /* Nr */
    const size_t round_keys_len_in_words =
        round_key_len_in_words * (rounds_needed + 1);         /* Nb*(Nr+1) */
    const uint32_t *rko_end = (uint32_t *) rk + round_keys_len_in_words;

    memcpy(rk, key, key_len_in_words * 4);

    for (uint32_t *rki = (uint32_t *) rk;
         rki + key_len_in_words < rko_end;
         rki += key_len_in_words) {

        size_t iteration = (rki - (uint32_t *) rk) / key_len_in_words;
        uint32_t *rko;
        rko = rki + key_len_in_words;
        rko[0] = aes_rot_word(aes_sub_word(rki[key_len_in_words - 1]));
        rko[0] ^= rcon[iteration] ^ rki[0];
        rko[1] = rko[0] ^ rki[1];
        rko[2] = rko[1] ^ rki[2];
        rko[3] = rko[2] ^ rki[3];
        if (rko + key_len_in_words > rko_end) {
            /* Do not write overflow words. */
            continue;
        }
#if !defined(MBEDTLS_AES_ONLY_128_BIT_KEY_LENGTH)
        switch (key_bit_length) {
            case 128:
                break;
            case 192:
                rko[4] = rko[3] ^ rki[4];
                rko[5] = rko[4] ^ rki[5];
                break;
            case 256:
                rko[4] = aes_sub_word(rko[3]) ^ rki[4];
                rko[5] = rko[4] ^ rki[5];
                rko[6] = rko[5] ^ rki[6];
                rko[7] = rko[6] ^ rki[7];
                break;
        }
#endif /* !MBEDTLS_AES_ONLY_128_BIT_KEY_LENGTH */
    }
}

/*
 * Key expansion, wrapper
 */
int mbedtls_aesce_setkey_enc(unsigned char *rk,
                             const unsigned char *key,
                             size_t bits)
{
    switch (bits) {
        case 128:
        case 192:
        case 256:
            aesce_setkey_enc(rk, key, bits);
            break;
        default:
            return MBEDTLS_ERR_AES_INVALID_KEY_LENGTH;
    }

    return 0;
}

#if defined(MBEDTLS_GCM_C)

#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 5
/* Some intrinsics are not available for GCC 5.X. */
#define vreinterpretq_p64_u8(a) ((poly64x2_t) a)
#define vreinterpretq_u8_p128(a) ((uint8x16_t) a)
static inline poly64_t vget_low_p64(poly64x2_t __a)
{
    uint64x2_t tmp = (uint64x2_t) (__a);
    uint64x1_t lo = vcreate_u64(vgetq_lane_u64(tmp, 0));
    return (poly64_t) (lo);
}
#endif /* !__clang__ && __GNUC__ && __GNUC__ == 5 */

/* vmull_p64/vmull_high_p64 wrappers.
 *
 * Older compilers miss some intrinsic functions for `poly*_t`. We use
 * uint8x16_t and uint8x16x3_t as input/output parameters.
 */
#if defined(__GNUC__) && !defined(__clang__)
/* GCC reports an incompatible type error without the cast: GCC treats
 * poly64_t and poly64x1_t as different types, unlike MSVC and Clang. */
#define MBEDTLS_VMULL_P64(a, b) vmull_p64((poly64_t) a, (poly64_t) b)
#else
/* MSVC reports `error C2440: 'type cast'` with the cast, and Clang accepts it
 * with or without the cast (poly64_t and poly64x1_t appear to be the same type
 * there), so no cast is used here. */
#define MBEDTLS_VMULL_P64(a, b) vmull_p64(a, b)
#endif
static inline uint8x16_t pmull_low(uint8x16_t a, uint8x16_t b)
{

    return vreinterpretq_u8_p128(
        MBEDTLS_VMULL_P64(
            vget_low_p64(vreinterpretq_p64_u8(a)),
            vget_low_p64(vreinterpretq_p64_u8(b))
            ));
}

static inline uint8x16_t pmull_high(uint8x16_t a, uint8x16_t b)
{
    return vreinterpretq_u8_p128(
        vmull_high_p64(vreinterpretq_p64_u8(a),
                       vreinterpretq_p64_u8(b)));
}

/* GHASH does 128b polynomial multiplication on blocks in GF(2^128) defined by
 * `x^128 + x^7 + x^2 + x + 1`.
 *
 * Arm64 only has 64b->128b polynomial multipliers, so we need four 64b
 * multiplies to implement one 128b multiplication.
 *
 * `poly_mult_128` executes the polynomial multiplication and outputs a 256b
 * result represented by three 128b values, as a code size optimization.
 *
 * Output layout:
 * |            |             |             |
 * |------------|-------------|-------------|
 * | ret.val[0] | h3:h2:00:00 | high   128b |
 * | ret.val[1] |   :m2:m1:00 | middle 128b |
 * | ret.val[2] |   :  :l1:l0 | low    128b |
 */
static inline uint8x16x3_t poly_mult_128(uint8x16_t a, uint8x16_t b)
{
    uint8x16x3_t ret;
    uint8x16_t h, m, l; /* retval high/middle/low */
    uint8x16_t c, d, e;

    h = pmull_high(a, b);  /* h3:h2:00:00 = a1*b1 */
    l = pmull_low(a, b);   /*   :  :l1:l0 = a0*b0 */
    c = vextq_u8(b, b, 8); /*        c1:c0 = b0:b1 */
    d = pmull_high(a, c);  /*   :d2:d1:00 = a1*b0 */
    e = pmull_low(a, c);   /*   :e2:e1:00 = a0*b1 */
    m = veorq_u8(d, e);    /*   :m2:m1:00 = d + e */

    ret.val[0] = h;
    ret.val[1] = m;
    ret.val[2] = l;
    return ret;
}

/*
 * Modulo reduction.
 *
 * See: https://www.researchgate.net/publication/285612706_Implementing_GCM_on_ARMv8
 *
 * Section 4.3
 *
 * Modular reduction is slightly more complex. Write the GCM modulus as f(z) =
 * z^128 + r(z), where r(z) = z^7 + z^2 + z + 1. The well-known approach is to
 * consider that z^128 ≡ r(z) (mod z^128 + r(z)), allowing us to write the
 * 256-bit operand to be reduced as a(z) = h(z)z^128 + l(z) ≡ h(z)r(z) + l(z).
 * That is, we simply multiply the higher part of the operand by r(z) and add
 * it to l(z). If the result is still larger than 128 bits, we reduce again.
 */
static inline uint8x16_t poly_mult_reduce(uint8x16x3_t input)
{
    uint8x16_t const ZERO = vdupq_n_u8(0);

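    /* 0x87 below is the bit pattern of r(z) = z^7 + z^2 + z + 1; the
     * vshrq_n_u64() keeps a single copy of it in the low byte of each 64-bit
     * lane, so that pmull_low()/pmull_high() can multiply either half of the
     * operand by r(z). */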
    uint64x2_t r = vreinterpretq_u64_u8(vdupq_n_u8(0x87));
#if defined(__GNUC__)
    /* Use 'asm' as an optimisation barrier to prevent loading MODULO from
     * memory; this is for GNUC-compatible compilers.
     */
    asm ("" : "+w" (r));
#endif
    uint8x16_t const MODULO = vreinterpretq_u8_u64(vshrq_n_u64(r, 64 - 8));
    uint8x16_t h, m, l; /* input high/middle/low 128b */
    uint8x16_t c, d, e, f, g, n, o;
    h = input.val[0];          /* h3:h2:00:00 */
    m = input.val[1];          /*   :m2:m1:00 */
    l = input.val[2];          /*   :  :l1:l0 */
    c = pmull_high(h, MODULO); /*   :c2:c1:00 = reduction of h3 */
    d = pmull_low(h, MODULO);  /*   :  :d1:d0 = reduction of h2 */
    e = veorq_u8(c, m);        /*   :e2:e1:00 = m2:m1:00 + c2:c1:00 */
    f = pmull_high(e, MODULO); /*   :  :f1:f0 = reduction of e2 */
    g = vextq_u8(ZERO, e, 8);  /*   :  :g1:00 = e1:00 */
    n = veorq_u8(d, l);        /*   :  :n1:n0 = d1:d0 + l1:l0 */
    o = veorq_u8(n, f);        /*       o1:o0 = f1:f0 + n1:n0 */
    return veorq_u8(o, g);     /*             = o1:o0 + g1:00 */
}

/*
 * GCM multiplication: c = a times b in GF(2^128)
 */
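/* GCM/GHASH uses a bit-reflected representation of field elements, so the
 * operands and the result are bit-reversed within each byte (vrbitq_u8) around
 * the carry-less multiply and reduction. */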
void mbedtls_aesce_gcm_mult(unsigned char c[16],
                            const unsigned char a[16],
                            const unsigned char b[16])
{
    uint8x16_t va, vb, vc;
    va = vrbitq_u8(vld1q_u8(&a[0]));
    vb = vrbitq_u8(vld1q_u8(&b[0]));
    vc = vrbitq_u8(poly_mult_reduce(poly_mult_128(va, vb)));
    vst1q_u8(&c[0], vc);
}

#endif /* MBEDTLS_GCM_C */

#if defined(MBEDTLS_POP_TARGET_PRAGMA)
#if defined(__clang__)
#pragma clang attribute pop
#elif defined(__GNUC__)
#pragma GCC pop_options
#endif
#undef MBEDTLS_POP_TARGET_PRAGMA
#endif

#endif /* MBEDTLS_ARCH_IS_ARMV8 */

#endif /* MBEDTLS_AESCE_C */