ext: Pull in tinycrypt v0.2.6

Zephyr 1.9 moves to TinyCrypt v0.2.7, which introduces a breaking API
change. This makes things challenging for mcuboot, which needs to be
able to work across multiple platforms.

To help with this, bring in the last working version, TinyCrypt
v0.2.6, from https://github.com/01org/tinycrypt. TinyCrypt is
released under a 3-clause BSD-style license, with parts under the
micro-ecc license, which is a 2-clause license. Please see
ext/tinycrypt/LICENSE for details.
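
For reference, a minimal sketch of how callers use the AES-CBC API in
this v0.2.6 import. The encrypt_block() helper below is hypothetical
(not part of this patch); the types, constants, and function
signatures are taken from the tinycrypt headers and sources added
here:

    #include <tinycrypt/aes.h>
    #include <tinycrypt/cbc_mode.h>
    #include <tinycrypt/constants.h>

    /* Hypothetical helper: AES-128-CBC encrypt one 16-byte block.
     * The v0.2.6 CBC API writes iv || ciphertext to out, so outlen
     * must be inlen + TC_AES_BLOCK_SIZE. */
    int32_t encrypt_block(uint8_t out[2 * TC_AES_BLOCK_SIZE],
                          const uint8_t in[TC_AES_BLOCK_SIZE],
                          const uint8_t key[TC_AES_KEY_SIZE],
                          const uint8_t iv[TC_AES_BLOCK_SIZE])
    {
            struct tc_aes_key_sched_struct sched;

            if (tc_aes128_set_encrypt_key(&sched, key) != TC_CRYPTO_SUCCESS) {
                    return TC_CRYPTO_FAIL;
            }
            return tc_cbc_mode_encrypt(out, 2 * TC_AES_BLOCK_SIZE,
                                       in, TC_AES_BLOCK_SIZE, iv, &sched);
    }
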
Signed-off-by: David Brown <david.brown@linaro.org>
diff --git a/ext/tinycrypt/lib/source/aes_decrypt.c b/ext/tinycrypt/lib/source/aes_decrypt.c
new file mode 100644
index 0000000..2e4e3bc
--- /dev/null
+++ b/ext/tinycrypt/lib/source/aes_decrypt.c
@@ -0,0 +1,164 @@
+/* aes_decrypt.c - TinyCrypt implementation of AES decryption procedure */
+
+/*
+ * Copyright (C) 2015 by Intel Corporation, All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * - Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <tinycrypt/aes.h>
+#include <tinycrypt/constants.h>
+#include <tinycrypt/utils.h>
+
+#define ZERO_BYTE 0x00
+
+static const uint8_t inv_sbox[256] = {
+ 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e,
+ 0x81, 0xf3, 0xd7, 0xfb, 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87,
+ 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, 0x54, 0x7b, 0x94, 0x32,
+ 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
+ 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49,
+ 0x6d, 0x8b, 0xd1, 0x25, 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16,
+ 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, 0x6c, 0x70, 0x48, 0x50,
+ 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
+ 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05,
+ 0xb8, 0xb3, 0x45, 0x06, 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02,
+ 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, 0x3a, 0x91, 0x11, 0x41,
+ 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
+ 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8,
+ 0x1c, 0x75, 0xdf, 0x6e, 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89,
+ 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, 0xfc, 0x56, 0x3e, 0x4b,
+ 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
+ 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59,
+ 0x27, 0x80, 0xec, 0x5f, 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d,
+ 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, 0xa0, 0xe0, 0x3b, 0x4d,
+ 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
+ 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63,
+ 0x55, 0x21, 0x0c, 0x7d
+};
+
+int32_t tc_aes128_set_decrypt_key(TCAesKeySched_t s, const uint8_t *k)
+{
+ return tc_aes128_set_encrypt_key(s, k);
+}
+
+#define mult8(a)(_double_byte(_double_byte(_double_byte(a))))
+#define mult9(a)(mult8(a)^(a))
+#define multb(a)(mult8(a)^_double_byte(a)^(a))
+#define multd(a)(mult8(a)^_double_byte(_double_byte(a))^(a))
+#define multe(a)(mult8(a)^_double_byte(_double_byte(a))^_double_byte(a))
+
+static inline void mult_row_column(uint8_t *out, const uint8_t *in)
+{
+ out[0] = multe(in[0]) ^ multb(in[1]) ^ multd(in[2]) ^ mult9(in[3]);
+ out[1] = mult9(in[0]) ^ multe(in[1]) ^ multb(in[2]) ^ multd(in[3]);
+ out[2] = multd(in[0]) ^ mult9(in[1]) ^ multe(in[2]) ^ multb(in[3]);
+ out[3] = multb(in[0]) ^ multd(in[1]) ^ mult9(in[2]) ^ multe(in[3]);
+}
+
+static inline void inv_mix_columns(uint8_t *s)
+{
+ uint8_t t[Nb*Nk];
+
+ mult_row_column(t, s);
+ mult_row_column(&t[Nb], s+Nb);
+ mult_row_column(&t[2*Nb], s+(2*Nb));
+ mult_row_column(&t[3*Nb], s+(3*Nb));
+ (void)_copy(s, sizeof(t), t, sizeof(t));
+}
+
+static inline void add_round_key(uint8_t *s, const uint32_t *k)
+{
+ s[0] ^= (uint8_t)(k[0] >> 24); s[1] ^= (uint8_t)(k[0] >> 16);
+ s[2] ^= (uint8_t)(k[0] >> 8); s[3] ^= (uint8_t)(k[0]);
+ s[4] ^= (uint8_t)(k[1] >> 24); s[5] ^= (uint8_t)(k[1] >> 16);
+ s[6] ^= (uint8_t)(k[1] >> 8); s[7] ^= (uint8_t)(k[1]);
+ s[8] ^= (uint8_t)(k[2] >> 24); s[9] ^= (uint8_t)(k[2] >> 16);
+ s[10] ^= (uint8_t)(k[2] >> 8); s[11] ^= (uint8_t)(k[2]);
+ s[12] ^= (uint8_t)(k[3] >> 24); s[13] ^= (uint8_t)(k[3] >> 16);
+ s[14] ^= (uint8_t)(k[3] >> 8); s[15] ^= (uint8_t)(k[3]);
+}
+
+static inline void inv_sub_bytes(uint8_t *s)
+{
+ uint32_t i;
+
+ for (i = 0; i < (Nb*Nk); ++i) {
+ s[i] = inv_sbox[s[i]];
+ }
+}
+
+/*
+ * This inv_shift_rows also implements the matrix flip required for
+ * inv_mix_columns, but performs it here to reduce the number of memory
+ * operations.
+ */
+static inline void inv_shift_rows(uint8_t *s)
+{
+ uint8_t t[Nb*Nk];
+
+ t[0] = s[0]; t[1] = s[13]; t[2] = s[10]; t[3] = s[7];
+ t[4] = s[4]; t[5] = s[1]; t[6] = s[14]; t[7] = s[11];
+ t[8] = s[8]; t[9] = s[5]; t[10] = s[2]; t[11] = s[15];
+ t[12] = s[12]; t[13] = s[9]; t[14] = s[6]; t[15] = s[3];
+ (void)_copy(s, sizeof(t), t, sizeof(t));
+}
+
+int32_t tc_aes_decrypt(uint8_t *out, const uint8_t *in, const TCAesKeySched_t s)
+{
+ uint8_t state[Nk*Nb];
+ uint32_t i;
+
+ if (out == (uint8_t *) 0) {
+ return TC_CRYPTO_FAIL;
+ } else if (in == (const uint8_t *) 0) {
+ return TC_CRYPTO_FAIL;
+ } else if (s == (TCAesKeySched_t) 0) {
+ return TC_CRYPTO_FAIL;
+ }
+
+ (void)_copy(state, sizeof(state), in, sizeof(state));
+
+ add_round_key(state, s->words + Nb*Nr);
+
+ for (i = Nr-1; i > 0; --i) {
+ inv_shift_rows(state);
+ inv_sub_bytes(state);
+ add_round_key(state, s->words + Nb*i);
+ inv_mix_columns(state);
+ }
+
+ inv_shift_rows(state);
+ inv_sub_bytes(state);
+ add_round_key(state, s->words);
+
+ (void)_copy(out, sizeof(state), state, sizeof(state));
+ /*zeroing out one byte state buffer */
+ _set(state, ZERO_BYTE, sizeof(state));
+
+ return TC_CRYPTO_SUCCESS;
+}
diff --git a/ext/tinycrypt/lib/source/aes_encrypt.c b/ext/tinycrypt/lib/source/aes_encrypt.c
new file mode 100644
index 0000000..6bc73a5
--- /dev/null
+++ b/ext/tinycrypt/lib/source/aes_encrypt.c
@@ -0,0 +1,191 @@
+/* aes_encrypt.c - TinyCrypt implementation of AES encryption procedure */
+
+/*
+ * Copyright (C) 2015 by Intel Corporation, All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * - Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <tinycrypt/aes.h>
+#include <tinycrypt/utils.h>
+#include <tinycrypt/constants.h>
+
+static const uint8_t sbox[256] = {
+ 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b,
+ 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
+ 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26,
+ 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
+ 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2,
+ 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
+ 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed,
+ 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
+ 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f,
+ 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
+ 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec,
+ 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
+ 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14,
+ 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
+ 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d,
+ 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
+ 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f,
+ 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
+ 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11,
+ 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
+ 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f,
+ 0xb0, 0x54, 0xbb, 0x16
+};
+
+static inline uint32_t rotword(uint32_t a)
+{
+ return (((a) >> 24)|((a) << 8));
+}
+
+#define subbyte(a, o)(sbox[((a) >> (o))&0xff] << (o))
+#define subword(a)(subbyte(a, 24)|subbyte(a, 16)|subbyte(a, 8)|subbyte(a, 0))
+
+int32_t tc_aes128_set_encrypt_key(TCAesKeySched_t s, const uint8_t *k)
+{
+ const uint32_t rconst[11] = {
+ 0x00000000, 0x01000000, 0x02000000, 0x04000000, 0x08000000, 0x10000000,
+ 0x20000000, 0x40000000, 0x80000000, 0x1b000000, 0x36000000
+ };
+ uint32_t i;
+ uint32_t t;
+
+ if (s == (TCAesKeySched_t) 0) {
+ return TC_CRYPTO_FAIL;
+ } else if (k == (const uint8_t *) 0) {
+ return TC_CRYPTO_FAIL;
+ }
+
+ for (i = 0; i < Nk; ++i) {
+ s->words[i] = (k[Nb*i]<<24) | (k[Nb*i+1]<<16) |
+ (k[Nb*i+2]<<8) | (k[Nb*i+3]);
+ }
+
+ for (; i < (Nb*(Nr+1)); ++i) {
+ t = s->words[i-1];
+ if ((i % Nk) == 0) {
+ t = subword(rotword(t)) ^ rconst[i/Nk];
+ }
+ s->words[i] = s->words[i-Nk] ^ t;
+ }
+
+ return TC_CRYPTO_SUCCESS;
+}
+
+static inline void add_round_key(uint8_t *s, const uint32_t *k)
+{
+ s[0] ^= (uint8_t)(k[0] >> 24); s[1] ^= (uint8_t)(k[0] >> 16);
+ s[2] ^= (uint8_t)(k[0] >> 8); s[3] ^= (uint8_t)(k[0]);
+ s[4] ^= (uint8_t)(k[1] >> 24); s[5] ^= (uint8_t)(k[1] >> 16);
+ s[6] ^= (uint8_t)(k[1] >> 8); s[7] ^= (uint8_t)(k[1]);
+ s[8] ^= (uint8_t)(k[2] >> 24); s[9] ^= (uint8_t)(k[2] >> 16);
+ s[10] ^= (uint8_t)(k[2] >> 8); s[11] ^= (uint8_t)(k[2]);
+ s[12] ^= (uint8_t)(k[3] >> 24); s[13] ^= (uint8_t)(k[3] >> 16);
+ s[14] ^= (uint8_t)(k[3] >> 8); s[15] ^= (uint8_t)(k[3]);
+}
+
+static inline void sub_bytes(uint8_t *s)
+{
+ uint32_t i;
+
+ for (i = 0; i < (Nb*Nk); ++i) {
+ s[i] = sbox[s[i]];
+ }
+}
+
+#define triple(a)(_double_byte(a)^(a))
+
+static inline void mult_row_column(uint8_t *out, const uint8_t *in)
+{
+ out[0] = _double_byte(in[0]) ^ triple(in[1]) ^ in[2] ^ in[3];
+ out[1] = in[0] ^ _double_byte(in[1]) ^ triple(in[2]) ^ in[3];
+ out[2] = in[0] ^ in[1] ^ _double_byte(in[2]) ^ triple(in[3]);
+ out[3] = triple(in[0]) ^ in[1] ^ in[2] ^ _double_byte(in[3]);
+}
+
+static inline void mix_columns(uint8_t *s)
+{
+ uint8_t t[Nb*Nk];
+
+ mult_row_column(t, s);
+ mult_row_column(&t[Nb], s+Nb);
+ mult_row_column(&t[2*Nb], s+(2*Nb));
+ mult_row_column(&t[3*Nb], s+(3*Nb));
+ (void) _copy(s, sizeof(t), t, sizeof(t));
+}
+
+/*
+ * This shift_rows also implements the matrix flip required for mix_columns, but
+ * performs it here to reduce the number of memory operations.
+ */
+static inline void shift_rows(uint8_t *s)
+{
+ uint8_t t[Nb*Nk];
+
+ t[0] = s[0]; t[1] = s[5]; t[2] = s[10]; t[3] = s[15];
+ t[4] = s[4]; t[5] = s[9]; t[6] = s[14]; t[7] = s[3];
+ t[8] = s[8]; t[9] = s[13]; t[10] = s[2]; t[11] = s[7];
+ t[12] = s[12]; t[13] = s[1]; t[14] = s[6]; t[15] = s[11];
+ (void) _copy(s, sizeof(t), t, sizeof(t));
+}
+
+int32_t tc_aes_encrypt(uint8_t *out, const uint8_t *in, const TCAesKeySched_t s)
+{
+ uint8_t state[Nk*Nb];
+ uint32_t i;
+
+ if (out == (uint8_t *) 0) {
+ return TC_CRYPTO_FAIL;
+ } else if (in == (const uint8_t *) 0) {
+ return TC_CRYPTO_FAIL;
+ } else if (s == (TCAesKeySched_t) 0) {
+ return TC_CRYPTO_FAIL;
+ }
+
+ (void)_copy(state, sizeof(state), in, sizeof(state));
+ add_round_key(state, s->words);
+
+ for (i = 0; i < (Nr-1); ++i) {
+ sub_bytes(state);
+ shift_rows(state);
+ mix_columns(state);
+ add_round_key(state, s->words + Nb*(i+1));
+ }
+
+ sub_bytes(state);
+ shift_rows(state);
+ add_round_key(state, s->words + Nb*(i+1));
+
+ (void)_copy(out, sizeof(state), state, sizeof(state));
+
+ /* zeroing out the state buffer */
+ _set(state, TC_ZERO_BYTE, sizeof(state));
+
+ return TC_CRYPTO_SUCCESS;
+}
diff --git a/ext/tinycrypt/lib/source/cbc_mode.c b/ext/tinycrypt/lib/source/cbc_mode.c
new file mode 100644
index 0000000..8163e0d
--- /dev/null
+++ b/ext/tinycrypt/lib/source/cbc_mode.c
@@ -0,0 +1,113 @@
+/* cbc_mode.c - TinyCrypt implementation of CBC mode encryption & decryption */
+
+/*
+ * Copyright (C) 2015 by Intel Corporation, All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * - Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <tinycrypt/cbc_mode.h>
+#include <tinycrypt/constants.h>
+#include <tinycrypt/utils.h>
+
+int32_t tc_cbc_mode_encrypt(uint8_t *out, uint32_t outlen, const uint8_t *in,
+ uint32_t inlen, const uint8_t *iv,
+ const TCAesKeySched_t sched)
+{
+
+ uint8_t buffer[TC_AES_BLOCK_SIZE];
+ uint32_t n, m;
+
+ /* input sanity check: */
+ if (out == (uint8_t *) 0 ||
+ in == (const uint8_t *) 0 ||
+ sched == (TCAesKeySched_t) 0 ||
+ inlen == 0 ||
+ outlen == 0 ||
+ (inlen % TC_AES_BLOCK_SIZE) != 0 ||
+ (outlen % TC_AES_BLOCK_SIZE) != 0 ||
+ outlen != inlen + TC_AES_BLOCK_SIZE) {
+ return TC_CRYPTO_FAIL;
+ }
+
+ /* copy iv to the buffer */
+ (void)_copy(buffer, TC_AES_BLOCK_SIZE, iv, TC_AES_BLOCK_SIZE);
+ /* copy iv to the output buffer */
+ (void)_copy(out, TC_AES_BLOCK_SIZE, iv, TC_AES_BLOCK_SIZE);
+ out += TC_AES_BLOCK_SIZE;
+
+ for (n = m = 0; n < inlen; ++n) {
+ buffer[m++] ^= *in++;
+ if (m == TC_AES_BLOCK_SIZE) {
+ (void)tc_aes_encrypt(buffer, buffer, sched);
+ (void)_copy(out, TC_AES_BLOCK_SIZE,
+ buffer, TC_AES_BLOCK_SIZE);
+ out += TC_AES_BLOCK_SIZE;
+ m = 0;
+ }
+ }
+
+ return TC_CRYPTO_SUCCESS;
+}
+
+int32_t tc_cbc_mode_decrypt(uint8_t *out, uint32_t outlen, const uint8_t *in,
+ uint32_t inlen, const uint8_t *iv,
+ const TCAesKeySched_t sched)
+{
+ uint8_t buffer[TC_AES_BLOCK_SIZE];
+ const uint8_t *p;
+ uint32_t n, m;
+
+ /* sanity check the inputs */
+ if (out == (uint8_t *) 0 ||
+ in == (const uint8_t *) 0 ||
+ sched == (TCAesKeySched_t) 0 ||
+ inlen == 0 ||
+ outlen == 0 ||
+ (inlen % TC_AES_BLOCK_SIZE) != 0 ||
+ (outlen % TC_AES_BLOCK_SIZE) != 0 ||
+ outlen != inlen - TC_AES_BLOCK_SIZE) {
+ return TC_CRYPTO_FAIL;
+ }
+
+ /*
+ * Note that in == iv + ciphertext, i.e. the iv and the ciphertext are
+ * contiguous. This allows for a very efficient decryption algorithm
+ * that would not otherwise be possible.
+ */
+ p = iv;
+ for (n = m = 0; n < inlen; ++n) {
+ if ((n % TC_AES_BLOCK_SIZE) == 0) {
+ (void)tc_aes_decrypt(buffer, in, sched);
+ in += TC_AES_BLOCK_SIZE;
+ m = 0;
+ }
+ *out++ = buffer[m++] ^ *p++;
+ }
+
+ return TC_CRYPTO_SUCCESS;
+}
diff --git a/ext/tinycrypt/lib/source/ccm_mode.c b/ext/tinycrypt/lib/source/ccm_mode.c
new file mode 100644
index 0000000..7b6d485
--- /dev/null
+++ b/ext/tinycrypt/lib/source/ccm_mode.c
@@ -0,0 +1,260 @@
+/* ccm_mode.c - TinyCrypt implementation of CCM mode */
+
+/*
+ * Copyright (C) 2015 by Intel Corporation, All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * - Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <tinycrypt/ccm_mode.h>
+#include <tinycrypt/constants.h>
+#include <tinycrypt/utils.h>
+
+#include <stdio.h>
+
+int32_t tc_ccm_config(TCCcmMode_t c, TCAesKeySched_t sched, uint8_t *nonce,
+ uint32_t nlen, uint32_t mlen)
+{
+
+ /* input sanity check: */
+ if (c == (TCCcmMode_t) 0 ||
+ sched == (TCAesKeySched_t) 0 ||
+ nonce == (uint8_t *) 0) {
+ return TC_CRYPTO_FAIL;
+ } else if (nlen != 13) {
+ return TC_CRYPTO_FAIL; /* The allowed nonce size is: 13. See documentation.*/
+ } else if ((mlen < 4) || (mlen > 16) || (mlen & 1)) {
+ return TC_CRYPTO_FAIL; /* The allowed mac sizes are: 4, 6, 8, 10, 12, 14, 16.*/
+ }
+
+ c->mlen = mlen;
+ c->sched = sched;
+ c->nonce = nonce;
+
+ return TC_CRYPTO_SUCCESS;
+}
+
+/**
+ * Variation of CBC-MAC mode used in CCM.
+ */
+static void ccm_cbc_mac(uint8_t *T, const uint8_t *data, uint32_t dlen,
+ uint32_t flag, TCAesKeySched_t sched)
+{
+
+ uint32_t i;
+
+ if (flag > 0) {
+ T[0] ^= (uint8_t)(dlen >> 8);
+ T[1] ^= (uint8_t)(dlen);
+ dlen += 2; i = 2;
+ } else {
+ i = 0;
+ }
+
+ while (i < dlen) {
+ T[i++ % (Nb * Nk)] ^= *data++;
+ if (((i % (Nb * Nk)) == 0) || dlen == i) {
+ (void) tc_aes_encrypt(T, T, sched);
+ }
+ }
+}
+
+/**
+ * Variation of CTR mode used in CCM.
+ * The CTR mode used by CCM is slightly different than the conventional CTR
+ * mode (the counter is increased before encryption, instead of after
+ * encryption). Besides, it is assumed that the counter is stored in the last
+ * 2 bytes of the nonce.
+ */
+static int32_t ccm_ctr_mode(uint8_t *out, uint32_t outlen, const uint8_t *in,
+ uint32_t inlen, uint8_t *ctr, const TCAesKeySched_t sched)
+{
+
+ uint8_t buffer[TC_AES_BLOCK_SIZE];
+ uint8_t nonce[TC_AES_BLOCK_SIZE];
+ uint16_t block_num;
+ uint32_t i;
+
+ /* input sanity check: */
+ if (out == (uint8_t *) 0 ||
+ in == (uint8_t *) 0 ||
+ ctr == (uint8_t *) 0 ||
+ sched == (TCAesKeySched_t) 0 ||
+ inlen == 0 ||
+ outlen == 0 ||
+ outlen != inlen) {
+ return TC_CRYPTO_FAIL;
+ }
+
+ /* copy the counter to the nonce */
+ (void) _copy(nonce, sizeof(nonce), ctr, sizeof(nonce));
+
+ /* select the last 2 bytes of the nonce to be incremented */
+ block_num = (uint16_t) ((nonce[14] << 8)|(nonce[15]));
+ for (i = 0; i < inlen; ++i) {
+ if ((i % (TC_AES_BLOCK_SIZE)) == 0) {
+ block_num++;
+ nonce[14] = (uint8_t)(block_num >> 8);
+ nonce[15] = (uint8_t)(block_num);
+ if (!tc_aes_encrypt(buffer, nonce, sched)) {
+ return TC_CRYPTO_FAIL;
+ }
+ }
+ /* update the output */
+ *out++ = buffer[i % (TC_AES_BLOCK_SIZE)] ^ *in++;
+ }
+
+ /* update the counter */
+ ctr[14] = nonce[14]; ctr[15] = nonce[15];
+
+ return TC_CRYPTO_SUCCESS;
+}
+
+int32_t tc_ccm_generation_encryption(uint8_t *out, const uint8_t *associated_data,
+ uint32_t alen, const uint8_t *payload,
+ uint32_t plen, TCCcmMode_t c)
+{
+ /* input sanity check: */
+ if ((out == (uint8_t *) 0) ||
+ (c == (TCCcmMode_t) 0) ||
+ ((plen > 0) && (payload == (uint8_t *) 0)) ||
+ ((alen > 0) && (associated_data == (uint8_t *) 0)) ||
+ (alen >= TC_CCM_AAD_MAX_BYTES) || /* associated data size unsupported */
+ (plen >= TC_CCM_PAYLOAD_MAX_BYTES)) { /* payload size unsupported */
+ return TC_CRYPTO_FAIL;
+ }
+
+ uint8_t b[Nb * Nk];
+ uint8_t tag[Nb * Nk];
+ uint32_t i;
+
+ /* GENERATING THE AUTHENTICATION TAG: */
+
+ /* formatting the sequence b for authentication: */
+ b[0] = ((alen > 0) ? 0x40:0) | (((c->mlen - 2) / 2 << 3)) | (1);
+ for (i = 1; i <= 13; ++i) {
+ b[i] = c->nonce[i - 1];
+ }
+ b[14] = (uint8_t)(plen >> 8);
+ b[15] = (uint8_t)(plen);
+
+ /* computing the authentication tag using cbc-mac: */
+ (void) tc_aes_encrypt(tag, b, c->sched);
+ if (alen > 0) {
+ ccm_cbc_mac(tag, associated_data, alen, 1, c->sched);
+ }
+ if (plen > 0) {
+ ccm_cbc_mac(tag, payload, plen, 0, c->sched);
+ }
+
+ /* ENCRYPTION: */
+
+ /* formatting the sequence b for encryption: */
+ b[0] = 1; /* q - 1 = 2 - 1 = 1 */
+ b[14] = b[15] = TC_ZERO_BYTE;
+
+ /* encrypting payload using ctr mode: */
+ ccm_ctr_mode(out, plen, payload, plen, b, c->sched);
+
+ b[14] = b[15] = TC_ZERO_BYTE; /* restoring initial counter for ctr_mode (0):*/
+
+ /* encrypting b and adding the tag to the output: */
+ (void) tc_aes_encrypt(b, b, c->sched);
+ out += plen;
+ for (i = 0; i < c->mlen; ++i) {
+ *out++ = tag[i] ^ b[i];
+ }
+
+ return TC_CRYPTO_SUCCESS;
+}
+
+int32_t tc_ccm_decryption_verification(uint8_t *out, const uint8_t *associated_data,
+ uint32_t alen, const uint8_t *payload,
+ uint32_t plen, TCCcmMode_t c)
+{
+ /* input sanity check: */
+ if ((out == (uint8_t *) 0) ||
+ (c == (TCCcmMode_t) 0) ||
+ ((plen > 0) && (payload == (uint8_t *) 0)) ||
+ ((alen > 0) && (associated_data == (uint8_t *) 0)) ||
+ (alen >= TC_CCM_AAD_MAX_BYTES) || /* associated data size unsupported */
+ (plen >= TC_CCM_PAYLOAD_MAX_BYTES)) { /* payload size unsupported */
+ return TC_CRYPTO_FAIL;
+ }
+
+ uint8_t b[Nb * Nk];
+ uint8_t tag[Nb * Nk];
+ uint32_t i;
+
+ /* DECRYPTION: */
+
+ /* formatting the sequence b for decryption: */
+ b[0] = 1; /* q - 1 = 2 - 1 = 1 */
+ for (i = 1; i < 14; ++i) {
+ b[i] = c->nonce[i - 1];
+ }
+ b[14] = b[15] = TC_ZERO_BYTE; /* initial counter value is 0 */
+
+ /* decrypting payload using ctr mode: */
+ ccm_ctr_mode(out, plen - c->mlen, payload, plen - c->mlen, b, c->sched);
+
+ b[14] = b[15] = TC_ZERO_BYTE; /* restoring initial counter value (0) */
+
+ /* encrypting b and restoring the tag from input: */
+ (void) tc_aes_encrypt(b, b, c->sched);
+ for (i = 0; i < c->mlen; ++i) {
+ tag[i] = *(payload + plen - c->mlen + i) ^ b[i];
+ }
+
+ /* VERIFYING THE AUTHENTICATION TAG: */
+
+ /* formatting the sequence b for authentication: */
+ b[0] = ((alen > 0) ? 0x40:0)|(((c->mlen - 2) / 2 << 3)) | (1);
+ for (i = 1; i < 14; ++i) {
+ b[i] = c->nonce[i - 1];
+ }
+ b[14] = (uint8_t)((plen - c->mlen) >> 8);
+ b[15] = (uint8_t)(plen - c->mlen);
+
+ /* computing the authentication tag using cbc-mac: */
+ (void) tc_aes_encrypt(b, b, c->sched);
+ if (alen > 0) {
+ ccm_cbc_mac(b, associated_data, alen, 1, c->sched);
+ }
+ if (plen > 0) {
+ ccm_cbc_mac(b, out, plen - c->mlen, 0, c->sched);
+ }
+
+ /* comparing the received tag and the computed one: */
+ if (_compare(b, tag, c->mlen) != 0) {
+ /* erase the decrypted buffer in case of mac validation failure: */
+ _set(out, 0, sizeof(*out));
+ return TC_CRYPTO_FAIL;
+ }
+
+ return TC_CRYPTO_SUCCESS;
+}
diff --git a/ext/tinycrypt/lib/source/cmac_mode.c b/ext/tinycrypt/lib/source/cmac_mode.c
new file mode 100644
index 0000000..3b31c3e
--- /dev/null
+++ b/ext/tinycrypt/lib/source/cmac_mode.c
@@ -0,0 +1,254 @@
+/* cmac_mode.c - TinyCrypt CMAC mode implementation */
+
+/*
+ * Copyright (C) 2015 by Intel Corporation, All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * - Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <tinycrypt/aes.h>
+#include <tinycrypt/cmac_mode.h>
+#include <tinycrypt/constants.h>
+#include <tinycrypt/utils.h>
+
+/* max number of calls allowed before the key must be changed (2^48). */
+static uint64_t MAX_CALLS = ((uint64_t)1 << 48);
+
+/*
+ * gf_wrap -- In our implementation, GF(2^128) is represented as a 16 byte
+ * array with byte 0 the most significant and byte 15 the least significant.
+ * High bit carry reduction is based on the primitive polynomial
+ *
+ * X^128 + X^7 + X^2 + X + 1,
+ *
+ * which leads to the reduction formula X^128 = X^7 + X^2 + X + 1. Indeed,
+ * since 0 = (X^128 + X^7 + X^2 + 1) mod (X^128 + X^7 + X^2 + X + 1) and since
+ * addition of polynomials with coefficients in Z/Z(2) is just XOR, we can
+ * add X^128 to both sides to get
+ *
+ * X^128 = (X^7 + X^2 + X + 1) mod (X^128 + X^7 + X^2 + X + 1)
+ *
+ * and the coefficients of the polynomial on the right hand side form the
+ * string 1000 0111 = 0x87, which is the value of gf_wrap.
+ *
+ * This gets used in the following way. Doubling in GF(2^128) is just a left
+ * shift by 1 bit, except when the most significant bit is 1. In the latter
+ * case, the relation X^128 = X^7 + X^2 + X + 1 says that the high order bit
+ * that overflows beyond 128 bits can be replaced by addition of
+ * X^7 + X^2 + X + 1 <--> 0x87 to the low order 128 bits. Since addition
+ * in GF(2^128) is represented by XOR, we therefore only have to XOR 0x87
+ * into the low order byte after a left shift when the starting high order
+ * bit is 1.
+ */
+const unsigned char gf_wrap = 0x87;
+
+/*
+ * assumes: out != NULL and points to a GF(2^n) value to receive the
+ * doubled value;
+ * in != NULL and points to a 16 byte GF(2^n) value
+ * to double;
+ * the in and out buffers do not overlap.
+ * effects: doubles the GF(2^n) value pointed to by "in" and places
+ * the result in the GF(2^n) value pointed to by "out."
+ */
+void gf_double(uint8_t *out, uint8_t *in)
+{
+
+ /* start with low order byte */
+ uint8_t *x = in + (TC_AES_BLOCK_SIZE - 1);
+
+ /* if msb == 1, we need to add the gf_wrap value, otherwise add 0 */
+ uint8_t carry = (in[0] >> 7) ? gf_wrap : 0;
+
+ out += (TC_AES_BLOCK_SIZE - 1);
+ for (;;) {
+ *out-- = (*x << 1) ^ carry;
+ if (x == in) {
+ break;
+ }
+ carry = *x-- >> 7;
+ }
+}
+
+int32_t tc_cmac_setup(TCCmacState_t s, const uint8_t *key, TCAesKeySched_t sched)
+{
+
+ /* input sanity check: */
+ if (s == (TCCmacState_t) 0 ||
+ key == (const uint8_t *) 0) {
+ return TC_CRYPTO_FAIL;
+ }
+
+ /* put s into a known state */
+ _set(s, 0, sizeof(*s));
+ s->sched = sched;
+
+ /* configure the encryption key used by the underlying block cipher */
+ tc_aes128_set_encrypt_key(s->sched, key);
+
+ /* compute s->K1 and s->K2 from s->iv using s->keyid */
+ _set(s->iv, 0, TC_AES_BLOCK_SIZE);
+ tc_aes_encrypt(s->iv, s->iv, s->sched);
+ gf_double (s->K1, s->iv);
+ gf_double (s->K2, s->K1);
+
+ /* reset s->iv to 0 in case someone wants to compute now */
+ tc_cmac_init(s);
+
+ return TC_CRYPTO_SUCCESS;
+}
+
+int32_t tc_cmac_erase(TCCmacState_t s)
+{
+ if (s == (TCCmacState_t) 0) {
+ return TC_CRYPTO_FAIL;
+ }
+
+ /* destroy the current state */
+ _set(s, 0, sizeof(*s));
+
+ return TC_CRYPTO_SUCCESS;
+}
+
+int32_t tc_cmac_init(TCCmacState_t s)
+{
+ /* input sanity check: */
+ if (s == (TCCmacState_t) 0) {
+ return TC_CRYPTO_FAIL;
+ }
+
+ /* CMAC starts with an all zero initialization vector */
+ _set(s->iv, 0, TC_AES_BLOCK_SIZE);
+
+ /* and the leftover buffer is empty */
+ _set(s->leftover, 0, TC_AES_BLOCK_SIZE);
+ s->leftover_offset = 0;
+
+ /* Set countdown to max number of calls allowed before re-keying: */
+ s->countdown = MAX_CALLS;
+
+ return TC_CRYPTO_SUCCESS;
+}
+
+int32_t tc_cmac_update(TCCmacState_t s, const uint8_t *data, size_t data_length)
+{
+ uint32_t i;
+
+ /* input sanity check: */
+ if (s == (TCCmacState_t) 0) {
+ return TC_CRYPTO_FAIL;
+ }
+ if (data_length == 0) {
+ return TC_CRYPTO_SUCCESS;
+ }
+ if (data == (const uint8_t *) 0) {
+ return TC_CRYPTO_FAIL;
+ }
+
+ if (s->countdown == 0) {
+ return TC_CRYPTO_FAIL;
+ }
+
+ s->countdown--;
+
+ if (s->leftover_offset > 0) {
+ /* last data added to s didn't end on a TC_AES_BLOCK_SIZE byte boundary */
+ size_t remaining_space = TC_AES_BLOCK_SIZE - s->leftover_offset;
+
+ if (data_length < remaining_space) {
+ /* still not enough data to encrypt this time either */
+ _copy(&s->leftover[s->leftover_offset], data_length, data, data_length);
+ s->leftover_offset += data_length;
+ return TC_CRYPTO_SUCCESS;
+ }
+ /* leftover block is now full; encrypt it first */
+ _copy(&s->leftover[s->leftover_offset],
+ remaining_space,
+ data,
+ remaining_space);
+ data_length -= remaining_space;
+ data += remaining_space;
+ s->leftover_offset = 0;
+
+ for (i = 0; i < TC_AES_BLOCK_SIZE; ++i) {
+ s->iv[i] ^= s->leftover[i];
+ }
+ tc_aes_encrypt(s->iv, s->iv, s->sched);
+ }
+
+ /* CBC encrypt each (except the last) of the data blocks */
+ while (data_length > TC_AES_BLOCK_SIZE) {
+ for (i = 0; i < TC_AES_BLOCK_SIZE; ++i) {
+ s->iv[i] ^= data[i];
+ }
+ tc_aes_encrypt(s->iv, s->iv, s->sched);
+ data += TC_AES_BLOCK_SIZE;
+ data_length -= TC_AES_BLOCK_SIZE;
+ }
+
+ if (data_length > 0) {
+ /* save leftover data for next time */
+ _copy(s->leftover, data_length, data, data_length);
+ s->leftover_offset = data_length;
+ }
+
+ return TC_CRYPTO_SUCCESS;
+}
+
+int32_t tc_cmac_final(uint8_t *tag, TCCmacState_t s)
+{
+ uint8_t *k;
+ uint32_t i;
+
+ /* input sanity check: */
+ if (tag == (uint8_t *) 0 ||
+ s == (TCCmacState_t) 0) {
+ return TC_CRYPTO_FAIL;
+ }
+
+ if (s->leftover_offset == TC_AES_BLOCK_SIZE) {
+ /* the last message block is a full-sized block */
+ k = (uint8_t *) s->K1;
+ } else {
+ /* the final message block is not a full-sized block */
+ size_t remaining = TC_AES_BLOCK_SIZE - s->leftover_offset;
+
+ _set(&s->leftover[s->leftover_offset], 0, remaining);
+ s->leftover[s->leftover_offset] = TC_CMAC_PADDING;
+ k = (uint8_t *) s->K2;
+ }
+ for (i = 0; i < TC_AES_BLOCK_SIZE; ++i) {
+ s->iv[i] ^= s->leftover[i] ^ k[i];
+ }
+
+ tc_aes_encrypt(tag, s->iv, s->sched);
+
+ /* erasing state: */
+ tc_cmac_erase(s);
+
+ return TC_CRYPTO_SUCCESS;
+}
diff --git a/ext/tinycrypt/lib/source/ctr_mode.c b/ext/tinycrypt/lib/source/ctr_mode.c
new file mode 100644
index 0000000..7ba53d0
--- /dev/null
+++ b/ext/tinycrypt/lib/source/ctr_mode.c
@@ -0,0 +1,85 @@
+/* ctr_mode.c - TinyCrypt CTR mode implementation */
+
+/*
+ * Copyright (C) 2015 by Intel Corporation, All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * - Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <tinycrypt/constants.h>
+#include <tinycrypt/ctr_mode.h>
+#include <tinycrypt/utils.h>
+
+int32_t tc_ctr_mode(uint8_t *out, uint32_t outlen, const uint8_t *in,
+ uint32_t inlen, uint8_t *ctr, const TCAesKeySched_t sched)
+{
+
+ uint8_t buffer[TC_AES_BLOCK_SIZE];
+ uint8_t nonce[TC_AES_BLOCK_SIZE];
+ uint32_t block_num;
+ uint32_t i;
+
+ /* input sanity check: */
+ if (out == (uint8_t *) 0 ||
+ in == (uint8_t *) 0 ||
+ ctr == (uint8_t *) 0 ||
+ sched == (TCAesKeySched_t) 0 ||
+ inlen == 0 ||
+ outlen == 0 ||
+ outlen != inlen) {
+ return TC_CRYPTO_FAIL;
+ }
+
+ /* copy the ctr to the nonce */
+ (void)_copy(nonce, sizeof(nonce), ctr, sizeof(nonce));
+
+ /* select the last 4 bytes of the nonce to be incremented */
+ block_num = (nonce[12] << 24) | (nonce[13] << 16) |
+ (nonce[14] << 8) | (nonce[15]);
+ for (i = 0; i < inlen; ++i) {
+ if ((i % (TC_AES_BLOCK_SIZE)) == 0) {
+ /* encrypt data using the current nonce */
+ if (tc_aes_encrypt(buffer, nonce, sched)) {
+ block_num++;
+ nonce[12] = (uint8_t)(block_num >> 24);
+ nonce[13] = (uint8_t)(block_num >> 16);
+ nonce[14] = (uint8_t)(block_num >> 8);
+ nonce[15] = (uint8_t)(block_num);
+ } else {
+ return TC_CRYPTO_FAIL;
+ }
+ }
+ /* update the output */
+ *out++ = buffer[i%(TC_AES_BLOCK_SIZE)] ^ *in++;
+ }
+
+ /* update the counter */
+ ctr[12] = nonce[12]; ctr[13] = nonce[13];
+ ctr[14] = nonce[14]; ctr[15] = nonce[15];
+
+ return TC_CRYPTO_SUCCESS;
+}
diff --git a/ext/tinycrypt/lib/source/ctr_prng.c b/ext/tinycrypt/lib/source/ctr_prng.c
new file mode 100644
index 0000000..bac81d8
--- /dev/null
+++ b/ext/tinycrypt/lib/source/ctr_prng.c
@@ -0,0 +1,308 @@
+/* ctr_prng.c - TinyCrypt implementation of CTR-PRNG */
+
+/*
+ * Copyright (c) 2016, Chris Morrison
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <tinycrypt/ctr_prng.h>
+#include <tinycrypt/utils.h>
+#include <tinycrypt/constants.h>
+#include <string.h>
+
+/*
+ * This PRNG is based on the CTR_DRBG described in Recommendation for Random
+ * Number Generation Using Deterministic Random Bit Generators,
+ * NIST SP 800-90A Rev. 1.
+ *
+ * Annotations to particular steps (e.g. 10.2.1.2 Step 1) refer to the steps
+ * described in that document.
+ *
+ */
+
+/**
+ * @brief Array incrementer
+ * Treats the supplied array as one contiguous number (MSB in arr[0]), and
+ * increments it by one
+ * @return none
+ * @param arr IN/OUT -- array to be incremented
+ * @param len IN -- size of arr in bytes
+ */
+static void arrInc(uint8_t arr[], uint32_t len)
+{
+ uint32_t i;
+ if (0 != arr)
+ {
+ for (i = len; i > 0U; i--)
+ {
+ if (++arr[i-1] != 0U)
+ {
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * @brief CTR PRNG update
+ * Updates the internal state (key and V) of the supplied CTR PRNG
+ * context using the optional provided data
+ * @return none
+ * @note Assumes: providedData is (TC_AES_KEY_SIZE + TC_AES_BLOCK_SIZE) bytes long
+ * @param ctx IN/OUT -- CTR PRNG state
+ * @param providedData IN -- data used when updating the internal state
+ */
+static void tc_ctr_prng_update(TCCtrPrng_t * const ctx, uint8_t const * const providedData)
+{
+ if (0 != ctx)
+ {
+ /* 10.2.1.2 step 1 */
+ uint8_t temp[TC_AES_KEY_SIZE + TC_AES_BLOCK_SIZE];
+ uint32_t len = 0U;
+
+ /* 10.2.1.2 step 2 */
+ while (len < sizeof temp)
+ {
+ uint32_t blocklen = sizeof(temp) - len;
+ uint8_t output_block[TC_AES_BLOCK_SIZE];
+
+ /* 10.2.1.2 step 2.1 */
+ arrInc(ctx->V, sizeof ctx->V);
+
+ /* 10.2.1.2 step 2.2 */
+ if (blocklen > TC_AES_BLOCK_SIZE)
+ {
+ blocklen = TC_AES_BLOCK_SIZE;
+ }
+ (void)tc_aes_encrypt(output_block, ctx->V, &ctx->key);
+
+ /* 10.2.1.2 step 2.3/step 3 */
+ memcpy(&(temp[len]), output_block, blocklen);
+
+ len += blocklen;
+ }
+
+ /* 10.2.1.2 step 4 */
+ if (0 != providedData)
+ {
+ uint32_t i;
+ for (i = 0U; i < sizeof temp; i++)
+ {
+ temp[i] ^= providedData[i];
+ }
+ }
+
+ /* 10.2.1.2 step 5 */
+ (void)tc_aes128_set_encrypt_key(&ctx->key, temp);
+
+ /* 10.2.1.2 step 6 */
+ memcpy(ctx->V, &(temp[TC_AES_KEY_SIZE]), TC_AES_BLOCK_SIZE);
+ }
+}
+
+int32_t tc_ctr_prng_init(TCCtrPrng_t * const ctx,
+ uint8_t const * const entropy,
+ uint32_t entropyLen,
+ uint8_t const * const personalization,
+ uint32_t pLen)
+{
+ int32_t result = TC_CRYPTO_FAIL;
+ uint32_t i;
+ uint8_t personalization_buf[TC_AES_KEY_SIZE + TC_AES_BLOCK_SIZE] = {0U};
+ uint8_t seed_material[TC_AES_KEY_SIZE + TC_AES_BLOCK_SIZE];
+ uint8_t zeroArr[TC_AES_BLOCK_SIZE] = {0U};
+
+ if (0 != personalization)
+ {
+ /* 10.2.1.3.1 step 1 */
+ uint32_t len = pLen;
+ if (len > sizeof personalization_buf)
+ {
+ len = sizeof personalization_buf;
+ }
+
+ /* 10.2.1.3.1 step 2 */
+ memcpy(personalization_buf, personalization, len);
+ }
+
+ if ((0 != ctx) && (0 != entropy) && (entropyLen >= sizeof seed_material))
+ {
+ /* 10.2.1.3.1 step 3 */
+ memcpy(seed_material, entropy, sizeof seed_material);
+ for (i = 0U; i < sizeof seed_material; i++)
+ {
+ seed_material[i] ^= personalization_buf[i];
+ }
+
+ /* 10.2.1.3.1 step 4 */
+ (void)tc_aes128_set_encrypt_key(&ctx->key, zeroArr);
+
+ /* 10.2.1.3.1 step 5 */
+ memset(ctx->V, 0x00, sizeof ctx->V);
+
+ /* 10.2.1.3.1 step 6 */
+ tc_ctr_prng_update(ctx, seed_material);
+
+ /* 10.2.1.3.1 step 7 */
+ ctx->reseedCount = 1U;
+
+ result = TC_CRYPTO_SUCCESS;
+ }
+ return result;
+}
+
+int32_t tc_ctr_prng_reseed(TCCtrPrng_t * const ctx,
+ uint8_t const * const entropy,
+ uint32_t entropyLen,
+ uint8_t const * const additional_input,
+ uint32_t additionallen)
+{
+ uint32_t i;
+ int32_t result = TC_CRYPTO_FAIL;
+ uint8_t additional_input_buf[TC_AES_KEY_SIZE + TC_AES_BLOCK_SIZE] = {0U};
+ uint8_t seed_material[TC_AES_KEY_SIZE + TC_AES_BLOCK_SIZE];
+
+ if (0 != additional_input)
+ {
+ /* 10.2.1.4.1 step 1 */
+ uint32_t len = additionallen;
+ if (len > sizeof additional_input_buf)
+ {
+ len = sizeof additional_input_buf;
+ }
+
+ /* 10.2.1.4.1 step 2 */
+ memcpy(additional_input_buf, additional_input, len);
+ }
+
+ uint32_t seedlen = (uint32_t)TC_AES_KEY_SIZE + (uint32_t)TC_AES_BLOCK_SIZE;
+ if ((0 != ctx) && (entropyLen >= seedlen))
+ {
+ /* 10.2.1.4.1 step 3 */
+ memcpy(seed_material, entropy, sizeof seed_material);
+ for (i = 0U; i < sizeof seed_material; i++)
+ {
+ seed_material[i] ^= additional_input_buf[i];
+ }
+
+ /* 10.2.1.4.1 step 4 */
+ tc_ctr_prng_update(ctx, seed_material);
+
+ /* 10.2.1.4.1 step 5 */
+ ctx->reseedCount = 1U;
+
+ result = TC_CRYPTO_SUCCESS;
+ }
+ return result;
+}
+
+int32_t tc_ctr_prng_generate(TCCtrPrng_t * const ctx,
+ uint8_t const * const additional_input,
+ uint32_t additionallen,
+ uint8_t * const out,
+ uint32_t outlen)
+{
+ /* 2^48 - see section 10.2.1 */
+ static const uint64_t MAX_REQS_BEFORE_RESEED = 0x1000000000000ULL;
+
+ /* 2^19 bits - see section 10.2.1 */
+ static const uint32_t MAX_BYTES_PER_REQ = 65536U;
+
+ int32_t result = TC_CRYPTO_FAIL;
+
+ if ((0 != ctx) && (0 != out) && (outlen < MAX_BYTES_PER_REQ))
+ {
+ /* 10.2.1.5.1 step 1 */
+ if (ctx->reseedCount > MAX_REQS_BEFORE_RESEED)
+ {
+ result = TC_CTR_PRNG_RESEED_REQ;
+ }
+ else
+ {
+ uint8_t additional_input_buf[TC_AES_KEY_SIZE + TC_AES_BLOCK_SIZE] = {0U};
+ if (0 != additional_input)
+ {
+ /* 10.2.1.5.1 step 2 */
+ uint32_t len = additionallen;
+ if (len > sizeof additional_input_buf)
+ {
+ len = sizeof additional_input_buf;
+ }
+ memcpy(additional_input_buf, additional_input, len);
+ tc_ctr_prng_update(ctx, additional_input_buf);
+ }
+
+ /* 10.2.1.5.1 step 3 - implicit */
+
+ /* 10.2.1.5.1 step 4 */
+ uint32_t len = 0U;
+ while (len < outlen)
+ {
+ uint32_t blocklen = outlen - len;
+ uint8_t output_block[TC_AES_BLOCK_SIZE];
+
+ /* 10.2.1.5.1 step 4.1 */
+ arrInc(ctx->V, sizeof ctx->V);
+
+ /* 10.2.1.5.1 step 4.2 */
+ (void)tc_aes_encrypt(output_block, ctx->V, &ctx->key);
+
+ /* 10.2.1.5.1 step 4.3/step 5 */
+ if (blocklen > TC_AES_BLOCK_SIZE)
+ {
+ blocklen = TC_AES_BLOCK_SIZE;
+ }
+ memcpy(&(out[len]), output_block, blocklen);
+
+ len += blocklen;
+ }
+
+ /* 10.2.1.5.1 step 6 */
+ tc_ctr_prng_update(ctx, additional_input_buf);
+
+ /* 10.2.1.5.1 step 7 */
+ ctx->reseedCount++;
+
+ /* 10.2.1.5.1 step 8 */
+ result = TC_CRYPTO_SUCCESS;
+ }
+ }
+
+ return result;
+}
+
+void tc_ctr_prng_uninstantiate(TCCtrPrng_t * const ctx)
+{
+ if (0 != ctx)
+ {
+ memset(ctx->key.words, 0x00, sizeof ctx->key.words);
+ memset(ctx->V, 0x00, sizeof ctx->V);
+ ctx->reseedCount = 0U;
+ }
+}
+
+
+
+
diff --git a/ext/tinycrypt/lib/source/ecc.c b/ext/tinycrypt/lib/source/ecc.c
new file mode 100644
index 0000000..bfe6c5f
--- /dev/null
+++ b/ext/tinycrypt/lib/source/ecc.c
@@ -0,0 +1,625 @@
+/* ecc.c - TinyCrypt implementation of ECC auxiliary functions */
+
+/*
+ *
+ * Copyright (c) 2013, Kenneth MacKay
+ * All rights reserved.
+ * https://github.com/kmackay/micro-ecc
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Copyright (C) 2015 by Intel Corporation, All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * - Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <tinycrypt/ecc.h>
+
+/* ------ Curve NIST P-256 constants: ------ */
+
+#define Curve_P {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, \
+ 0x00000000, 0x00000000, 0x00000001, 0xFFFFFFFF}
+
+#define Curve_B {0x27D2604B, 0x3BCE3C3E, 0xCC53B0F6, 0x651D06B0, \
+ 0x769886BC, 0xB3EBBD55, 0xAA3A93E7, 0x5AC635D8}
+
+#define Curve_N {0xFC632551, 0xF3B9CAC2, 0xA7179E84, 0xBCE6FAAD, \
+ 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF}
+
+#define Curve_G {{0xD898C296, 0xF4A13945, 0x2DEB33A0, 0x77037D81, \
+ 0x63A440F2, 0xF8BCE6E5, 0xE12C4247, 0x6B17D1F2}, \
+ {0x37BF51F5, 0xCBB64068, 0x6B315ECE, 0x2BCE3357, \
+ 0x7C0F9E16, 0x8EE7EB4A, 0xFE1A7F9B, 0x4FE342E2} }
+
+#define Curve_P_Barrett {0x00000003, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFE, \
+ 0xFFFFFFFE, 0xFFFFFFFE, 0xFFFFFFFF, 0x00000000, 0x00000001}
+
+#define Curve_N_Barrett {0xEEDF9BFE, 0x012FFD85, 0xDF1A6C21, 0x43190552, \
+ 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF, 0x00000000, 0x00000001}
+
+uint32_t curve_p[NUM_ECC_DIGITS] = Curve_P;
+uint32_t curve_b[NUM_ECC_DIGITS] = Curve_B;
+EccPoint curve_G = Curve_G;
+uint32_t curve_n[NUM_ECC_DIGITS] = Curve_N;
+uint32_t curve_pb[NUM_ECC_DIGITS + 1] = Curve_P_Barrett;
+uint32_t curve_nb[NUM_ECC_DIGITS + 1] = Curve_N_Barrett;
+
+/* ------ Static functions: ------ */
+
+/* Zeroing out p_vli. */
+static void vli_clear(uint32_t *p_vli)
+{
+ uint32_t i;
+
+ for (i = 0; i < NUM_ECC_DIGITS; ++i) {
+ p_vli[i] = 0;
+ }
+}
+
+/* Returns nonzero if bit p_bit of p_vli is set.
+ * It is assumed that the value provided in 'p_bit' is within
+ * the boundaries of the word-array 'p_vli'.*/
+static uint32_t vli_testBit(uint32_t *p_vli, uint32_t p_bit)
+{
+ return (p_vli[p_bit / 32] & (1 << (p_bit % 32)));
+}
+
+uint32_t vli_isZero(uint32_t *p_vli)
+{
+ uint32_t acc = 0;
+
+ for (uint32_t i = 0; i < NUM_ECC_DIGITS; ++i) {
+ acc |= p_vli[i];
+ }
+
+ return (!acc);
+}
+
+/*
+ * Find the right-most nonzero 32-bit "digits" in p_vli.
+ *
+ * Side-channel countermeasure: algorithm strengthened against timing attack.
+ */
+static uint32_t vli_numDigits(uint32_t *p_vli)
+{
+ int32_t i;
+ uint32_t digits = 0;
+
+ for (i = NUM_ECC_DIGITS - 1; i >= 0 ; --i) {
+ digits += p_vli[i] || digits;
+ }
+
+ return digits;
+}
+
+/*
+ * Find the left-most non-zero bit in p_vli.
+ *
+ * Side-channel countermeasure: algorithm strengthened against timing attack.
+ */
+static uint32_t vli_numBits(uint32_t *p_vli)
+{
+ uint32_t l_digit;
+ uint32_t i, acc = 32;
+ uint32_t l_numDigits = vli_numDigits(p_vli);
+
+ l_digit = p_vli[l_numDigits - 1];
+
+ for (i = 0; i < 32; ++i) {
+ acc -= !l_digit;
+ l_digit >>= 1;
+ }
+
+ return ((l_numDigits - 1) * 32 + acc);
+}
+
+/*
+ * Computes p_result = p_left + p_right, returns carry.
+ *
+ * Side-channel countermeasure: algorithm strengthened against timing attack.
+ */
+static uint32_t vli_add(uint32_t *p_result, uint32_t *p_left,
+ uint32_t *p_right)
+{
+
+ uint32_t l_carry = 0;
+
+ for (uint32_t i = 0; i < NUM_ECC_DIGITS; ++i) {
+ uint32_t l_sum = p_left[i] + p_right[i] + l_carry;
+
+ l_carry = (l_sum < p_left[i]) | ((l_sum == p_left[i]) && l_carry);
+ p_result[i] = l_sum;
+ }
+
+ return l_carry;
+}
+
+
+/* Computes p_result = p_left * p_right. */
+static void vli_mult(uint32_t *p_result, uint32_t *p_left,
+ uint32_t *p_right, uint32_t word_size)
+{
+
+ uint64_t r01 = 0;
+ uint32_t r2 = 0;
+
+ /* Compute each digit of p_result in sequence, maintaining the carries. */
+ for (uint32_t k = 0; k < word_size*2 - 1; ++k) {
+
+ uint32_t l_min = (k < word_size ? 0 : (k + 1) - word_size);
+
+ for (uint32_t i = l_min; i <= k && i < word_size; ++i) {
+
+ uint64_t l_product = (uint64_t)p_left[i] * p_right[k - i];
+
+ r01 += l_product;
+ r2 += (r01 < l_product);
+ }
+ p_result[k] = (uint32_t)r01;
+ r01 = (r01 >> 32) | (((uint64_t)r2) << 32);
+ r2 = 0;
+ }
+
+ p_result[word_size * 2 - 1] = (uint32_t)r01;
+}
+
+/* Computes p_result = p_left^2. */
+static void vli_square(uint32_t *p_result, uint32_t *p_left)
+{
+
+ uint64_t r01 = 0;
+ uint32_t r2 = 0;
+ uint32_t i, k;
+
+ for (k = 0; k < NUM_ECC_DIGITS * 2 - 1; ++k) {
+
+ uint32_t l_min = (k < NUM_ECC_DIGITS ? 0 : (k + 1) - NUM_ECC_DIGITS);
+
+ for (i = l_min; i <= k && i <= k - i; ++i) {
+
+ uint64_t l_product = (uint64_t)p_left[i] * p_left[k - i];
+
+ if (i < k - i) {
+
+ r2 += l_product >> 63;
+ l_product *= 2;
+ }
+ r01 += l_product;
+ r2 += (r01 < l_product);
+ }
+ p_result[k] = (uint32_t)r01;
+ r01 = (r01 >> 32) | (((uint64_t)r2) << 32);
+ r2 = 0;
+ }
+
+ p_result[NUM_ECC_DIGITS * 2 - 1] = (uint32_t)r01;
+}
+
+/* Computes p_result = p_product % curve_p using Barrett reduction. */
+void vli_mmod_barrett(uint32_t *p_result, uint32_t *p_product,
+ uint32_t *p_mod, uint32_t *p_barrett)
+{
+ uint32_t i;
+ uint32_t q1[NUM_ECC_DIGITS + 1];
+
+ for (i = NUM_ECC_DIGITS - 1; i < 2 * NUM_ECC_DIGITS; i++) {
+ q1[i - (NUM_ECC_DIGITS - 1)] = p_product[i];
+ }
+
+ uint32_t q2[2*NUM_ECC_DIGITS + 2];
+
+ vli_mult(q2, q1, p_barrett, NUM_ECC_DIGITS + 1);
+ for (i = NUM_ECC_DIGITS + 1; i < 2 * NUM_ECC_DIGITS + 2; i++) {
+ q1[i - (NUM_ECC_DIGITS + 1)] = q2[i];
+ }
+
+ uint32_t prime2[2*NUM_ECC_DIGITS];
+
+ for (i = 0; i < NUM_ECC_DIGITS; i++) {
+ prime2[i] = p_mod[i];
+ prime2[NUM_ECC_DIGITS + i] = 0;
+ }
+
+ vli_mult(q2, q1, prime2, NUM_ECC_DIGITS + 1);
+ vli_sub(p_product, p_product, q2, 2 * NUM_ECC_DIGITS);
+
+ uint32_t borrow;
+
+ borrow = vli_sub(q1, p_product, prime2, NUM_ECC_DIGITS + 1);
+ vli_cond_set(p_product, p_product, q1, borrow);
+ p_product[NUM_ECC_DIGITS] = q1[NUM_ECC_DIGITS] * (!borrow);
+ borrow = vli_sub(q1, p_product, prime2, NUM_ECC_DIGITS + 1);
+ vli_cond_set(p_product, p_product, q1, borrow);
+ p_product[NUM_ECC_DIGITS] = q1[NUM_ECC_DIGITS] * (!borrow);
+ borrow = vli_sub(q1, p_product, prime2, NUM_ECC_DIGITS + 1);
+ vli_cond_set(p_product, p_product, q1, borrow);
+ p_product[NUM_ECC_DIGITS] = q1[NUM_ECC_DIGITS] * (!borrow);
+
+ for (i = 0; i < NUM_ECC_DIGITS; i++) {
+ p_result[i] = p_product[i];
+ }
+}
+
+/*
+ * Computes modular exponentiation.
+ *
+ * Side-channel countermeasure: algorithm strengthened against timing attack.
+ */
+static void vli_modExp(uint32_t *p_result, uint32_t *p_base,
+ uint32_t *p_exp, uint32_t *p_mod, uint32_t *p_barrett)
+{
+
+ uint32_t acc[NUM_ECC_DIGITS], tmp[NUM_ECC_DIGITS], product[2 * NUM_ECC_DIGITS];
+ uint32_t j;
+ int32_t i;
+
+ vli_clear(acc);
+ acc[0] = 1;
+
+ for (i = NUM_ECC_DIGITS - 1; i >= 0; i--) {
+ for (j = 1 << 31; j > 0; j = j >> 1) {
+ vli_square(product, acc);
+ vli_mmod_barrett(acc, product, p_mod, p_barrett);
+ vli_mult(product, acc, p_base, NUM_ECC_DIGITS);
+ vli_mmod_barrett(tmp, product, p_mod, p_barrett);
+ vli_cond_set(acc, tmp, acc, j & p_exp[i]);
+ }
+ }
+
+ vli_set(p_result, acc);
+}
+
+/* Conversion from Affine coordinates to Jacobi coordinates. */
+static void EccPoint_fromAffine(EccPointJacobi *p_point_jacobi,
+ EccPoint *p_point) {
+
+ vli_set(p_point_jacobi->X, p_point->x);
+ vli_set(p_point_jacobi->Y, p_point->y);
+ vli_clear(p_point_jacobi->Z);
+ p_point_jacobi->Z[0] = 1;
+}
+
+/*
+ * Elliptic curve point doubling in Jacobi coordinates: P = P + P.
+ *
+ * Requires 4 squares and 4 multiplications.
+ */
+static void EccPoint_double(EccPointJacobi *P)
+{
+
+ uint32_t m[NUM_ECC_DIGITS], s[NUM_ECC_DIGITS], t[NUM_ECC_DIGITS];
+
+ vli_modSquare_fast(t, P->Z);
+ vli_modSub(m, P->X, t, curve_p);
+ vli_modAdd(s, P->X, t, curve_p);
+ vli_modMult_fast(m, m, s);
+ vli_modAdd(s, m, m, curve_p);
+ vli_modAdd(m, s, m, curve_p); /* m = 3X^2 - 3Z^4 */
+ vli_modSquare_fast(t, P->Y);
+ vli_modMult_fast(s, P->X, t);
+ vli_modAdd(s, s, s, curve_p);
+ vli_modAdd(s, s, s, curve_p); /* s = 4XY^2 */
+ vli_modMult_fast(P->Z, P->Y, P->Z);
+ vli_modAdd(P->Z, P->Z, P->Z, curve_p); /* Z' = 2YZ */
+ vli_modSquare_fast(P->X, m);
+ vli_modSub(P->X, P->X, s, curve_p);
+ vli_modSub(P->X, P->X, s, curve_p); /* X' = m^2 - 2s */
+ vli_modSquare_fast(P->Y, t);
+ vli_modAdd(P->Y, P->Y, P->Y, curve_p);
+ vli_modAdd(P->Y, P->Y, P->Y, curve_p);
+ vli_modAdd(P->Y, P->Y, P->Y, curve_p);
+ vli_modSub(t, s, P->X, curve_p);
+ vli_modMult_fast(t, t, m);
+ vli_modSub(P->Y, t, P->Y, curve_p); /* Y' = m(s - X') - 8Y^4 */
+
+}
+
+/* Copy input to target. */
+static void EccPointJacobi_set(EccPointJacobi *target, EccPointJacobi *input)
+{
+ vli_set(target->X, input->X);
+ vli_set(target->Y, input->Y);
+ vli_set(target->Z, input->Z);
+}
+
+/* ------ Externally visible functions (see header file for comments): ------ */
+
+void vli_set(uint32_t *p_dest, uint32_t *p_src)
+{
+
+ uint32_t i;
+
+ for (i = 0; i < NUM_ECC_DIGITS; ++i) {
+ p_dest[i] = p_src[i];
+ }
+}
+
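+/*
+ * Returns the sign of (p_left - p_right): 1, 0 or -1. All word_size words
+ * are visited regardless of where the first difference occurs, so the
+ * running time does not reveal which words differ.
+ */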
+int32_t vli_cmp(uint32_t *p_left, uint32_t *p_right, int32_t word_size)
+{
+
+ int32_t i, cmp = 0;
+
+ for (i = word_size-1; i >= 0; --i) {
+ cmp |= ((p_left[i] > p_right[i]) - (p_left[i] < p_right[i])) * (!cmp);
+ }
+
+ return cmp;
+}
+
+uint32_t vli_sub(uint32_t *p_result, uint32_t *p_left, uint32_t *p_right,
+ uint32_t word_size)
+{
+
+ uint32_t l_borrow = 0;
+
+ for (uint32_t i = 0; i < word_size; ++i) {
+ uint32_t l_diff = p_left[i] - p_right[i] - l_borrow;
+
+ l_borrow = (l_diff > p_left[i]) | ((l_diff == p_left[i]) && l_borrow);
+ p_result[i] = l_diff;
+ }
+
+ return l_borrow;
+}
+
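+/*
+ * Branch-free select: output = cond ? p_true : p_false, computed by
+ * multiplying with a 0/1 flag rather than branching on cond.
+ */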
+void vli_cond_set(uint32_t *output, uint32_t *p_true, uint32_t *p_false,
+ uint32_t cond)
+{
+ uint32_t i;
+
+ cond = (!cond);
+
+ for (i = 0; i < NUM_ECC_DIGITS; i++) {
+ output[i] = (p_true[i]*(!cond)) | (p_false[i]*cond);
+ }
+}
+
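+/*
+ * Modular addition: p_result = (p_left + p_right) mod p_mod, assuming both
+ * inputs are already reduced. A single conditional subtraction of p_mod is
+ * applied, branch-free, exactly when the raw sum (including its carry) is
+ * at least p_mod.
+ */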
+void vli_modAdd(uint32_t *p_result, uint32_t *p_left, uint32_t *p_right,
+ uint32_t *p_mod)
+{
+ uint32_t l_carry = vli_add(p_result, p_left, p_right);
+ uint32_t p_temp[NUM_ECC_DIGITS];
+
+ l_carry = l_carry == vli_sub(p_temp, p_result, p_mod, NUM_ECC_DIGITS);
+ vli_cond_set(p_result, p_temp, p_result, l_carry);
+}
+
+void vli_modSub(uint32_t *p_result, uint32_t *p_left, uint32_t *p_right,
+ uint32_t *p_mod)
+{
+ uint32_t l_borrow = vli_sub(p_result, p_left, p_right, NUM_ECC_DIGITS);
+ uint32_t p_temp[NUM_ECC_DIGITS];
+
+ vli_add(p_temp, p_result, p_mod);
+ vli_cond_set(p_result, p_temp, p_result, l_borrow);
+}
+
+void vli_modMult_fast(uint32_t *p_result, uint32_t *p_left,
+ uint32_t *p_right)
+{
+ uint32_t l_product[2 * NUM_ECC_DIGITS];
+
+ vli_mult(l_product, p_left, p_right, NUM_ECC_DIGITS);
+ vli_mmod_barrett(p_result, l_product, curve_p, curve_pb);
+}
+
+void vli_modSquare_fast(uint32_t *p_result, uint32_t *p_left)
+{
+ uint32_t l_product[2 * NUM_ECC_DIGITS];
+
+ vli_square(l_product, p_left);
+ vli_mmod_barrett(p_result, l_product, curve_p, curve_pb);
+}
+
+void vli_modMult(uint32_t *p_result, uint32_t *p_left, uint32_t *p_right,
+ uint32_t *p_mod, uint32_t *p_barrett)
+{
+
+ uint32_t l_product[2 * NUM_ECC_DIGITS];
+
+ vli_mult(l_product, p_left, p_right, NUM_ECC_DIGITS);
+ vli_mmod_barrett(p_result, l_product, p_mod, p_barrett);
+}
+
+void vli_modInv(uint32_t *p_result, uint32_t *p_input, uint32_t *p_mod,
+ uint32_t *p_barrett)
+{
+ uint32_t p_power[NUM_ECC_DIGITS];
+
+ vli_set(p_power, p_mod);
+ p_power[0] -= 2;
+ vli_modExp(p_result, p_input, p_power, p_mod, p_barrett);
+}
+
+uint32_t EccPoint_isZero(EccPoint *p_point)
+{
+ return (vli_isZero(p_point->x) && vli_isZero(p_point->y));
+}
+
+uint32_t EccPointJacobi_isZero(EccPointJacobi *p_point_jacobi)
+{
+ return vli_isZero(p_point_jacobi->Z);
+}
+
+void EccPoint_toAffine(EccPoint *p_point, EccPointJacobi *p_point_jacobi)
+{
+
+ if (vli_isZero(p_point_jacobi->Z)) {
+ vli_clear(p_point->x);
+ vli_clear(p_point->y);
+ return;
+ }
+
+ uint32_t z[NUM_ECC_DIGITS];
+
+ vli_set(z, p_point_jacobi->Z);
+ vli_modInv(z, z, curve_p, curve_pb);
+ vli_modSquare_fast(p_point->x, z);
+ vli_modMult_fast(p_point->y, p_point->x, z);
+ vli_modMult_fast(p_point->x, p_point->x, p_point_jacobi->X);
+ vli_modMult_fast(p_point->y, p_point->y, p_point_jacobi->Y);
+}
+
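+/*
+ * Elliptic curve point addition in Jacobi coordinates: P1 = P1 + P2.
+ * The special cases are handled explicitly: P1 == P2 falls back to
+ * doubling, and P1 == -P2 yields the point at infinity (encoded as Z = 0).
+ */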
+void EccPoint_add(EccPointJacobi *P1, EccPointJacobi *P2)
+{
+
+ uint32_t s1[NUM_ECC_DIGITS], u1[NUM_ECC_DIGITS], t[NUM_ECC_DIGITS];
+ uint32_t h[NUM_ECC_DIGITS], r[NUM_ECC_DIGITS];
+
+ vli_modSquare_fast(r, P1->Z);
+ vli_modSquare_fast(s1, P2->Z);
+ vli_modMult_fast(u1, P1->X, s1); /* u1 = X1 Z2^2 */
+ vli_modMult_fast(h, P2->X, r);
+ vli_modMult_fast(s1, P1->Y, s1);
+ vli_modMult_fast(s1, s1, P2->Z); /* s1 = Y1 Z2^3 */
+ vli_modMult_fast(r, P2->Y, r);
+ vli_modMult_fast(r, r, P1->Z);
+ vli_modSub(h, h, u1, curve_p); /* h = X2 Z1^2 - u1 */
+ vli_modSub(r, r, s1, curve_p); /* r = Y2 Z1^3 - s1 */
+
+ if (vli_isZero(h)) {
+ if (vli_isZero(r)) {
+ /* P1 = P2 */
+ EccPoint_double(P1);
+ return;
+ }
+ /* point at infinity */
+ vli_clear(P1->Z);
+ return;
+ }
+
+ vli_modMult_fast(P1->Z, P1->Z, P2->Z);
+ vli_modMult_fast(P1->Z, P1->Z, h); /* Z3 = h Z1 Z2 */
+ vli_modSquare_fast(t, h);
+ vli_modMult_fast(h, t, h);
+ vli_modMult_fast(u1, u1, t);
+ vli_modSquare_fast(P1->X, r);
+ vli_modSub(P1->X, P1->X, h, curve_p);
+ vli_modSub(P1->X, P1->X, u1, curve_p);
+ vli_modSub(P1->X, P1->X, u1, curve_p); /* X3 = r^2 - h^3 - 2 u1 h^2 */
+ vli_modMult_fast(t, s1, h);
+ vli_modSub(P1->Y, u1, P1->X, curve_p);
+ vli_modMult_fast(P1->Y, P1->Y, r);
+ vli_modSub(P1->Y, P1->Y, t, curve_p); /* Y3 = r(u1 h^2 - X3) - s1 h^3 */
+}
+
+/*
+ * Elliptic curve scalar multiplication with result in Jacobi coordinates:
+ *
+ * p_result = p_scalar * p_point.
+ */
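+/*
+ * Double-and-add-always ladder: each iteration performs both the doubling
+ * and the addition, and vli_cond_set picks the result according to the
+ * scalar bit, so the operation sequence is independent of p_scalar (apart
+ * from its bit length).
+ */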
+void EccPoint_mult_safe(EccPointJacobi *p_result, EccPoint *p_point, uint32_t *p_scalar)
+{
+
+ int32_t i;
+ uint32_t bit;
+ EccPointJacobi p_point_jacobi, p_tmp;
+
+ EccPoint_fromAffine(p_result, p_point);
+ EccPoint_fromAffine(&p_point_jacobi, p_point);
+
+ for (i = vli_numBits(p_scalar) - 2; i >= 0; i--) {
+ EccPoint_double(p_result);
+ EccPointJacobi_set(&p_tmp, p_result);
+ EccPoint_add(&p_tmp, &p_point_jacobi);
+ bit = vli_testBit(p_scalar, i);
+ vli_cond_set(p_result->X, p_tmp.X, p_result->X, bit);
+ vli_cond_set(p_result->Y, p_tmp.Y, p_result->Y, bit);
+ vli_cond_set(p_result->Z, p_tmp.Z, p_result->Z, bit);
+ }
+}
+
+/*
+ * Elliptic curve scalar multiplication with result in Jacobi coordinates:
+ *
+ * p_result = p_scalar * p_point.
+ */
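+/*
+ * This variant branches on the bits of p_scalar and is therefore not
+ * hardened against timing attacks; it is only meant for scalars that are
+ * public, as in signature verification.
+ */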
+void EccPoint_mult_unsafe(EccPointJacobi *p_result, EccPoint *p_point, uint32_t *p_scalar)
+{
+ int i;
+ EccPointJacobi p_point_jacobi;
+ EccPoint_fromAffine(p_result, p_point);
+ EccPoint_fromAffine(&p_point_jacobi, p_point);
+
+ for(i = vli_numBits(p_scalar) - 2; i >= 0; i--)
+ {
+ EccPoint_double(p_result);
+ if (vli_testBit(p_scalar, i))
+ {
+ EccPoint_add(p_result, &p_point_jacobi);
+ }
+ }
+}
+
+/* -------- Conversions between big endian and little endian: -------- */
+
+void ecc_bytes2native(uint32_t p_native[NUM_ECC_DIGITS],
+ uint8_t p_bytes[NUM_ECC_DIGITS * 4])
+{
+
+ uint32_t i;
+
+ for (i = 0; i < NUM_ECC_DIGITS; ++i) {
+ uint8_t *p_digit = p_bytes + 4 * (NUM_ECC_DIGITS - 1 - i);
+
+ p_native[i] = ((uint32_t)p_digit[0] << 24) |
+ ((uint32_t)p_digit[1] << 16) |
+ ((uint32_t)p_digit[2] << 8) |
+ (uint32_t)p_digit[3];
+ }
+}
+
+void ecc_native2bytes(uint8_t p_bytes[NUM_ECC_DIGITS * 4],
+ uint32_t p_native[NUM_ECC_DIGITS])
+{
+
+ uint32_t i;
+
+ for (i = 0; i < NUM_ECC_DIGITS; ++i) {
+ uint8_t *p_digit = p_bytes + 4 * (NUM_ECC_DIGITS - 1 - i);
+
+ p_digit[0] = p_native[i] >> 24;
+ p_digit[1] = p_native[i] >> 16;
+ p_digit[2] = p_native[i] >> 8;
+ p_digit[3] = p_native[i];
+ }
+}
+
diff --git a/ext/tinycrypt/lib/source/ecc_dh.c b/ext/tinycrypt/lib/source/ecc_dh.c
new file mode 100644
index 0000000..c2ab414
--- /dev/null
+++ b/ext/tinycrypt/lib/source/ecc_dh.c
@@ -0,0 +1,132 @@
+/* ecc_dh.c - TinyCrypt implementation of EC-DH */
+
+/*
+ * Copyright (C) 2015 by Intel Corporation, All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * - Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <tinycrypt/constants.h>
+#include <tinycrypt/ecc.h>
+
+extern uint32_t curve_p[NUM_ECC_DIGITS];
+extern uint32_t curve_b[NUM_ECC_DIGITS];
+extern uint32_t curve_n[NUM_ECC_DIGITS];
+extern uint32_t curve_pb[NUM_ECC_DIGITS + 1];
+extern EccPoint curve_G;
+
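+/*
+ * Derives a key pair from p_random (NUM_ECC_DIGITS * 2 words of fresh,
+ * uniformly random data): the private key is the random value reduced into
+ * the range [1, n-1] (a zero result is rejected), and the public key is
+ * privateKey * G.
+ */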
+int32_t ecc_make_key(EccPoint *p_publicKey, uint32_t p_privateKey[NUM_ECC_DIGITS],
+ uint32_t p_random[NUM_ECC_DIGITS * 2])
+{
+	/* computing the modular reduction of p_random (see FIPS 186-4, B.4.1): */
+ vli_mmod_barrett(p_privateKey, p_random, curve_p, curve_pb);
+
+ /* Make sure the private key is in the range [1, n-1].
+ * For the supported curve, n is always large enough
+ * that we only need to subtract once at most.
+ */
+ uint32_t p_tmp[NUM_ECC_DIGITS];
+ vli_sub(p_tmp, p_privateKey, curve_n, NUM_ECC_DIGITS);
+
+ vli_cond_set(p_privateKey, p_privateKey, p_tmp,
+ vli_cmp(curve_n, p_privateKey, NUM_ECC_DIGITS) == 1);
+
+ /* erasing temporary buffer used to store secret: */
+ for (uint32_t i = 0; i < NUM_ECC_DIGITS; i++)
+ p_tmp[i] = 0;
+
+ if (vli_isZero(p_privateKey)) {
+ return TC_CRYPTO_FAIL; /* The private key cannot be 0 (mod p). */
+ }
+
+ EccPointJacobi P;
+
+ EccPoint_mult_safe(&P, &curve_G, p_privateKey);
+ EccPoint_toAffine(p_publicKey, &P);
+
+ return TC_CRYPTO_SUCCESS;
+}
+
+/* Compute p_result = x^3 - 3x + b */
+static void curve_x_side(uint32_t p_result[NUM_ECC_DIGITS],
+ uint32_t x[NUM_ECC_DIGITS])
+{
+
+ uint32_t _3[NUM_ECC_DIGITS] = {3}; /* -a = 3 */
+
+ vli_modSquare_fast(p_result, x); /* r = x^2 */
+ vli_modSub(p_result, p_result, _3, curve_p); /* r = x^2 - 3 */
+ vli_modMult_fast(p_result, p_result, x); /* r = x^3 - 3x */
+ vli_modAdd(p_result, p_result, curve_b, curve_p); /* r = x^3 - 3x + b */
+
+}
+
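+/*
+ * Returns 0 for a valid public key, or a negative error code:
+ *  -1  the point is the zero point,
+ *  -2  a coordinate is not smaller than curve_p,
+ *  -3  the point does not satisfy the curve equation,
+ *  -4  the point equals the generator G.
+ */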
+int32_t ecc_valid_public_key(EccPoint *p_publicKey)
+{
+ uint32_t l_tmp1[NUM_ECC_DIGITS];
+ uint32_t l_tmp2[NUM_ECC_DIGITS];
+
+ if (EccPoint_isZero(p_publicKey)) {
+ return -1;
+ }
+
+ if ((vli_cmp(curve_p, p_publicKey->x, NUM_ECC_DIGITS) != 1) ||
+ (vli_cmp(curve_p, p_publicKey->y, NUM_ECC_DIGITS) != 1)) {
+ return -2;
+ }
+
+ vli_modSquare_fast(l_tmp1, p_publicKey->y); /* tmp1 = y^2 */
+
+ curve_x_side(l_tmp2, p_publicKey->x); /* tmp2 = x^3 - 3x + b */
+
+ /* Make sure that y^2 == x^3 + ax + b */
+ if (vli_cmp(l_tmp1, l_tmp2, NUM_ECC_DIGITS) != 0) {
+ return -3;
+ }
+
+ if (vli_cmp(p_publicKey->x, curve_G.x, NUM_ECC_DIGITS) == 0 &&
+ vli_cmp(p_publicKey->y, curve_G.y, NUM_ECC_DIGITS) == 0 )
+ return -4;
+
+ return 0;
+}
+
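+/*
+ * Computes the ECDH shared secret as the x coordinate of
+ * p_privateKey * p_publicKey.
+ *
+ * Illustrative call sequence (a sketch only; error handling and the
+ * entropy source are omitted):
+ *
+ *   uint32_t d[NUM_ECC_DIGITS], secret[NUM_ECC_DIGITS];
+ *   uint32_t rnd[NUM_ECC_DIGITS * 2];   filled with fresh random data
+ *   EccPoint Q, peer_Q;                 peer_Q is received from the peer
+ *   ecc_make_key(&Q, d, rnd);           send Q to the peer
+ *   if (ecc_valid_public_key(&peer_Q) == 0)
+ *           ecdh_shared_secret(secret, &peer_Q, d);
+ */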
+int32_t ecdh_shared_secret(uint32_t p_secret[NUM_ECC_DIGITS],
+ EccPoint *p_publicKey, uint32_t p_privateKey[NUM_ECC_DIGITS])
+{
+
+ EccPoint p_point;
+ EccPointJacobi P;
+
+ EccPoint_mult_safe(&P, p_publicKey, p_privateKey);
+ if (EccPointJacobi_isZero(&P)) {
+ return TC_CRYPTO_FAIL;
+ }
+ EccPoint_toAffine(&p_point, &P);
+ vli_set(p_secret, p_point.x);
+
+ return TC_CRYPTO_SUCCESS;
+}
diff --git a/ext/tinycrypt/lib/source/ecc_dsa.c b/ext/tinycrypt/lib/source/ecc_dsa.c
new file mode 100644
index 0000000..dd84a18
--- /dev/null
+++ b/ext/tinycrypt/lib/source/ecc_dsa.c
@@ -0,0 +1,115 @@
+/* ecc_dsa.c - TinyCrypt implementation of EC-DSA */
+
+/*
+ * Copyright (C) 2015 by Intel Corporation, All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * - Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <tinycrypt/constants.h>
+#include <tinycrypt/ecc.h>
+
+extern uint32_t curve_n[NUM_ECC_DIGITS];
+extern EccPoint curve_G;
+extern uint32_t curve_nb[NUM_ECC_DIGITS + 1];
+
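+/*
+ * Produces the signature (r, s) with r = (k * G).x mod n and
+ * s = k^-1 (e + r * d) mod n, where k = p_random, d = p_privateKey and
+ * e = p_hash. p_random must be a fresh, uniformly random value for every
+ * signature; reusing or leaking it reveals the private key.
+ */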
+int32_t ecdsa_sign(uint32_t r[NUM_ECC_DIGITS], uint32_t s[NUM_ECC_DIGITS],
+ uint32_t p_privateKey[NUM_ECC_DIGITS], uint32_t p_random[NUM_ECC_DIGITS],
+ uint32_t p_hash[NUM_ECC_DIGITS])
+{
+
+ uint32_t k[NUM_ECC_DIGITS], tmp[NUM_ECC_DIGITS];
+ EccPoint p_point;
+ EccPointJacobi P;
+
+ if (vli_isZero(p_random)) {
+ return TC_CRYPTO_FAIL; /* The random number must not be 0. */
+ }
+
+ vli_set(k, p_random);
+
+ vli_sub(tmp, k, curve_n, NUM_ECC_DIGITS);
+ vli_cond_set(k, k, tmp, vli_cmp(curve_n, k, NUM_ECC_DIGITS) == 1);
+
+ /* tmp = k * G */
+ EccPoint_mult_safe(&P, &curve_G, k);
+ EccPoint_toAffine(&p_point, &P);
+
+ /* r = x1 (mod n) */
+ vli_set(r, p_point.x);
+ if (vli_cmp(curve_n, r, NUM_ECC_DIGITS) != 1) {
+ vli_sub(r, r, curve_n, NUM_ECC_DIGITS);
+ }
+
+ if (vli_isZero(r)) {
+ return TC_CRYPTO_FAIL; /* If r == 0, fail (need a different random number). */
+ }
+
+ vli_modMult(s, r, p_privateKey, curve_n, curve_nb); /* s = r*d */
+ vli_modAdd(s, p_hash, s, curve_n); /* s = e + r*d */
+ vli_modInv(k, k, curve_n, curve_nb); /* k = 1 / k */
+ vli_modMult(s, s, k, curve_n, curve_nb); /* s = (e + r*d) / k */
+
+ return TC_CRYPTO_SUCCESS;
+}
+
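+/*
+ * Verifies (r, s) against p_hash and p_publicKey: computes u1 = e / s and
+ * u2 = r / s mod n, then accepts only if the x coordinate of
+ * u1 * G + u2 * Q, reduced mod n, equals r. Returns a nonzero value only
+ * for a valid signature.
+ */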
+int32_t ecdsa_verify(EccPoint *p_publicKey, uint32_t p_hash[NUM_ECC_DIGITS],
+ uint32_t r[NUM_ECC_DIGITS], uint32_t s[NUM_ECC_DIGITS])
+{
+
+ uint32_t u1[NUM_ECC_DIGITS], u2[NUM_ECC_DIGITS];
+ uint32_t z[NUM_ECC_DIGITS];
+ EccPointJacobi P, R;
+ EccPoint p_point;
+
+ if (vli_isZero(r) || vli_isZero(s)) {
+ return TC_CRYPTO_FAIL; /* r, s must not be 0. */
+ }
+
+ if ((vli_cmp(curve_n, r, NUM_ECC_DIGITS) != 1) ||
+ (vli_cmp(curve_n, s, NUM_ECC_DIGITS) != 1)) {
+ return TC_CRYPTO_FAIL; /* r, s must be < n. */
+ }
+
+ /* Calculate u1 and u2. */
+ vli_modInv(z, s, curve_n, curve_nb); /* Z = s^-1 */
+ vli_modMult(u1, p_hash, z, curve_n, curve_nb); /* u1 = e/s */
+ vli_modMult(u2, r, z, curve_n, curve_nb); /* u2 = r/s */
+
+ /* calculate P = u1*G + u2*Q */
+ EccPoint_mult_unsafe(&P, &curve_G, u1);
+ EccPoint_mult_unsafe(&R, p_publicKey, u2);
+ EccPoint_add(&P, &R);
+ EccPoint_toAffine(&p_point, &P);
+
+	/* Reduce p_point.x mod n, then accept only if it equals r. */
+ if (!vli_sub(z, p_point.x, curve_n, NUM_ECC_DIGITS)) {
+ vli_set(p_point.x, z);
+ }
+
+ return (vli_cmp(p_point.x, r, NUM_ECC_DIGITS) == 0);
+}
diff --git a/ext/tinycrypt/lib/source/hmac.c b/ext/tinycrypt/lib/source/hmac.c
new file mode 100644
index 0000000..e256846
--- /dev/null
+++ b/ext/tinycrypt/lib/source/hmac.c
@@ -0,0 +1,147 @@
+/* hmac.c - TinyCrypt implementation of the HMAC algorithm */
+
+/*
+ * Copyright (C) 2015 by Intel Corporation, All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * - Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <tinycrypt/hmac.h>
+#include <tinycrypt/constants.h>
+#include <tinycrypt/utils.h>
+
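+/*
+ * Prepares the HMAC key schedule in 'key': the first TC_SHA256_BLOCK_SIZE
+ * bytes hold (K xor ipad) and the following block holds (K xor opad),
+ * with the key implicitly zero-padded to the block size.
+ */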
+static void rekey(uint8_t *key, const uint8_t *new_key, uint32_t key_size)
+{
+ const uint8_t inner_pad = (uint8_t) 0x36;
+ const uint8_t outer_pad = (uint8_t) 0x5c;
+ uint32_t i;
+
+ for (i = 0; i < key_size; ++i) {
+ key[i] = inner_pad ^ new_key[i];
+ key[i + TC_SHA256_BLOCK_SIZE] = outer_pad ^ new_key[i];
+ }
+ for (; i < TC_SHA256_BLOCK_SIZE; ++i) {
+ key[i] = inner_pad; key[i + TC_SHA256_BLOCK_SIZE] = outer_pad;
+ }
+}
+
+int32_t tc_hmac_set_key(TCHmacState_t ctx,
+ const uint8_t *key,
+ uint32_t key_size)
+{
+ /* input sanity check: */
+ if (ctx == (TCHmacState_t) 0 ||
+ key == (const uint8_t *) 0 ||
+ key_size == 0) {
+ return TC_CRYPTO_FAIL;
+ }
+
+ const uint8_t dummy_key[key_size];
+ struct tc_hmac_state_struct dummy_state;
+
+ if (key_size <= TC_SHA256_BLOCK_SIZE) {
+ /*
+ * The next three lines consist of dummy calls just to avoid
+ * certain timing attacks. Without these dummy calls,
+ * adversaries would be able to learn whether the key_size is
+ * greater than TC_SHA256_BLOCK_SIZE by measuring the time
+ * consumed in this process.
+ */
+ (void)tc_sha256_init(&dummy_state.hash_state);
+ (void)tc_sha256_update(&dummy_state.hash_state,
+ dummy_key,
+ key_size);
+ (void)tc_sha256_final(&dummy_state.key[TC_SHA256_DIGEST_SIZE],
+ &dummy_state.hash_state);
+
+ /* Actual code for when key_size <= TC_SHA256_BLOCK_SIZE: */
+ rekey(ctx->key, key, key_size);
+ } else {
+ (void)tc_sha256_init(&ctx->hash_state);
+ (void)tc_sha256_update(&ctx->hash_state, key, key_size);
+ (void)tc_sha256_final(&ctx->key[TC_SHA256_DIGEST_SIZE],
+ &ctx->hash_state);
+ rekey(ctx->key,
+ &ctx->key[TC_SHA256_DIGEST_SIZE],
+ TC_SHA256_DIGEST_SIZE);
+ }
+
+ return TC_CRYPTO_SUCCESS;
+}
+
+int32_t tc_hmac_init(TCHmacState_t ctx)
+{
+ /* input sanity check: */
+ if (ctx == (TCHmacState_t) 0) {
+ return TC_CRYPTO_FAIL;
+ }
+
+ (void)tc_sha256_init(&ctx->hash_state);
+ (void)tc_sha256_update(&ctx->hash_state,
+ ctx->key,
+ TC_SHA256_BLOCK_SIZE);
+
+ return TC_CRYPTO_SUCCESS;
+}
+
+int32_t tc_hmac_update(TCHmacState_t ctx,
+ const void *data,
+ uint32_t data_length)
+{
+ /* input sanity check: */
+ if (ctx == (TCHmacState_t) 0) {
+ return TC_CRYPTO_FAIL;
+ }
+
+ (void)tc_sha256_update(&ctx->hash_state, data, data_length);
+
+ return TC_CRYPTO_SUCCESS;
+}
+
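+/*
+ * The running hash state already covers (K xor ipad) || message, so this
+ * finalizes the inner digest and then computes
+ * tag = SHA256((K xor opad) || inner digest).
+ */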
+int32_t tc_hmac_final(uint8_t *tag, uint32_t taglen, TCHmacState_t ctx)
+{
+ /* input sanity check: */
+ if (tag == (uint8_t *) 0 ||
+ taglen != TC_SHA256_DIGEST_SIZE ||
+ ctx == (TCHmacState_t) 0) {
+ return TC_CRYPTO_FAIL;
+ }
+
+ (void) tc_sha256_final(tag, &ctx->hash_state);
+
+ (void)tc_sha256_init(&ctx->hash_state);
+ (void)tc_sha256_update(&ctx->hash_state,
+ &ctx->key[TC_SHA256_BLOCK_SIZE],
+ TC_SHA256_BLOCK_SIZE);
+ (void)tc_sha256_update(&ctx->hash_state, tag, TC_SHA256_DIGEST_SIZE);
+ (void)tc_sha256_final(tag, &ctx->hash_state);
+
+ /* destroy the current state */
+ _set(ctx, 0, sizeof(*ctx));
+
+ return TC_CRYPTO_SUCCESS;
+}
diff --git a/ext/tinycrypt/lib/source/hmac_prng.c b/ext/tinycrypt/lib/source/hmac_prng.c
new file mode 100644
index 0000000..ceac27f
--- /dev/null
+++ b/ext/tinycrypt/lib/source/hmac_prng.c
@@ -0,0 +1,210 @@
+/* hmac_prng.c - TinyCrypt implementation of HMAC-PRNG */
+
+/*
+ * Copyright (C) 2015 by Intel Corporation, All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * - Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <tinycrypt/hmac_prng.h>
+#include <tinycrypt/hmac.h>
+#include <tinycrypt/constants.h>
+#include <tinycrypt/utils.h>
+
+/*
+ * min bytes in the seed string.
+ * MIN_SLEN*8 must be at least the expected security level.
+ */
+static const uint32_t MIN_SLEN = 32;
+
+/*
+ * max bytes in the seed string;
+ * SP800-90A specifies a maximum of 2^35 bits (i.e., 2^32 bytes).
+ */
+static const uint32_t MAX_SLEN = UINT32_MAX;
+
+/*
+ * max bytes in the personalization string;
+ * SP800-90A specifies a maximum of 2^35 bits (i.e., 2^32 bytes).
+ */
+static const uint32_t MAX_PLEN = UINT32_MAX;
+
+/*
+ * max bytes in the additional_info string;
+ * SP800-90A specifies a maximum of 2^35 bits (i.e., 2^32 bytes).
+ */
+static const uint32_t MAX_ALEN = UINT32_MAX;
+
+/*
+ * max number of generates between re-seeds;
+ * TinyCrypt accepts up to (2^32 - 1) which is the maximal value of
+ * a uint32_t variable, while SP800-90A specifies a maximum of 2^48.
+ */
+static const uint32_t MAX_GENS = UINT32_MAX;
+
+/*
+ * maximum bytes per generate call;
+ * SP800-90A specifies a maximum of 2^19.
+ */
+static const uint32_t MAX_OUT = (1 << 19);
+
+/*
+ * Assumes: prng != NULL, e != NULL, len >= 0.
+ */
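+/*
+ * This is the HMAC_DRBG update step from SP800-90A:
+ *   K = HMAC(K, V || 0x00 || e),  V = HMAC(K, V),
+ *   K = HMAC(K, V || 0x01 || e),  V = HMAC(K, V).
+ */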
+static void update(TCHmacPrng_t prng, const uint8_t *e, uint32_t len)
+{
+ const uint8_t separator0 = 0x00;
+ const uint8_t separator1 = 0x01;
+
+ /* use current state, e and separator 0 to compute a new prng key: */
+ (void)tc_hmac_init(&prng->h);
+ (void)tc_hmac_update(&prng->h, prng->v, sizeof(prng->v));
+ (void)tc_hmac_update(&prng->h, &separator0, sizeof(separator0));
+ (void)tc_hmac_update(&prng->h, e, len);
+ (void)tc_hmac_final(prng->key, sizeof(prng->key), &prng->h);
+ /* configure the new prng key into the prng's instance of hmac */
+ (void)tc_hmac_set_key(&prng->h, prng->key, sizeof(prng->key));
+
+ /* use the new key to compute a new state variable v */
+ (void)tc_hmac_init(&prng->h);
+ (void)tc_hmac_update(&prng->h, prng->v, sizeof(prng->v));
+ (void)tc_hmac_final(prng->v, sizeof(prng->v), &prng->h);
+
+ /* use current state, e and separator 1 to compute a new prng key: */
+ (void)tc_hmac_init(&prng->h);
+ (void)tc_hmac_update(&prng->h, prng->v, sizeof(prng->v));
+ (void)tc_hmac_update(&prng->h, &separator1, sizeof(separator1));
+ (void)tc_hmac_update(&prng->h, e, len);
+ (void)tc_hmac_final(prng->key, sizeof(prng->key), &prng->h);
+ /* configure the new prng key into the prng's instance of hmac */
+ (void)tc_hmac_set_key(&prng->h, prng->key, sizeof(prng->key));
+
+ /* use the new key to compute a new state variable v */
+ (void)tc_hmac_init(&prng->h);
+ (void)tc_hmac_update(&prng->h, prng->v, sizeof(prng->v));
+ (void)tc_hmac_final(prng->v, sizeof(prng->v), &prng->h);
+}
+
+int32_t tc_hmac_prng_init(TCHmacPrng_t prng,
+ const uint8_t *personalization,
+ uint32_t plen)
+{
+ /* input sanity check: */
+ if (prng == (TCHmacPrng_t) 0 ||
+ personalization == (uint8_t *) 0 ||
+ plen > MAX_PLEN) {
+ return TC_CRYPTO_FAIL;
+ }
+
+ /* put the generator into a known state: */
+ _set(prng->key, 0x00, sizeof(prng->key));
+ _set(prng->v, 0x01, sizeof(prng->v));
+ tc_hmac_set_key(&prng->h, prng->key, sizeof(prng->key));
+ /* update assumes SOME key has been configured into HMAC */
+
+ update(prng, personalization, plen);
+
+ /* force a reseed before allowing tc_hmac_prng_generate to succeed: */
+ prng->countdown = 0;
+
+ return TC_CRYPTO_SUCCESS;
+}
+
+int32_t tc_hmac_prng_reseed(TCHmacPrng_t prng,
+ const uint8_t *seed,
+ uint32_t seedlen,
+ const uint8_t *additional_input,
+ uint32_t additionallen)
+{
+ /* input sanity check: */
+ if (prng == (TCHmacPrng_t) 0 ||
+ seed == (const uint8_t *) 0 ||
+ seedlen < MIN_SLEN ||
+ seedlen > MAX_SLEN) {
+ return TC_CRYPTO_FAIL;
+ }
+
+ if (additional_input != (const uint8_t *) 0) {
+ /*
+ * Abort if additional_input is provided but has inappropriate
+ * length
+ */
+ if (additionallen == 0 ||
+ additionallen > MAX_ALEN) {
+ return TC_CRYPTO_FAIL;
+ } else {
+ /* call update for the seed and additional_input */
+ update(prng, seed, seedlen);
+ update(prng, additional_input, additionallen);
+ }
+ } else {
+ /* call update only for the seed */
+ update(prng, seed, seedlen);
+ }
+
+ /* ... and enable hmac_prng_generate */
+ prng->countdown = MAX_GENS;
+
+ return TC_CRYPTO_SUCCESS;
+}
+
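+/*
+ * Output is produced by iterating V = HMAC(K, V) and emitting V ("OFB
+ * mode"); afterwards the key and state are refreshed so that a later
+ * compromise of the PRNG state does not reveal previously generated
+ * output.
+ */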
+int32_t tc_hmac_prng_generate(uint8_t *out, uint32_t outlen, TCHmacPrng_t prng)
+{
+ uint32_t bufferlen;
+
+ /* input sanity check: */
+ if (out == (uint8_t *) 0 ||
+ prng == (TCHmacPrng_t) 0 ||
+ outlen == 0 ||
+ outlen > MAX_OUT) {
+ return TC_CRYPTO_FAIL;
+ } else if (prng->countdown == 0) {
+ return TC_HMAC_PRNG_RESEED_REQ;
+ }
+
+ prng->countdown--;
+
+ while (outlen != 0) {
+ /* operate HMAC in OFB mode to create "random" outputs */
+ (void)tc_hmac_init(&prng->h);
+ (void)tc_hmac_update(&prng->h, prng->v, sizeof(prng->v));
+ (void)tc_hmac_final(prng->v, sizeof(prng->v), &prng->h);
+
+ bufferlen = (TC_SHA256_DIGEST_SIZE > outlen) ?
+ outlen : TC_SHA256_DIGEST_SIZE;
+ (void)_copy(out, bufferlen, prng->v, bufferlen);
+
+ out += bufferlen;
+ outlen = (outlen > TC_SHA256_DIGEST_SIZE) ?
+ (outlen - TC_SHA256_DIGEST_SIZE) : 0;
+ }
+
+ /* block future PRNG compromises from revealing past state */
+ update(prng, prng->v, TC_SHA256_DIGEST_SIZE);
+
+ return TC_CRYPTO_SUCCESS;
+}
diff --git a/ext/tinycrypt/lib/source/sha256.c b/ext/tinycrypt/lib/source/sha256.c
new file mode 100644
index 0000000..c27d3e1
--- /dev/null
+++ b/ext/tinycrypt/lib/source/sha256.c
@@ -0,0 +1,217 @@
+/* sha256.c - TinyCrypt SHA-256 crypto hash algorithm implementation */
+
+/*
+ * Copyright (C) 2015 by Intel Corporation, All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * - Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <tinycrypt/sha256.h>
+#include <tinycrypt/constants.h>
+#include <tinycrypt/utils.h>
+
+static void compress(uint32_t *iv, const uint8_t *data);
+
+int32_t tc_sha256_init(TCSha256State_t s)
+{
+ /* input sanity check: */
+ if (s == (TCSha256State_t) 0) {
+ return TC_CRYPTO_FAIL;
+ }
+
+ /*
+ * Setting the initial state values.
+ * These values correspond to the first 32 bits of the fractional parts
+ * of the square roots of the first 8 primes: 2, 3, 5, 7, 11, 13, 17
+ * and 19.
+ */
+ _set((uint8_t *) s, 0x00, sizeof(*s));
+ s->iv[0] = 0x6a09e667;
+ s->iv[1] = 0xbb67ae85;
+ s->iv[2] = 0x3c6ef372;
+ s->iv[3] = 0xa54ff53a;
+ s->iv[4] = 0x510e527f;
+ s->iv[5] = 0x9b05688c;
+ s->iv[6] = 0x1f83d9ab;
+ s->iv[7] = 0x5be0cd19;
+
+ return TC_CRYPTO_SUCCESS;
+}
+
+int32_t tc_sha256_update(TCSha256State_t s, const uint8_t *data, size_t datalen)
+{
+ /* input sanity check: */
+ if (s == (TCSha256State_t) 0 ||
+ data == (void *) 0) {
+ return TC_CRYPTO_FAIL;
+ } else if (datalen == 0) {
+ return TC_CRYPTO_SUCCESS;
+ }
+
+ while (datalen-- > 0) {
+ s->leftover[s->leftover_offset++] = *(data++);
+ if (s->leftover_offset >= TC_SHA256_BLOCK_SIZE) {
+ compress(s->iv, s->leftover);
+ s->leftover_offset = 0;
+ s->bits_hashed += (TC_SHA256_BLOCK_SIZE << 3);
+ }
+ }
+
+ return TC_CRYPTO_SUCCESS;
+}
+
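+/*
+ * Appends the 0x80 terminator, zero padding and the 64-bit big-endian bit
+ * count, compressing an extra block first when fewer than 8 bytes of the
+ * current block remain for the length.
+ */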
+int32_t tc_sha256_final(uint8_t *digest, TCSha256State_t s)
+{
+ uint32_t i;
+
+ /* input sanity check: */
+ if (digest == (uint8_t *) 0 ||
+ s == (TCSha256State_t) 0) {
+ return TC_CRYPTO_FAIL;
+ }
+
+ s->bits_hashed += (s->leftover_offset << 3);
+
+ s->leftover[s->leftover_offset++] = 0x80; /* always room for one byte */
+ if (s->leftover_offset > (sizeof(s->leftover) - 8)) {
+ /* there is not room for all the padding in this block */
+ _set(s->leftover + s->leftover_offset, 0x00,
+ sizeof(s->leftover) - s->leftover_offset);
+ compress(s->iv, s->leftover);
+ s->leftover_offset = 0;
+ }
+
+ /* add the padding and the length in big-Endian format */
+ _set(s->leftover + s->leftover_offset, 0x00,
+ sizeof(s->leftover) - 8 - s->leftover_offset);
+ s->leftover[sizeof(s->leftover) - 1] = (uint8_t)(s->bits_hashed);
+ s->leftover[sizeof(s->leftover) - 2] = (uint8_t)(s->bits_hashed >> 8);
+ s->leftover[sizeof(s->leftover) - 3] = (uint8_t)(s->bits_hashed >> 16);
+ s->leftover[sizeof(s->leftover) - 4] = (uint8_t)(s->bits_hashed >> 24);
+ s->leftover[sizeof(s->leftover) - 5] = (uint8_t)(s->bits_hashed >> 32);
+ s->leftover[sizeof(s->leftover) - 6] = (uint8_t)(s->bits_hashed >> 40);
+ s->leftover[sizeof(s->leftover) - 7] = (uint8_t)(s->bits_hashed >> 48);
+ s->leftover[sizeof(s->leftover) - 8] = (uint8_t)(s->bits_hashed >> 56);
+
+ /* hash the padding and length */
+ compress(s->iv, s->leftover);
+
+ /* copy the iv out to digest */
+ for (i = 0; i < TC_SHA256_STATE_BLOCKS; ++i) {
+ uint32_t t = *((uint32_t *) &s->iv[i]);
+ *digest++ = (uint8_t)(t >> 24);
+ *digest++ = (uint8_t)(t >> 16);
+ *digest++ = (uint8_t)(t >> 8);
+ *digest++ = (uint8_t)(t);
+ }
+
+ /* destroy the current state */
+ _set(s, 0, sizeof(*s));
+
+ return TC_CRYPTO_SUCCESS;
+}
+
+/*
+ * Initializing SHA-256 Hash constant words K.
+ * These values correspond to the first 32 bits of the fractional parts of the
+ * cube roots of the first 64 primes between 2 and 311.
+ */
+static const uint32_t k256[64] = {
+ 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1,
+ 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
+ 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786,
+ 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
+ 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147,
+ 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
+ 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b,
+ 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
+ 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a,
+ 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
+ 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
+};
+
+static inline uint32_t ROTR(uint32_t a, uint32_t n)
+{
+ return (((a) >> n) | ((a) << (32 - n)));
+}
+
+#define Sigma0(a)(ROTR((a), 2) ^ ROTR((a), 13) ^ ROTR((a), 22))
+#define Sigma1(a)(ROTR((a), 6) ^ ROTR((a), 11) ^ ROTR((a), 25))
+#define sigma0(a)(ROTR((a), 7) ^ ROTR((a), 18) ^ ((a) >> 3))
+#define sigma1(a)(ROTR((a), 17) ^ ROTR((a), 19) ^ ((a) >> 10))
+
+#define Ch(a, b, c)(((a) & (b)) ^ ((~(a)) & (c)))
+#define Maj(a, b, c)(((a) & (b)) ^ ((a) & (c)) ^ ((b) & (c)))
+
+static inline uint32_t BigEndian(const uint8_t **c)
+{
+ uint32_t n = 0;
+
+ n = (((uint32_t)(*((*c)++))) << 24);
+ n |= ((uint32_t)(*((*c)++)) << 16);
+ n |= ((uint32_t)(*((*c)++)) << 8);
+ n |= ((uint32_t)(*((*c)++)));
+ return n;
+}
+
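+/*
+ * SHA-256 compression function. The 64-entry message schedule is kept in
+ * a 16-word circular buffer (work_space); each W[i] for i >= 16 is
+ * computed in place instead of storing all 64 words.
+ */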
+static void compress(uint32_t *iv, const uint8_t *data)
+{
+ uint32_t a, b, c, d, e, f, g, h;
+ uint32_t s0, s1;
+ uint32_t t1, t2;
+ uint32_t work_space[16];
+ uint32_t n;
+ uint32_t i;
+
+ a = iv[0]; b = iv[1]; c = iv[2]; d = iv[3];
+ e = iv[4]; f = iv[5]; g = iv[6]; h = iv[7];
+
+ for (i = 0; i < 16; ++i) {
+ n = BigEndian(&data);
+ t1 = work_space[i] = n;
+ t1 += h + Sigma1(e) + Ch(e, f, g) + k256[i];
+ t2 = Sigma0(a) + Maj(a, b, c);
+ h = g; g = f; f = e; e = d + t1;
+ d = c; c = b; b = a; a = t1 + t2;
+ }
+
+ for ( ; i < 64; ++i) {
+ s0 = work_space[(i+1)&0x0f];
+ s0 = sigma0(s0);
+ s1 = work_space[(i+14)&0x0f];
+ s1 = sigma1(s1);
+
+ t1 = work_space[i&0xf] += s0 + s1 + work_space[(i+9)&0xf];
+ t1 += h + Sigma1(e) + Ch(e, f, g) + k256[i];
+ t2 = Sigma0(a) + Maj(a, b, c);
+ h = g; g = f; f = e; e = d + t1;
+ d = c; c = b; b = a; a = t1 + t2;
+ }
+
+ iv[0] += a; iv[1] += b; iv[2] += c; iv[3] += d;
+ iv[4] += e; iv[5] += f; iv[6] += g; iv[7] += h;
+}
diff --git a/ext/tinycrypt/lib/source/utils.c b/ext/tinycrypt/lib/source/utils.c
new file mode 100644
index 0000000..147d8d4
--- /dev/null
+++ b/ext/tinycrypt/lib/source/utils.c
@@ -0,0 +1,75 @@
+/* utils.c - TinyCrypt platform-dependent run-time operations */
+
+/*
+ * Copyright (C) 2015 by Intel Corporation, All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * - Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <tinycrypt/utils.h>
+#include <tinycrypt/constants.h>
+
+#include <string.h>
+
+#define MASK_MOST_SIG_BIT 0x80
+#define MASK_TWENTY_SEVEN 0x1b
+
+uint32_t _copy(uint8_t *to, uint32_t to_len,
+ const uint8_t *from, uint32_t from_len)
+{
+ if (from_len <= to_len) {
+ (void)memcpy(to, from, from_len);
+ return from_len;
+ } else {
+ return TC_CRYPTO_FAIL;
+ }
+}
+
+void _set(void *to, uint8_t val, uint32_t len)
+{
+ (void)memset(to, val, len);
+}
+
+/*
+ * Doubles a byte in GF(2^8) (the AES xtime operation): shifts left by one
+ * and, when the high bit was set, reduces by XOR-ing with 0x1b.
+ */
+uint8_t _double_byte(uint8_t a)
+{
+ return ((a<<1) ^ ((a>>7) * MASK_TWENTY_SEVEN));
+}
+
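+/*
+ * Constant-time comparison: returns 0 when the two buffers are equal and a
+ * nonzero value otherwise, always scanning all 'size' bytes so the running
+ * time does not depend on where a mismatch occurs.
+ */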
+int32_t _compare(const uint8_t *a, const uint8_t *b, size_t size)
+{
+ const uint8_t *tempa = a;
+ const uint8_t *tempb = b;
+ uint8_t result = 0;
+
+ for (uint32_t i = 0; i < size; i++) {
+ result |= tempa[i] ^ tempb[i];
+ }
+ return result;
+}