Switch to the new code style

Signed-off-by: Gilles Peskine <Gilles.Peskine@arm.com>
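
For reference, a minimal before/after sketch of the conventions this patch
applies (the helper function below is hypothetical and not part of the
change): no space just inside parentheses, return values written without
parentheses, and braces on every if/else/for body with the opening brace on
the same line.

    /* Old style */
    static int sign( int x )
    {
        if( x < 0 )
            return( -1 );
        else
            return( 1 );
    }

    /* New style */
    static int sign(int x)
    {
        if (x < 0) {
            return -1;
        } else {
            return 1;
        }
    }
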
diff --git a/library/sha512.c b/library/sha512.c
index 46e3dab..0ea6421 100644
--- a/library/sha512.c
+++ b/library/sha512.c
@@ -42,22 +42,22 @@
#if defined(__aarch64__)
# if defined(MBEDTLS_SHA512_USE_A64_CRYPTO_IF_PRESENT) || \
- defined(MBEDTLS_SHA512_USE_A64_CRYPTO_ONLY)
+ defined(MBEDTLS_SHA512_USE_A64_CRYPTO_ONLY)
# include <arm_neon.h>
# endif
# if defined(MBEDTLS_SHA512_USE_A64_CRYPTO_IF_PRESENT)
# if defined(__unix__)
# if defined(__linux__)
- /* Our preferred method of detection is getauxval() */
+/* Our preferred method of detection is getauxval() */
# include <sys/auxv.h>
# endif
- /* Use SIGILL on Unix, and fall back to it on Linux */
+/* Use SIGILL on Unix, and fall back to it on Linux */
# include <signal.h>
# endif
# endif
#elif defined(_M_ARM64)
# if defined(MBEDTLS_SHA512_USE_A64_CRYPTO_IF_PRESENT) || \
- defined(MBEDTLS_SHA512_USE_A64_CRYPTO_ONLY)
+ defined(MBEDTLS_SHA512_USE_A64_CRYPTO_ONLY)
# include <arm64_neon.h>
# endif
#else
@@ -71,22 +71,22 @@
* MBEDTLS_SHA512_USE_A64_CRYPTO_IF_PRESENT if no detection mechanism found
*/
#if defined(HWCAP_SHA512)
-static int mbedtls_a64_crypto_sha512_determine_support( void )
+static int mbedtls_a64_crypto_sha512_determine_support(void)
{
- return( ( getauxval( AT_HWCAP ) & HWCAP_SHA512 ) ? 1 : 0 );
+ return (getauxval(AT_HWCAP) & HWCAP_SHA512) ? 1 : 0;
}
#elif defined(__APPLE__)
#include <sys/types.h>
#include <sys/sysctl.h>
-static int mbedtls_a64_crypto_sha512_determine_support( void )
+static int mbedtls_a64_crypto_sha512_determine_support(void)
{
int value = 0;
size_t value_len = sizeof(value);
- int ret = sysctlbyname( "hw.optional.armv8_2_sha512", &value, &value_len,
- NULL, 0 );
- return( ret == 0 && value != 0 );
+ int ret = sysctlbyname("hw.optional.armv8_2_sha512", &value, &value_len,
+ NULL, 0);
+ return ret == 0 && value != 0;
}
#elif defined(_M_ARM64)
/*
@@ -115,39 +115,39 @@
/*
* A64 SHA512 support detection via SIGILL
*/
-static void sigill_handler( int signal )
+static void sigill_handler(int signal)
{
(void) signal;
- longjmp( return_from_sigill, 1 );
+ longjmp(return_from_sigill, 1);
}
-static int mbedtls_a64_crypto_sha512_determine_support( void )
+static int mbedtls_a64_crypto_sha512_determine_support(void)
{
struct sigaction old_action, new_action;
sigset_t old_mask;
- if( sigprocmask( 0, NULL, &old_mask ) )
- return( 0 );
+ if (sigprocmask(0, NULL, &old_mask)) {
+ return 0;
+ }
- sigemptyset( &new_action.sa_mask );
+ sigemptyset(&new_action.sa_mask);
new_action.sa_flags = 0;
new_action.sa_handler = sigill_handler;
- sigaction( SIGILL, &new_action, &old_action );
+ sigaction(SIGILL, &new_action, &old_action);
static int ret = 0;
- if( setjmp( return_from_sigill ) == 0 ) /* First return only */
- {
+ if (setjmp(return_from_sigill) == 0) { /* First return only */
/* If this traps, we will return a second time from setjmp() with 1 */
- asm( "sha512h q0, q0, v0.2d" : : : "v0" );
+ asm ("sha512h q0, q0, v0.2d" : : : "v0");
ret = 1;
}
- sigaction( SIGILL, &old_action, NULL );
- sigprocmask( SIG_SETMASK, &old_mask, NULL );
+ sigaction(SIGILL, &old_action, NULL);
+ sigprocmask(SIG_SETMASK, &old_mask, NULL);
- return( ret );
+ return ret;
}
#else
#warning "No mechanism to detect A64_CRYPTO found, using C code only"
@@ -161,7 +161,7 @@
#define SHA512_BLOCK_SIZE 128
#if defined(MBEDTLS_SHA512_SMALLER)
-static void sha512_put_uint64_be( uint64_t n, unsigned char *b, uint8_t i )
+static void sha512_put_uint64_be(uint64_t n, unsigned char *b, uint8_t i)
{
MBEDTLS_PUT_UINT64_BE(n, b, i);
}
@@ -169,21 +169,22 @@
#define sha512_put_uint64_be MBEDTLS_PUT_UINT64_BE
#endif /* MBEDTLS_SHA512_SMALLER */
-void mbedtls_sha512_init( mbedtls_sha512_context *ctx )
+void mbedtls_sha512_init(mbedtls_sha512_context *ctx)
{
- memset( ctx, 0, sizeof( mbedtls_sha512_context ) );
+ memset(ctx, 0, sizeof(mbedtls_sha512_context));
}
-void mbedtls_sha512_free( mbedtls_sha512_context *ctx )
+void mbedtls_sha512_free(mbedtls_sha512_context *ctx)
{
- if( ctx == NULL )
+ if (ctx == NULL) {
return;
+ }
- mbedtls_platform_zeroize( ctx, sizeof( mbedtls_sha512_context ) );
+ mbedtls_platform_zeroize(ctx, sizeof(mbedtls_sha512_context));
}
-void mbedtls_sha512_clone( mbedtls_sha512_context *dst,
- const mbedtls_sha512_context *src )
+void mbedtls_sha512_clone(mbedtls_sha512_context *dst,
+ const mbedtls_sha512_context *src)
{
*dst = *src;
}
@@ -191,24 +192,26 @@
/*
* SHA-512 context setup
*/
-int mbedtls_sha512_starts( mbedtls_sha512_context *ctx, int is384 )
+int mbedtls_sha512_starts(mbedtls_sha512_context *ctx, int is384)
{
#if defined(MBEDTLS_SHA384_C) && defined(MBEDTLS_SHA512_C)
- if( is384 != 0 && is384 != 1 )
+ if (is384 != 0 && is384 != 1) {
return MBEDTLS_ERR_SHA512_BAD_INPUT_DATA;
+ }
#elif defined(MBEDTLS_SHA512_C)
- if( is384 != 0 )
+ if (is384 != 0) {
return MBEDTLS_ERR_SHA512_BAD_INPUT_DATA;
+ }
#else /* defined MBEDTLS_SHA384_C only */
- if( is384 == 0 )
+ if (is384 == 0) {
return MBEDTLS_ERR_SHA512_BAD_INPUT_DATA;
+ }
#endif
ctx->total[0] = 0;
ctx->total[1] = 0;
- if( is384 == 0 )
- {
+ if (is384 == 0) {
#if defined(MBEDTLS_SHA512_C)
ctx->state[0] = UL64(0x6A09E667F3BCC908);
ctx->state[1] = UL64(0xBB67AE8584CAA73B);
@@ -219,9 +222,7 @@
ctx->state[6] = UL64(0x1F83D9ABFB41BD6B);
ctx->state[7] = UL64(0x5BE0CD19137E2179);
#endif /* MBEDTLS_SHA512_C */
- }
- else
- {
+ } else {
#if defined(MBEDTLS_SHA384_C)
ctx->state[0] = UL64(0xCBBB9D5DC1059ED8);
ctx->state[1] = UL64(0x629A292A367CD507);
@@ -238,7 +239,7 @@
ctx->is384 = is384;
#endif
- return( 0 );
+ return 0;
}
#if !defined(MBEDTLS_SHA512_PROCESS_ALT)
@@ -310,46 +311,45 @@
*/
#if defined(__clang__) && \
- (__clang_major__ < 13 || \
- (__clang_major__ == 13 && __clang_minor__ == 0 && __clang_patchlevel__ == 0))
+ (__clang_major__ < 13 || \
+ (__clang_major__ == 13 && __clang_minor__ == 0 && __clang_patchlevel__ == 0))
static inline uint64x2_t vsha512su0q_u64(uint64x2_t x, uint64x2_t y)
{
- asm( "sha512su0 %0.2D,%1.2D" : "+w" (x) : "w" (y) );
- return( x );
+ asm ("sha512su0 %0.2D,%1.2D" : "+w" (x) : "w" (y));
+ return x;
}
static inline uint64x2_t vsha512su1q_u64(uint64x2_t x, uint64x2_t y, uint64x2_t z)
{
- asm( "sha512su1 %0.2D,%1.2D,%2.2D" : "+w" (x) : "w" (y), "w" (z) );
- return( x );
+ asm ("sha512su1 %0.2D,%1.2D,%2.2D" : "+w" (x) : "w" (y), "w" (z));
+ return x;
}
static inline uint64x2_t vsha512hq_u64(uint64x2_t x, uint64x2_t y, uint64x2_t z)
{
- asm( "sha512h %0,%1,%2.2D" : "+w" (x) : "w" (y), "w" (z) );
- return( x );
+ asm ("sha512h %0,%1,%2.2D" : "+w" (x) : "w" (y), "w" (z));
+ return x;
}
static inline uint64x2_t vsha512h2q_u64(uint64x2_t x, uint64x2_t y, uint64x2_t z)
{
- asm( "sha512h2 %0,%1,%2.2D" : "+w" (x) : "w" (y), "w" (z) );
- return( x );
+ asm ("sha512h2 %0,%1,%2.2D" : "+w" (x) : "w" (y), "w" (z));
+ return x;
}
#endif /* __clang__ etc */
static size_t mbedtls_internal_sha512_process_many_a64_crypto(
- mbedtls_sha512_context *ctx, const uint8_t *msg, size_t len )
+ mbedtls_sha512_context *ctx, const uint8_t *msg, size_t len)
{
- uint64x2_t ab = vld1q_u64( &ctx->state[0] );
- uint64x2_t cd = vld1q_u64( &ctx->state[2] );
- uint64x2_t ef = vld1q_u64( &ctx->state[4] );
- uint64x2_t gh = vld1q_u64( &ctx->state[6] );
+ uint64x2_t ab = vld1q_u64(&ctx->state[0]);
+ uint64x2_t cd = vld1q_u64(&ctx->state[2]);
+ uint64x2_t ef = vld1q_u64(&ctx->state[4]);
+ uint64x2_t gh = vld1q_u64(&ctx->state[6]);
size_t processed = 0;
- for ( ;
- len >= SHA512_BLOCK_SIZE;
- processed += SHA512_BLOCK_SIZE,
- msg += SHA512_BLOCK_SIZE,
- len -= SHA512_BLOCK_SIZE )
- {
+ for (;
+ len >= SHA512_BLOCK_SIZE;
+ processed += SHA512_BLOCK_SIZE,
+ msg += SHA512_BLOCK_SIZE,
+ len -= SHA512_BLOCK_SIZE) {
uint64x2_t initial_sum, sum, intermed;
uint64x2_t ab_orig = ab;
@@ -357,161 +357,160 @@
uint64x2_t ef_orig = ef;
uint64x2_t gh_orig = gh;
- uint64x2_t s0 = (uint64x2_t) vld1q_u8( msg + 16 * 0 );
- uint64x2_t s1 = (uint64x2_t) vld1q_u8( msg + 16 * 1 );
- uint64x2_t s2 = (uint64x2_t) vld1q_u8( msg + 16 * 2 );
- uint64x2_t s3 = (uint64x2_t) vld1q_u8( msg + 16 * 3 );
- uint64x2_t s4 = (uint64x2_t) vld1q_u8( msg + 16 * 4 );
- uint64x2_t s5 = (uint64x2_t) vld1q_u8( msg + 16 * 5 );
- uint64x2_t s6 = (uint64x2_t) vld1q_u8( msg + 16 * 6 );
- uint64x2_t s7 = (uint64x2_t) vld1q_u8( msg + 16 * 7 );
+ uint64x2_t s0 = (uint64x2_t) vld1q_u8(msg + 16 * 0);
+ uint64x2_t s1 = (uint64x2_t) vld1q_u8(msg + 16 * 1);
+ uint64x2_t s2 = (uint64x2_t) vld1q_u8(msg + 16 * 2);
+ uint64x2_t s3 = (uint64x2_t) vld1q_u8(msg + 16 * 3);
+ uint64x2_t s4 = (uint64x2_t) vld1q_u8(msg + 16 * 4);
+ uint64x2_t s5 = (uint64x2_t) vld1q_u8(msg + 16 * 5);
+ uint64x2_t s6 = (uint64x2_t) vld1q_u8(msg + 16 * 6);
+ uint64x2_t s7 = (uint64x2_t) vld1q_u8(msg + 16 * 7);
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ /* assume LE if these not defined; untested on BE */
- s0 = vreinterpretq_u64_u8( vrev64q_u8( vreinterpretq_u8_u64( s0 ) ) );
- s1 = vreinterpretq_u64_u8( vrev64q_u8( vreinterpretq_u8_u64( s1 ) ) );
- s2 = vreinterpretq_u64_u8( vrev64q_u8( vreinterpretq_u8_u64( s2 ) ) );
- s3 = vreinterpretq_u64_u8( vrev64q_u8( vreinterpretq_u8_u64( s3 ) ) );
- s4 = vreinterpretq_u64_u8( vrev64q_u8( vreinterpretq_u8_u64( s4 ) ) );
- s5 = vreinterpretq_u64_u8( vrev64q_u8( vreinterpretq_u8_u64( s5 ) ) );
- s6 = vreinterpretq_u64_u8( vrev64q_u8( vreinterpretq_u8_u64( s6 ) ) );
- s7 = vreinterpretq_u64_u8( vrev64q_u8( vreinterpretq_u8_u64( s7 ) ) );
+ s0 = vreinterpretq_u64_u8(vrev64q_u8(vreinterpretq_u8_u64(s0)));
+ s1 = vreinterpretq_u64_u8(vrev64q_u8(vreinterpretq_u8_u64(s1)));
+ s2 = vreinterpretq_u64_u8(vrev64q_u8(vreinterpretq_u8_u64(s2)));
+ s3 = vreinterpretq_u64_u8(vrev64q_u8(vreinterpretq_u8_u64(s3)));
+ s4 = vreinterpretq_u64_u8(vrev64q_u8(vreinterpretq_u8_u64(s4)));
+ s5 = vreinterpretq_u64_u8(vrev64q_u8(vreinterpretq_u8_u64(s5)));
+ s6 = vreinterpretq_u64_u8(vrev64q_u8(vreinterpretq_u8_u64(s6)));
+ s7 = vreinterpretq_u64_u8(vrev64q_u8(vreinterpretq_u8_u64(s7)));
#endif
/* Rounds 0 and 1 */
- initial_sum = vaddq_u64( s0, vld1q_u64( &K[0] ) );
- sum = vaddq_u64( vextq_u64( initial_sum, initial_sum, 1 ), gh );
- intermed = vsha512hq_u64( sum, vextq_u64( ef, gh, 1 ), vextq_u64( cd, ef, 1 ) );
- gh = vsha512h2q_u64( intermed, cd, ab );
- cd = vaddq_u64( cd, intermed );
+ initial_sum = vaddq_u64(s0, vld1q_u64(&K[0]));
+ sum = vaddq_u64(vextq_u64(initial_sum, initial_sum, 1), gh);
+ intermed = vsha512hq_u64(sum, vextq_u64(ef, gh, 1), vextq_u64(cd, ef, 1));
+ gh = vsha512h2q_u64(intermed, cd, ab);
+ cd = vaddq_u64(cd, intermed);
/* Rounds 2 and 3 */
- initial_sum = vaddq_u64( s1, vld1q_u64( &K[2] ) );
- sum = vaddq_u64( vextq_u64( initial_sum, initial_sum, 1 ), ef );
- intermed = vsha512hq_u64( sum, vextq_u64( cd, ef, 1 ), vextq_u64( ab, cd, 1 ) );
- ef = vsha512h2q_u64( intermed, ab, gh );
- ab = vaddq_u64( ab, intermed );
+ initial_sum = vaddq_u64(s1, vld1q_u64(&K[2]));
+ sum = vaddq_u64(vextq_u64(initial_sum, initial_sum, 1), ef);
+ intermed = vsha512hq_u64(sum, vextq_u64(cd, ef, 1), vextq_u64(ab, cd, 1));
+ ef = vsha512h2q_u64(intermed, ab, gh);
+ ab = vaddq_u64(ab, intermed);
/* Rounds 4 and 5 */
- initial_sum = vaddq_u64( s2, vld1q_u64( &K[4] ) );
- sum = vaddq_u64( vextq_u64( initial_sum, initial_sum, 1 ), cd );
- intermed = vsha512hq_u64( sum, vextq_u64( ab, cd, 1 ), vextq_u64( gh, ab, 1 ) );
- cd = vsha512h2q_u64( intermed, gh, ef );
- gh = vaddq_u64( gh, intermed );
+ initial_sum = vaddq_u64(s2, vld1q_u64(&K[4]));
+ sum = vaddq_u64(vextq_u64(initial_sum, initial_sum, 1), cd);
+ intermed = vsha512hq_u64(sum, vextq_u64(ab, cd, 1), vextq_u64(gh, ab, 1));
+ cd = vsha512h2q_u64(intermed, gh, ef);
+ gh = vaddq_u64(gh, intermed);
/* Rounds 6 and 7 */
- initial_sum = vaddq_u64( s3, vld1q_u64( &K[6] ) );
- sum = vaddq_u64( vextq_u64( initial_sum, initial_sum, 1 ), ab );
- intermed = vsha512hq_u64( sum, vextq_u64( gh, ab, 1 ), vextq_u64( ef, gh, 1 ) );
- ab = vsha512h2q_u64( intermed, ef, cd );
- ef = vaddq_u64( ef, intermed );
+ initial_sum = vaddq_u64(s3, vld1q_u64(&K[6]));
+ sum = vaddq_u64(vextq_u64(initial_sum, initial_sum, 1), ab);
+ intermed = vsha512hq_u64(sum, vextq_u64(gh, ab, 1), vextq_u64(ef, gh, 1));
+ ab = vsha512h2q_u64(intermed, ef, cd);
+ ef = vaddq_u64(ef, intermed);
/* Rounds 8 and 9 */
- initial_sum = vaddq_u64( s4, vld1q_u64( &K[8] ) );
- sum = vaddq_u64( vextq_u64( initial_sum, initial_sum, 1 ), gh );
- intermed = vsha512hq_u64( sum, vextq_u64( ef, gh, 1 ), vextq_u64( cd, ef, 1 ) );
- gh = vsha512h2q_u64( intermed, cd, ab );
- cd = vaddq_u64( cd, intermed );
+ initial_sum = vaddq_u64(s4, vld1q_u64(&K[8]));
+ sum = vaddq_u64(vextq_u64(initial_sum, initial_sum, 1), gh);
+ intermed = vsha512hq_u64(sum, vextq_u64(ef, gh, 1), vextq_u64(cd, ef, 1));
+ gh = vsha512h2q_u64(intermed, cd, ab);
+ cd = vaddq_u64(cd, intermed);
/* Rounds 10 and 11 */
- initial_sum = vaddq_u64( s5, vld1q_u64( &K[10] ) );
- sum = vaddq_u64( vextq_u64( initial_sum, initial_sum, 1 ), ef );
- intermed = vsha512hq_u64( sum, vextq_u64( cd, ef, 1 ), vextq_u64( ab, cd, 1 ) );
- ef = vsha512h2q_u64( intermed, ab, gh );
- ab = vaddq_u64( ab, intermed );
+ initial_sum = vaddq_u64(s5, vld1q_u64(&K[10]));
+ sum = vaddq_u64(vextq_u64(initial_sum, initial_sum, 1), ef);
+ intermed = vsha512hq_u64(sum, vextq_u64(cd, ef, 1), vextq_u64(ab, cd, 1));
+ ef = vsha512h2q_u64(intermed, ab, gh);
+ ab = vaddq_u64(ab, intermed);
/* Rounds 12 and 13 */
- initial_sum = vaddq_u64( s6, vld1q_u64( &K[12] ) );
- sum = vaddq_u64( vextq_u64( initial_sum, initial_sum, 1 ), cd );
- intermed = vsha512hq_u64( sum, vextq_u64( ab, cd, 1 ), vextq_u64( gh, ab, 1 ) );
- cd = vsha512h2q_u64( intermed, gh, ef );
- gh = vaddq_u64( gh, intermed );
+ initial_sum = vaddq_u64(s6, vld1q_u64(&K[12]));
+ sum = vaddq_u64(vextq_u64(initial_sum, initial_sum, 1), cd);
+ intermed = vsha512hq_u64(sum, vextq_u64(ab, cd, 1), vextq_u64(gh, ab, 1));
+ cd = vsha512h2q_u64(intermed, gh, ef);
+ gh = vaddq_u64(gh, intermed);
/* Rounds 14 and 15 */
- initial_sum = vaddq_u64( s7, vld1q_u64( &K[14] ) );
- sum = vaddq_u64( vextq_u64( initial_sum, initial_sum, 1 ), ab );
- intermed = vsha512hq_u64( sum, vextq_u64( gh, ab, 1 ), vextq_u64( ef, gh, 1 ) );
- ab = vsha512h2q_u64( intermed, ef, cd );
- ef = vaddq_u64( ef, intermed );
+ initial_sum = vaddq_u64(s7, vld1q_u64(&K[14]));
+ sum = vaddq_u64(vextq_u64(initial_sum, initial_sum, 1), ab);
+ intermed = vsha512hq_u64(sum, vextq_u64(gh, ab, 1), vextq_u64(ef, gh, 1));
+ ab = vsha512h2q_u64(intermed, ef, cd);
+ ef = vaddq_u64(ef, intermed);
- for ( unsigned int t = 16; t < 80; t += 16 )
- {
+ for (unsigned int t = 16; t < 80; t += 16) {
/* Rounds t and t + 1 */
- s0 = vsha512su1q_u64( vsha512su0q_u64( s0, s1 ), s7, vextq_u64( s4, s5, 1 ) );
- initial_sum = vaddq_u64( s0, vld1q_u64( &K[t] ) );
- sum = vaddq_u64( vextq_u64( initial_sum, initial_sum, 1 ), gh );
- intermed = vsha512hq_u64( sum, vextq_u64( ef, gh, 1 ), vextq_u64( cd, ef, 1 ) );
- gh = vsha512h2q_u64( intermed, cd, ab );
- cd = vaddq_u64( cd, intermed );
+ s0 = vsha512su1q_u64(vsha512su0q_u64(s0, s1), s7, vextq_u64(s4, s5, 1));
+ initial_sum = vaddq_u64(s0, vld1q_u64(&K[t]));
+ sum = vaddq_u64(vextq_u64(initial_sum, initial_sum, 1), gh);
+ intermed = vsha512hq_u64(sum, vextq_u64(ef, gh, 1), vextq_u64(cd, ef, 1));
+ gh = vsha512h2q_u64(intermed, cd, ab);
+ cd = vaddq_u64(cd, intermed);
/* Rounds t + 2 and t + 3 */
- s1 = vsha512su1q_u64( vsha512su0q_u64( s1, s2 ), s0, vextq_u64( s5, s6, 1 ) );
- initial_sum = vaddq_u64( s1, vld1q_u64( &K[t + 2] ) );
- sum = vaddq_u64( vextq_u64( initial_sum, initial_sum, 1 ), ef );
- intermed = vsha512hq_u64( sum, vextq_u64( cd, ef, 1 ), vextq_u64( ab, cd, 1 ) );
- ef = vsha512h2q_u64( intermed, ab, gh );
- ab = vaddq_u64( ab, intermed );
+ s1 = vsha512su1q_u64(vsha512su0q_u64(s1, s2), s0, vextq_u64(s5, s6, 1));
+ initial_sum = vaddq_u64(s1, vld1q_u64(&K[t + 2]));
+ sum = vaddq_u64(vextq_u64(initial_sum, initial_sum, 1), ef);
+ intermed = vsha512hq_u64(sum, vextq_u64(cd, ef, 1), vextq_u64(ab, cd, 1));
+ ef = vsha512h2q_u64(intermed, ab, gh);
+ ab = vaddq_u64(ab, intermed);
/* Rounds t + 4 and t + 5 */
- s2 = vsha512su1q_u64( vsha512su0q_u64( s2, s3 ), s1, vextq_u64( s6, s7, 1 ) );
- initial_sum = vaddq_u64( s2, vld1q_u64( &K[t + 4] ) );
- sum = vaddq_u64( vextq_u64( initial_sum, initial_sum, 1 ), cd );
- intermed = vsha512hq_u64( sum, vextq_u64( ab, cd, 1 ), vextq_u64( gh, ab, 1 ) );
- cd = vsha512h2q_u64( intermed, gh, ef );
- gh = vaddq_u64( gh, intermed );
+ s2 = vsha512su1q_u64(vsha512su0q_u64(s2, s3), s1, vextq_u64(s6, s7, 1));
+ initial_sum = vaddq_u64(s2, vld1q_u64(&K[t + 4]));
+ sum = vaddq_u64(vextq_u64(initial_sum, initial_sum, 1), cd);
+ intermed = vsha512hq_u64(sum, vextq_u64(ab, cd, 1), vextq_u64(gh, ab, 1));
+ cd = vsha512h2q_u64(intermed, gh, ef);
+ gh = vaddq_u64(gh, intermed);
/* Rounds t + 6 and t + 7 */
- s3 = vsha512su1q_u64( vsha512su0q_u64( s3, s4 ), s2, vextq_u64( s7, s0, 1 ) );
- initial_sum = vaddq_u64( s3, vld1q_u64( &K[t + 6] ) );
- sum = vaddq_u64( vextq_u64( initial_sum, initial_sum, 1 ), ab );
- intermed = vsha512hq_u64( sum, vextq_u64( gh, ab, 1 ), vextq_u64( ef, gh, 1 ) );
- ab = vsha512h2q_u64( intermed, ef, cd );
- ef = vaddq_u64( ef, intermed );
+ s3 = vsha512su1q_u64(vsha512su0q_u64(s3, s4), s2, vextq_u64(s7, s0, 1));
+ initial_sum = vaddq_u64(s3, vld1q_u64(&K[t + 6]));
+ sum = vaddq_u64(vextq_u64(initial_sum, initial_sum, 1), ab);
+ intermed = vsha512hq_u64(sum, vextq_u64(gh, ab, 1), vextq_u64(ef, gh, 1));
+ ab = vsha512h2q_u64(intermed, ef, cd);
+ ef = vaddq_u64(ef, intermed);
/* Rounds t + 8 and t + 9 */
- s4 = vsha512su1q_u64( vsha512su0q_u64( s4, s5 ), s3, vextq_u64( s0, s1, 1 ) );
- initial_sum = vaddq_u64( s4, vld1q_u64( &K[t + 8] ) );
- sum = vaddq_u64( vextq_u64( initial_sum, initial_sum, 1 ), gh );
- intermed = vsha512hq_u64( sum, vextq_u64( ef, gh, 1 ), vextq_u64( cd, ef, 1 ) );
- gh = vsha512h2q_u64( intermed, cd, ab );
- cd = vaddq_u64( cd, intermed );
+ s4 = vsha512su1q_u64(vsha512su0q_u64(s4, s5), s3, vextq_u64(s0, s1, 1));
+ initial_sum = vaddq_u64(s4, vld1q_u64(&K[t + 8]));
+ sum = vaddq_u64(vextq_u64(initial_sum, initial_sum, 1), gh);
+ intermed = vsha512hq_u64(sum, vextq_u64(ef, gh, 1), vextq_u64(cd, ef, 1));
+ gh = vsha512h2q_u64(intermed, cd, ab);
+ cd = vaddq_u64(cd, intermed);
/* Rounds t + 10 and t + 11 */
- s5 = vsha512su1q_u64( vsha512su0q_u64( s5, s6 ), s4, vextq_u64( s1, s2, 1 ) );
- initial_sum = vaddq_u64( s5, vld1q_u64( &K[t + 10] ) );
- sum = vaddq_u64( vextq_u64( initial_sum, initial_sum, 1 ), ef );
- intermed = vsha512hq_u64( sum, vextq_u64( cd, ef, 1 ), vextq_u64( ab, cd, 1 ) );
- ef = vsha512h2q_u64( intermed, ab, gh );
- ab = vaddq_u64( ab, intermed );
+ s5 = vsha512su1q_u64(vsha512su0q_u64(s5, s6), s4, vextq_u64(s1, s2, 1));
+ initial_sum = vaddq_u64(s5, vld1q_u64(&K[t + 10]));
+ sum = vaddq_u64(vextq_u64(initial_sum, initial_sum, 1), ef);
+ intermed = vsha512hq_u64(sum, vextq_u64(cd, ef, 1), vextq_u64(ab, cd, 1));
+ ef = vsha512h2q_u64(intermed, ab, gh);
+ ab = vaddq_u64(ab, intermed);
/* Rounds t + 12 and t + 13 */
- s6 = vsha512su1q_u64( vsha512su0q_u64( s6, s7 ), s5, vextq_u64( s2, s3, 1 ) );
- initial_sum = vaddq_u64( s6, vld1q_u64( &K[t + 12] ) );
- sum = vaddq_u64( vextq_u64( initial_sum, initial_sum, 1 ), cd );
- intermed = vsha512hq_u64( sum, vextq_u64( ab, cd, 1 ), vextq_u64( gh, ab, 1 ) );
- cd = vsha512h2q_u64( intermed, gh, ef );
- gh = vaddq_u64( gh, intermed );
+ s6 = vsha512su1q_u64(vsha512su0q_u64(s6, s7), s5, vextq_u64(s2, s3, 1));
+ initial_sum = vaddq_u64(s6, vld1q_u64(&K[t + 12]));
+ sum = vaddq_u64(vextq_u64(initial_sum, initial_sum, 1), cd);
+ intermed = vsha512hq_u64(sum, vextq_u64(ab, cd, 1), vextq_u64(gh, ab, 1));
+ cd = vsha512h2q_u64(intermed, gh, ef);
+ gh = vaddq_u64(gh, intermed);
/* Rounds t + 14 and t + 15 */
- s7 = vsha512su1q_u64( vsha512su0q_u64( s7, s0 ), s6, vextq_u64( s3, s4, 1 ) );
- initial_sum = vaddq_u64( s7, vld1q_u64( &K[t + 14] ) );
- sum = vaddq_u64( vextq_u64( initial_sum, initial_sum, 1 ), ab );
- intermed = vsha512hq_u64( sum, vextq_u64( gh, ab, 1 ), vextq_u64( ef, gh, 1 ) );
- ab = vsha512h2q_u64( intermed, ef, cd );
- ef = vaddq_u64( ef, intermed );
+ s7 = vsha512su1q_u64(vsha512su0q_u64(s7, s0), s6, vextq_u64(s3, s4, 1));
+ initial_sum = vaddq_u64(s7, vld1q_u64(&K[t + 14]));
+ sum = vaddq_u64(vextq_u64(initial_sum, initial_sum, 1), ab);
+ intermed = vsha512hq_u64(sum, vextq_u64(gh, ab, 1), vextq_u64(ef, gh, 1));
+ ab = vsha512h2q_u64(intermed, ef, cd);
+ ef = vaddq_u64(ef, intermed);
}
- ab = vaddq_u64( ab, ab_orig );
- cd = vaddq_u64( cd, cd_orig );
- ef = vaddq_u64( ef, ef_orig );
- gh = vaddq_u64( gh, gh_orig );
+ ab = vaddq_u64(ab, ab_orig);
+ cd = vaddq_u64(cd, cd_orig);
+ ef = vaddq_u64(ef, ef_orig);
+ gh = vaddq_u64(gh, gh_orig);
}
- vst1q_u64( &ctx->state[0], ab );
- vst1q_u64( &ctx->state[2], cd );
- vst1q_u64( &ctx->state[4], ef );
- vst1q_u64( &ctx->state[6], gh );
+ vst1q_u64(&ctx->state[0], ab);
+ vst1q_u64(&ctx->state[2], cd);
+ vst1q_u64(&ctx->state[4], ef);
+ vst1q_u64(&ctx->state[6], gh);
- return( processed );
+ return processed;
}
#if defined(MBEDTLS_SHA512_USE_A64_CRYPTO_IF_PRESENT)
@@ -521,11 +520,12 @@
*/
static
#endif
-int mbedtls_internal_sha512_process_a64_crypto( mbedtls_sha512_context *ctx,
- const unsigned char data[SHA512_BLOCK_SIZE] )
+int mbedtls_internal_sha512_process_a64_crypto(mbedtls_sha512_context *ctx,
+ const unsigned char data[SHA512_BLOCK_SIZE])
{
- return( mbedtls_internal_sha512_process_many_a64_crypto( ctx, data,
- SHA512_BLOCK_SIZE ) == SHA512_BLOCK_SIZE ) ? 0 : -1;
+ return (mbedtls_internal_sha512_process_many_a64_crypto(ctx, data,
+ SHA512_BLOCK_SIZE) ==
+ SHA512_BLOCK_SIZE) ? 0 : -1;
}
#endif /* MBEDTLS_SHA512_USE_A64_CRYPTO_IF_PRESENT || MBEDTLS_SHA512_USE_A64_CRYPTO_ONLY */
@@ -546,54 +546,50 @@
*/
static
#endif
-int mbedtls_internal_sha512_process_c( mbedtls_sha512_context *ctx,
- const unsigned char data[SHA512_BLOCK_SIZE] )
+int mbedtls_internal_sha512_process_c(mbedtls_sha512_context *ctx,
+ const unsigned char data[SHA512_BLOCK_SIZE])
{
int i;
- struct
- {
+ struct {
uint64_t temp1, temp2, W[80];
uint64_t A[8];
} local;
-#define SHR(x,n) ((x) >> (n))
-#define ROTR(x,n) (SHR((x),(n)) | ((x) << (64 - (n))))
+#define SHR(x, n) ((x) >> (n))
+#define ROTR(x, n) (SHR((x), (n)) | ((x) << (64 - (n))))
#define S0(x) (ROTR(x, 1) ^ ROTR(x, 8) ^ SHR(x, 7))
-#define S1(x) (ROTR(x,19) ^ ROTR(x,61) ^ SHR(x, 6))
+#define S1(x) (ROTR(x, 19) ^ ROTR(x, 61) ^ SHR(x, 6))
-#define S2(x) (ROTR(x,28) ^ ROTR(x,34) ^ ROTR(x,39))
-#define S3(x) (ROTR(x,14) ^ ROTR(x,18) ^ ROTR(x,41))
+#define S2(x) (ROTR(x, 28) ^ ROTR(x, 34) ^ ROTR(x, 39))
+#define S3(x) (ROTR(x, 14) ^ ROTR(x, 18) ^ ROTR(x, 41))
-#define F0(x,y,z) (((x) & (y)) | ((z) & ((x) | (y))))
-#define F1(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
+#define F0(x, y, z) (((x) & (y)) | ((z) & ((x) | (y))))
+#define F1(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
-#define P(a,b,c,d,e,f,g,h,x,K) \
+#define P(a, b, c, d, e, f, g, h, x, K) \
do \
{ \
- local.temp1 = (h) + S3(e) + F1((e),(f),(g)) + (K) + (x); \
- local.temp2 = S2(a) + F0((a),(b),(c)); \
+ local.temp1 = (h) + S3(e) + F1((e), (f), (g)) + (K) + (x); \
+ local.temp2 = S2(a) + F0((a), (b), (c)); \
(d) += local.temp1; (h) = local.temp1 + local.temp2; \
- } while( 0 )
+ } while (0)
- for( i = 0; i < 8; i++ )
+ for (i = 0; i < 8; i++) {
local.A[i] = ctx->state[i];
+ }
#if defined(MBEDTLS_SHA512_SMALLER)
- for( i = 0; i < 80; i++ )
- {
- if( i < 16 )
- {
- local.W[i] = MBEDTLS_GET_UINT64_BE( data, i << 3 );
- }
- else
- {
+ for (i = 0; i < 80; i++) {
+ if (i < 16) {
+ local.W[i] = MBEDTLS_GET_UINT64_BE(data, i << 3);
+ } else {
local.W[i] = S1(local.W[i - 2]) + local.W[i - 7] +
- S0(local.W[i - 15]) + local.W[i - 16];
+ S0(local.W[i - 15]) + local.W[i - 16];
}
- P( local.A[0], local.A[1], local.A[2], local.A[3], local.A[4],
- local.A[5], local.A[6], local.A[7], local.W[i], K[i] );
+ P(local.A[0], local.A[1], local.A[2], local.A[3], local.A[4],
+ local.A[5], local.A[6], local.A[7], local.W[i], K[i]);
local.temp1 = local.A[7]; local.A[7] = local.A[6];
local.A[6] = local.A[5]; local.A[5] = local.A[4];
@@ -602,47 +598,44 @@
local.A[0] = local.temp1;
}
#else /* MBEDTLS_SHA512_SMALLER */
- for( i = 0; i < 16; i++ )
- {
- local.W[i] = MBEDTLS_GET_UINT64_BE( data, i << 3 );
+ for (i = 0; i < 16; i++) {
+ local.W[i] = MBEDTLS_GET_UINT64_BE(data, i << 3);
}
- for( ; i < 80; i++ )
- {
+ for (; i < 80; i++) {
local.W[i] = S1(local.W[i - 2]) + local.W[i - 7] +
- S0(local.W[i - 15]) + local.W[i - 16];
+ S0(local.W[i - 15]) + local.W[i - 16];
}
i = 0;
- do
- {
- P( local.A[0], local.A[1], local.A[2], local.A[3], local.A[4],
- local.A[5], local.A[6], local.A[7], local.W[i], K[i] ); i++;
- P( local.A[7], local.A[0], local.A[1], local.A[2], local.A[3],
- local.A[4], local.A[5], local.A[6], local.W[i], K[i] ); i++;
- P( local.A[6], local.A[7], local.A[0], local.A[1], local.A[2],
- local.A[3], local.A[4], local.A[5], local.W[i], K[i] ); i++;
- P( local.A[5], local.A[6], local.A[7], local.A[0], local.A[1],
- local.A[2], local.A[3], local.A[4], local.W[i], K[i] ); i++;
- P( local.A[4], local.A[5], local.A[6], local.A[7], local.A[0],
- local.A[1], local.A[2], local.A[3], local.W[i], K[i] ); i++;
- P( local.A[3], local.A[4], local.A[5], local.A[6], local.A[7],
- local.A[0], local.A[1], local.A[2], local.W[i], K[i] ); i++;
- P( local.A[2], local.A[3], local.A[4], local.A[5], local.A[6],
- local.A[7], local.A[0], local.A[1], local.W[i], K[i] ); i++;
- P( local.A[1], local.A[2], local.A[3], local.A[4], local.A[5],
- local.A[6], local.A[7], local.A[0], local.W[i], K[i] ); i++;
- }
- while( i < 80 );
+ do {
+ P(local.A[0], local.A[1], local.A[2], local.A[3], local.A[4],
+ local.A[5], local.A[6], local.A[7], local.W[i], K[i]); i++;
+ P(local.A[7], local.A[0], local.A[1], local.A[2], local.A[3],
+ local.A[4], local.A[5], local.A[6], local.W[i], K[i]); i++;
+ P(local.A[6], local.A[7], local.A[0], local.A[1], local.A[2],
+ local.A[3], local.A[4], local.A[5], local.W[i], K[i]); i++;
+ P(local.A[5], local.A[6], local.A[7], local.A[0], local.A[1],
+ local.A[2], local.A[3], local.A[4], local.W[i], K[i]); i++;
+ P(local.A[4], local.A[5], local.A[6], local.A[7], local.A[0],
+ local.A[1], local.A[2], local.A[3], local.W[i], K[i]); i++;
+ P(local.A[3], local.A[4], local.A[5], local.A[6], local.A[7],
+ local.A[0], local.A[1], local.A[2], local.W[i], K[i]); i++;
+ P(local.A[2], local.A[3], local.A[4], local.A[5], local.A[6],
+ local.A[7], local.A[0], local.A[1], local.W[i], K[i]); i++;
+ P(local.A[1], local.A[2], local.A[3], local.A[4], local.A[5],
+ local.A[6], local.A[7], local.A[0], local.W[i], K[i]); i++;
+ } while (i < 80);
#endif /* MBEDTLS_SHA512_SMALLER */
- for( i = 0; i < 8; i++ )
+ for (i = 0; i < 8; i++) {
ctx->state[i] += local.A[i];
+ }
/* Zeroise buffers and variables to clear sensitive data from memory. */
- mbedtls_platform_zeroize( &local, sizeof( local ) );
+ mbedtls_platform_zeroize(&local, sizeof(local));
- return( 0 );
+ return 0;
}
#endif /* !MBEDTLS_SHA512_PROCESS_ALT && !MBEDTLS_SHA512_USE_A64_CRYPTO_ONLY */
@@ -651,14 +644,14 @@
#if !defined(MBEDTLS_SHA512_USE_A64_CRYPTO_ONLY)
static size_t mbedtls_internal_sha512_process_many_c(
- mbedtls_sha512_context *ctx, const uint8_t *data, size_t len)
+ mbedtls_sha512_context *ctx, const uint8_t *data, size_t len)
{
size_t processed = 0;
- while( len >= SHA512_BLOCK_SIZE )
- {
- if( mbedtls_internal_sha512_process_c( ctx, data ) != 0)
- return( 0 );
+ while (len >= SHA512_BLOCK_SIZE) {
+ if (mbedtls_internal_sha512_process_c(ctx, data) != 0) {
+ return 0;
+ }
data += SHA512_BLOCK_SIZE;
len -= SHA512_BLOCK_SIZE;
@@ -666,7 +659,7 @@
processed += SHA512_BLOCK_SIZE;
}
- return( processed );
+ return processed;
}
#endif /* !MBEDTLS_SHA512_USE_A64_CRYPTO_ONLY */
@@ -674,36 +667,37 @@
#if defined(MBEDTLS_SHA512_USE_A64_CRYPTO_IF_PRESENT)
-static int mbedtls_a64_crypto_sha512_has_support( void )
+static int mbedtls_a64_crypto_sha512_has_support(void)
{
static int done = 0;
static int supported = 0;
- if( !done )
- {
+ if (!done) {
supported = mbedtls_a64_crypto_sha512_determine_support();
done = 1;
}
- return( supported );
+ return supported;
}
-static size_t mbedtls_internal_sha512_process_many( mbedtls_sha512_context *ctx,
- const uint8_t *msg, size_t len )
+static size_t mbedtls_internal_sha512_process_many(mbedtls_sha512_context *ctx,
+ const uint8_t *msg, size_t len)
{
- if( mbedtls_a64_crypto_sha512_has_support() )
- return( mbedtls_internal_sha512_process_many_a64_crypto( ctx, msg, len ) );
- else
- return( mbedtls_internal_sha512_process_many_c( ctx, msg, len ) );
+ if (mbedtls_a64_crypto_sha512_has_support()) {
+ return mbedtls_internal_sha512_process_many_a64_crypto(ctx, msg, len);
+ } else {
+ return mbedtls_internal_sha512_process_many_c(ctx, msg, len);
+ }
}
-int mbedtls_internal_sha512_process( mbedtls_sha512_context *ctx,
- const unsigned char data[SHA512_BLOCK_SIZE] )
+int mbedtls_internal_sha512_process(mbedtls_sha512_context *ctx,
+ const unsigned char data[SHA512_BLOCK_SIZE])
{
- if( mbedtls_a64_crypto_sha512_has_support() )
- return( mbedtls_internal_sha512_process_a64_crypto( ctx, data ) );
- else
- return( mbedtls_internal_sha512_process_c( ctx, data ) );
+ if (mbedtls_a64_crypto_sha512_has_support()) {
+ return mbedtls_internal_sha512_process_a64_crypto(ctx, data);
+ } else {
+ return mbedtls_internal_sha512_process_c(ctx, data);
+ }
}
#endif /* MBEDTLS_SHA512_USE_A64_CRYPTO_IF_PRESENT */
@@ -711,59 +705,62 @@
/*
* SHA-512 process buffer
*/
-int mbedtls_sha512_update( mbedtls_sha512_context *ctx,
- const unsigned char *input,
- size_t ilen )
+int mbedtls_sha512_update(mbedtls_sha512_context *ctx,
+ const unsigned char *input,
+ size_t ilen)
{
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
size_t fill;
unsigned int left;
- if( ilen == 0 )
- return( 0 );
+ if (ilen == 0) {
+ return 0;
+ }
left = (unsigned int) (ctx->total[0] & 0x7F);
fill = SHA512_BLOCK_SIZE - left;
ctx->total[0] += (uint64_t) ilen;
- if( ctx->total[0] < (uint64_t) ilen )
+ if (ctx->total[0] < (uint64_t) ilen) {
ctx->total[1]++;
+ }
- if( left && ilen >= fill )
- {
- memcpy( (void *) (ctx->buffer + left), input, fill );
+ if (left && ilen >= fill) {
+ memcpy((void *) (ctx->buffer + left), input, fill);
- if( ( ret = mbedtls_internal_sha512_process( ctx, ctx->buffer ) ) != 0 )
- return( ret );
+ if ((ret = mbedtls_internal_sha512_process(ctx, ctx->buffer)) != 0) {
+ return ret;
+ }
input += fill;
ilen -= fill;
left = 0;
}
- while( ilen >= SHA512_BLOCK_SIZE )
- {
+ while (ilen >= SHA512_BLOCK_SIZE) {
size_t processed =
- mbedtls_internal_sha512_process_many( ctx, input, ilen );
- if( processed < SHA512_BLOCK_SIZE )
- return( MBEDTLS_ERR_ERROR_GENERIC_ERROR );
+ mbedtls_internal_sha512_process_many(ctx, input, ilen);
+ if (processed < SHA512_BLOCK_SIZE) {
+ return MBEDTLS_ERR_ERROR_GENERIC_ERROR;
+ }
input += processed;
ilen -= processed;
}
- if( ilen > 0 )
- memcpy( (void *) (ctx->buffer + left), input, ilen );
+ if (ilen > 0) {
+ memcpy((void *) (ctx->buffer + left), input, ilen);
+ }
- return( 0 );
+ return 0;
}
/*
* SHA-512 final digest
*/
-int mbedtls_sha512_finish( mbedtls_sha512_context *ctx,
- unsigned char *output )
+int mbedtls_sha512_finish(mbedtls_sha512_context *ctx,
+ unsigned char *output)
{
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
unsigned used;
@@ -776,56 +773,54 @@
ctx->buffer[used++] = 0x80;
- if( used <= 112 )
- {
+ if (used <= 112) {
/* Enough room for padding + length in current block */
- memset( ctx->buffer + used, 0, 112 - used );
- }
- else
- {
+ memset(ctx->buffer + used, 0, 112 - used);
+ } else {
/* We'll need an extra block */
- memset( ctx->buffer + used, 0, SHA512_BLOCK_SIZE - used );
+ memset(ctx->buffer + used, 0, SHA512_BLOCK_SIZE - used);
- if( ( ret = mbedtls_internal_sha512_process( ctx, ctx->buffer ) ) != 0 )
- return( ret );
+ if ((ret = mbedtls_internal_sha512_process(ctx, ctx->buffer)) != 0) {
+ return ret;
+ }
- memset( ctx->buffer, 0, 112 );
+ memset(ctx->buffer, 0, 112);
}
/*
* Add message length
*/
- high = ( ctx->total[0] >> 61 )
- | ( ctx->total[1] << 3 );
- low = ( ctx->total[0] << 3 );
+ high = (ctx->total[0] >> 61)
+ | (ctx->total[1] << 3);
+ low = (ctx->total[0] << 3);
- sha512_put_uint64_be( high, ctx->buffer, 112 );
- sha512_put_uint64_be( low, ctx->buffer, 120 );
+ sha512_put_uint64_be(high, ctx->buffer, 112);
+ sha512_put_uint64_be(low, ctx->buffer, 120);
- if( ( ret = mbedtls_internal_sha512_process( ctx, ctx->buffer ) ) != 0 )
- return( ret );
+ if ((ret = mbedtls_internal_sha512_process(ctx, ctx->buffer)) != 0) {
+ return ret;
+ }
/*
* Output final state
*/
- sha512_put_uint64_be( ctx->state[0], output, 0 );
- sha512_put_uint64_be( ctx->state[1], output, 8 );
- sha512_put_uint64_be( ctx->state[2], output, 16 );
- sha512_put_uint64_be( ctx->state[3], output, 24 );
- sha512_put_uint64_be( ctx->state[4], output, 32 );
- sha512_put_uint64_be( ctx->state[5], output, 40 );
+ sha512_put_uint64_be(ctx->state[0], output, 0);
+ sha512_put_uint64_be(ctx->state[1], output, 8);
+ sha512_put_uint64_be(ctx->state[2], output, 16);
+ sha512_put_uint64_be(ctx->state[3], output, 24);
+ sha512_put_uint64_be(ctx->state[4], output, 32);
+ sha512_put_uint64_be(ctx->state[5], output, 40);
int truncated = 0;
#if defined(MBEDTLS_SHA384_C)
truncated = ctx->is384;
#endif
- if( !truncated )
- {
- sha512_put_uint64_be( ctx->state[6], output, 48 );
- sha512_put_uint64_be( ctx->state[7], output, 56 );
+ if (!truncated) {
+ sha512_put_uint64_be(ctx->state[6], output, 48);
+ sha512_put_uint64_be(ctx->state[7], output, 56);
}
- return( 0 );
+ return 0;
}
#endif /* !MBEDTLS_SHA512_ALT */
@@ -833,40 +828,46 @@
/*
* output = SHA-512( input buffer )
*/
-int mbedtls_sha512( const unsigned char *input,
- size_t ilen,
- unsigned char *output,
- int is384 )
+int mbedtls_sha512(const unsigned char *input,
+ size_t ilen,
+ unsigned char *output,
+ int is384)
{
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
mbedtls_sha512_context ctx;
#if defined(MBEDTLS_SHA384_C) && defined(MBEDTLS_SHA512_C)
- if( is384 != 0 && is384 != 1 )
+ if (is384 != 0 && is384 != 1) {
return MBEDTLS_ERR_SHA512_BAD_INPUT_DATA;
+ }
#elif defined(MBEDTLS_SHA512_C)
- if( is384 != 0 )
+ if (is384 != 0) {
return MBEDTLS_ERR_SHA512_BAD_INPUT_DATA;
+ }
#else /* defined MBEDTLS_SHA384_C only */
- if( is384 == 0 )
+ if (is384 == 0) {
return MBEDTLS_ERR_SHA512_BAD_INPUT_DATA;
+ }
#endif
- mbedtls_sha512_init( &ctx );
+ mbedtls_sha512_init(&ctx);
- if( ( ret = mbedtls_sha512_starts( &ctx, is384 ) ) != 0 )
+ if ((ret = mbedtls_sha512_starts(&ctx, is384)) != 0) {
goto exit;
+ }
- if( ( ret = mbedtls_sha512_update( &ctx, input, ilen ) ) != 0 )
+ if ((ret = mbedtls_sha512_update(&ctx, input, ilen)) != 0) {
goto exit;
+ }
- if( ( ret = mbedtls_sha512_finish( &ctx, output ) ) != 0 )
+ if ((ret = mbedtls_sha512_finish(&ctx, output)) != 0) {
goto exit;
+ }
exit:
- mbedtls_sha512_free( &ctx );
+ mbedtls_sha512_free(&ctx);
- return( ret );
+ return ret;
}
#if defined(MBEDTLS_SELF_TEST)
@@ -877,7 +878,9 @@
static const unsigned char sha_test_buf[3][113] =
{
{ "abc" },
- { "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu" },
+ {
+ "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu"
+ },
{ "" }
};
@@ -948,9 +951,9 @@
};
#endif /* MBEDTLS_SHA512_C */
-#define ARRAY_LENGTH( a ) ( sizeof( a ) / sizeof( ( a )[0] ) )
+#define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0]))
-static int mbedtls_sha512_common_self_test( int verbose, int is384 )
+static int mbedtls_sha512_common_self_test(int verbose, int is384)
{
int i, buflen, ret = 0;
unsigned char *buf;
@@ -958,91 +961,93 @@
mbedtls_sha512_context ctx;
#if defined(MBEDTLS_SHA384_C) && defined(MBEDTLS_SHA512_C)
- sha_test_sum_t* sha_test_sum = ( is384 ) ? sha384_test_sum : sha512_test_sum;
+ sha_test_sum_t *sha_test_sum = (is384) ? sha384_test_sum : sha512_test_sum;
#elif defined(MBEDTLS_SHA512_C)
- sha_test_sum_t* sha_test_sum = sha512_test_sum;
+ sha_test_sum_t *sha_test_sum = sha512_test_sum;
#else
- sha_test_sum_t* sha_test_sum = sha384_test_sum;
+ sha_test_sum_t *sha_test_sum = sha384_test_sum;
#endif
- buf = mbedtls_calloc( 1024, sizeof(unsigned char) );
- if( NULL == buf )
- {
- if( verbose != 0 )
- mbedtls_printf( "Buffer allocation failed\n" );
+ buf = mbedtls_calloc(1024, sizeof(unsigned char));
+ if (NULL == buf) {
+ if (verbose != 0) {
+ mbedtls_printf("Buffer allocation failed\n");
+ }
- return( 1 );
+ return 1;
}
- mbedtls_sha512_init( &ctx );
+ mbedtls_sha512_init(&ctx);
- for( i = 0; i < 3; i++ )
- {
- if( verbose != 0 )
- mbedtls_printf( " SHA-%d test #%d: ", 512 - is384 * 128, i + 1 );
+ for (i = 0; i < 3; i++) {
+ if (verbose != 0) {
+ mbedtls_printf(" SHA-%d test #%d: ", 512 - is384 * 128, i + 1);
+ }
- if( ( ret = mbedtls_sha512_starts( &ctx, is384 ) ) != 0 )
+ if ((ret = mbedtls_sha512_starts(&ctx, is384)) != 0) {
goto fail;
+ }
- if( i == 2 )
- {
- memset( buf, 'a', buflen = 1000 );
+ if (i == 2) {
+ memset(buf, 'a', buflen = 1000);
- for( int j = 0; j < 1000; j++ )
- {
- ret = mbedtls_sha512_update( &ctx, buf, buflen );
- if( ret != 0 )
+ for (int j = 0; j < 1000; j++) {
+ ret = mbedtls_sha512_update(&ctx, buf, buflen);
+ if (ret != 0) {
goto fail;
+ }
+ }
+ } else {
+ ret = mbedtls_sha512_update(&ctx, sha_test_buf[i],
+ sha_test_buflen[i]);
+ if (ret != 0) {
+ goto fail;
}
}
- else
- {
- ret = mbedtls_sha512_update( &ctx, sha_test_buf[i],
- sha_test_buflen[i] );
- if( ret != 0 )
- goto fail;
+
+ if ((ret = mbedtls_sha512_finish(&ctx, sha512sum)) != 0) {
+ goto fail;
}
- if( ( ret = mbedtls_sha512_finish( &ctx, sha512sum ) ) != 0 )
- goto fail;
-
- if( memcmp( sha512sum, sha_test_sum[i], 64 - is384 * 16 ) != 0 )
- {
+ if (memcmp(sha512sum, sha_test_sum[i], 64 - is384 * 16) != 0) {
ret = 1;
goto fail;
}
- if( verbose != 0 )
- mbedtls_printf( "passed\n" );
+ if (verbose != 0) {
+ mbedtls_printf("passed\n");
+ }
}
- if( verbose != 0 )
- mbedtls_printf( "\n" );
+ if (verbose != 0) {
+ mbedtls_printf("\n");
+ }
goto exit;
fail:
- if( verbose != 0 )
- mbedtls_printf( "failed\n" );
+ if (verbose != 0) {
+ mbedtls_printf("failed\n");
+ }
exit:
- mbedtls_sha512_free( &ctx );
- mbedtls_free( buf );
+ mbedtls_sha512_free(&ctx);
+ mbedtls_free(buf);
- return( ret );
+ return ret;
}
#if defined(MBEDTLS_SHA512_C)
-int mbedtls_sha512_self_test( int verbose )
+int mbedtls_sha512_self_test(int verbose)
{
- return mbedtls_sha512_common_self_test( verbose, 0 );
+ return mbedtls_sha512_common_self_test(verbose, 0);
}
#endif /* MBEDTLS_SHA512_C */
#if defined(MBEDTLS_SHA384_C)
-int mbedtls_sha384_self_test( int verbose )
+int mbedtls_sha384_self_test(int verbose)
{
- return mbedtls_sha512_common_self_test( verbose, 1 );
+ return mbedtls_sha512_common_self_test(verbose, 1);
}
#endif /* MBEDTLS_SHA384_C */