/**
 * Constant-time functions
 *
 * Copyright The Mbed TLS Contributors
 * SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
 */

/*
 * The following functions are implemented without using comparison operators, as those
 * might be translated to branches by some compilers on some platforms.
 */
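
/*
 * Illustrative sketch (not part of the upstream file): the core branch-free
 * idiom used throughout is to expand a boolean into an all-ones/all-zeros
 * mask and select with bitwise operations:
 *
 *     uint32_t mask = -(uint32_t) bit;           // bit is 0 or 1
 *     uint32_t r = (x & mask) | (y & ~mask);     // r = bit ? x : y
 *
 * Unlike `bit ? x : y`, this gives the compiler no comparison or ternary
 * to turn into a data-dependent branch.
 */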

#include <stdint.h>
#include <limits.h>

#include "common.h"
#include "constant_time_internal.h"
#include "mbedtls/constant_time.h"
#include "mbedtls/error.h"
#include "mbedtls/platform_util.h"

#include <string.h>

#if !defined(MBEDTLS_CT_ASM)
/*
 * Define an object with the value zero, such that the compiler cannot prove that it
 * has the value zero (because it is volatile, it "may be modified in ways unknown to
 * the implementation").
 */
volatile mbedtls_ct_uint_t mbedtls_ct_zero = 0;
#endif
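
/*
 * Note (an assumption about the surrounding headers, not stated in this
 * file): without assembly support, mbedtls_ct_compiler_opaque(x) is
 * typically defined along the lines of ((x) ^ mbedtls_ct_zero). Because
 * mbedtls_ct_zero is volatile, the compiler must actually load it and
 * cannot fold the XOR away, so the value of x becomes opaque to the
 * optimiser.
 */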

/*
 * Define MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS where assembly is present to
 * perform fast unaligned access to volatile data.
 *
 * This is needed because mbedtls_get_unaligned_uintXX etc don't support volatile
 * memory accesses.
 *
 * Some of these definitions could be moved into alignment.h but for now they are
 * only used here.
 */
#if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS) && \
    ((defined(MBEDTLS_CT_ARM_ASM) && (UINTPTR_MAX == 0xfffffffful)) || \
     defined(MBEDTLS_CT_AARCH64_ASM))
/* We check pointer sizes to avoid issues with them not matching register size requirements */
#define MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS

static inline uint32_t mbedtls_get_unaligned_volatile_uint32(volatile const unsigned char *p)
{
    /* This is UB, even where it's safe:
     *    return *((volatile uint32_t*)p);
     * so instead the same thing is expressed in assembly below.
     */
    uint32_t r;
#if defined(MBEDTLS_CT_ARM_ASM)
    asm volatile ("ldr %0, [%1]" : "=r" (r) : "r" (p) :);
#elif defined(MBEDTLS_CT_AARCH64_ASM)
    asm volatile ("ldr %w0, [%1]" : "=r" (r) : MBEDTLS_ASM_AARCH64_PTR_CONSTRAINT(p) :);
#else
#error "No assembly defined for mbedtls_get_unaligned_volatile_uint32"
#endif
    return r;
}
#endif /* defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS) &&
          (defined(MBEDTLS_CT_ARM_ASM) || defined(MBEDTLS_CT_AARCH64_ASM)) */

int mbedtls_ct_memcmp(const void *a,
                      const void *b,
                      size_t n)
{
    size_t i = 0;
    /*
     * `A` and `B` are cast to volatile to ensure that the compiler
     * generates code that always fully reads both buffers.
     * Otherwise it could generate a test to exit early if `diff` has all
     * bits set early in the loop.
     */
    volatile const unsigned char *A = (volatile const unsigned char *) a;
    volatile const unsigned char *B = (volatile const unsigned char *) b;
    uint32_t diff = 0;

#if defined(MBEDTLS_EFFICIENT_UNALIGNED_VOLATILE_ACCESS)
    for (; (i + 4) <= n; i += 4) {
        uint32_t x = mbedtls_get_unaligned_volatile_uint32(A + i);
        uint32_t y = mbedtls_get_unaligned_volatile_uint32(B + i);
        diff |= x ^ y;
    }
#endif

    for (; i < n; i++) {
        /* Read volatile data in order before computing diff.
         * This avoids IAR compiler warning:
         * 'the order of volatile accesses is undefined ..' */
        unsigned char x = A[i], y = B[i];
        diff |= x ^ y;
    }

#if (INT_MAX < INT32_MAX)
    /* We don't support int smaller than 32 bits, but if someone tried to build
     * with this configuration, there is a risk that, for differing data, the
     * only bits set in diff are in the top 16 bits, and would be lost by a
     * simple cast from uint32 to int.
     * This would have significant security implications, so protect against it. */
#error "mbedtls_ct_memcmp() requires minimum 32-bit ints"
#else
    /* The bit-twiddling ensures that when we cast uint32_t to int, we are casting
     * a value that is in the range 0..INT_MAX - a value larger than this would
     * result in implementation-defined behaviour.
     *
     * This ensures that the value returned by the function is non-zero iff
     * diff is non-zero.
     */
    return (int) ((diff & 0xffff) | (diff >> 16));
#endif
}
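
/*
 * Worked example of the final fold above (illustrative): if the buffers
 * differ only in the top bit of one 32-bit word, diff = 0x80000000. Then
 *     (diff & 0xffff) | (diff >> 16) = 0x0000 | 0x8000 = 0x8000,
 * which is non-zero and at most 0xffff < INT_MAX, so the cast to int is
 * well-defined and still reports the difference.
 */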

#if defined(MBEDTLS_NIST_KW_C)

int mbedtls_ct_memcmp_partial(const void *a,
                              const void *b,
                              size_t n,
                              size_t skip_head,
                              size_t skip_tail)
{
    unsigned int diff = 0;

    volatile const unsigned char *A = (volatile const unsigned char *) a;
    volatile const unsigned char *B = (volatile const unsigned char *) b;

    size_t valid_end = n - skip_tail;

    for (size_t i = 0; i < n; i++) {
        unsigned char x = A[i], y = B[i];
        unsigned int d = x ^ y;
        mbedtls_ct_condition_t valid = mbedtls_ct_bool_and(mbedtls_ct_uint_ge(i, skip_head),
                                                           mbedtls_ct_uint_lt(i, valid_end));
        diff |= mbedtls_ct_uint_if_else_0(valid, d);
    }

    /* Since we go byte-by-byte, the only bits set will be in the bottom 8 bits, so the
     * cast from uint to int is safe. */
    return (int) diff;
}
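
/*
 * Usage sketch (hypothetical values, not from the library): compare bytes
 * 4 .. n-9 (inclusive) of two n-byte buffers in constant time, e.g. when
 * checking an unwrapped NIST key-wrap payload while ignoring a 4-byte
 * header and an 8-byte tail:
 *
 *     if (mbedtls_ct_memcmp_partial(a, b, n, 4, 8) != 0) {
 *         // buffers differ somewhere in the non-skipped region
 *     }
 *
 * All n bytes are still read on every call, so the timing does not reveal
 * where (or whether) a mismatch occurred.
 */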

#endif /* MBEDTLS_NIST_KW_C */

#if defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT)

void mbedtls_ct_memmove_left(void *start, size_t total, size_t offset)
{
    volatile unsigned char *buf = start;
    for (size_t i = 0; i < total; i++) {
        mbedtls_ct_condition_t no_op = mbedtls_ct_uint_gt(total - offset, i);
        /* The first `total - offset` passes are a no-op. The last
         * `offset` passes shift the data one byte to the left and
         * zero out the last byte. */
        for (size_t n = 0; n < total - 1; n++) {
            unsigned char current = buf[n];
            unsigned char next = buf[n + 1];
            buf[n] = mbedtls_ct_uint_if(no_op, current, next);
        }
        buf[total - 1] = mbedtls_ct_uint_if_else_0(no_op, buf[total - 1]);
    }
}
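
/*
 * Illustrative trace (not from the upstream source): with total = 4 and
 * offset = 2, buf = "abcd" evolves as follows. Passes i = 0 and i = 1
 * satisfy total - offset > i, so each byte is rewritten with its own
 * value (a no-op); passes i = 2 and i = 3 shift left and zero the tail:
 *
 *     "abcd" -> "abcd" -> "abcd" -> "bcd\0" -> "cd\0\0"
 *
 * Every pass touches all `total` bytes, so the memory access pattern is
 * independent of offset.
 */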

#endif /* MBEDTLS_PKCS1_V15 && MBEDTLS_RSA_C && !MBEDTLS_RSA_ALT */

void mbedtls_ct_memcpy_if(mbedtls_ct_condition_t condition,
                          unsigned char *dest,
                          const unsigned char *src1,
                          const unsigned char *src2,
                          size_t len)
{
#if defined(MBEDTLS_CT_SIZE_64)
    const uint64_t mask     = (uint64_t) condition;
    const uint64_t not_mask = (uint64_t) ~mbedtls_ct_compiler_opaque(condition);
#else
    const uint32_t mask     = (uint32_t) condition;
    const uint32_t not_mask = (uint32_t) ~mbedtls_ct_compiler_opaque(condition);
#endif

    /* If src2 is NULL, set up src2 so that we read from the destination address.
     *
     * This means that if src2 == NULL && condition is false, the result will be a
     * no-op because we read from dest and write the same data back into dest.
     */
    if (src2 == NULL) {
        src2 = dest;
    }

    /* dest[i] = condition ? src1[i] : src2[i] */
    size_t i = 0;
#if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)
#if defined(MBEDTLS_CT_SIZE_64)
    for (; (i + 8) <= len; i += 8) {
        uint64_t a = mbedtls_get_unaligned_uint64(src1 + i) & mask;
        uint64_t b = mbedtls_get_unaligned_uint64(src2 + i) & not_mask;
        mbedtls_put_unaligned_uint64(dest + i, a | b);
    }
#else
    for (; (i + 4) <= len; i += 4) {
        uint32_t a = mbedtls_get_unaligned_uint32(src1 + i) & mask;
        uint32_t b = mbedtls_get_unaligned_uint32(src2 + i) & not_mask;
        mbedtls_put_unaligned_uint32(dest + i, a | b);
    }
#endif /* defined(MBEDTLS_CT_SIZE_64) */
#endif /* MBEDTLS_EFFICIENT_UNALIGNED_ACCESS */
    for (; i < len; i++) {
        dest[i] = (src1[i] & mask) | (src2[i] & not_mask);
    }
}
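
/*
 * Usage sketch (illustrative names, not from the library): select between
 * two equal-length buffers without branching on a possibly secret
 * condition:
 *
 *     mbedtls_ct_memcpy_if(cond, out, plaintext, padding, len);
 *         // out = cond ? plaintext : padding
 *     mbedtls_ct_memcpy_if(cond, out, src, NULL, len);
 *         // out = cond ? src : out (dest unchanged when cond is false)
 */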
214
Jens Wiklander32b31802023-10-06 16:59:46 +0200215void mbedtls_ct_memcpy_offset(unsigned char *dest,
216 const unsigned char *src,
217 size_t offset,
218 size_t offset_min,
219 size_t offset_max,
220 size_t len)
221{
222 size_t offsetval;
223
224 for (offsetval = offset_min; offsetval <= offset_max; offsetval++) {
Tom Van Eyckc1633172024-04-09 18:44:13 +0200225 mbedtls_ct_memcpy_if(mbedtls_ct_uint_eq(offsetval, offset), dest, src + offsetval, NULL,
226 len);
Jens Wiklander32b31802023-10-06 16:59:46 +0200227 }
228}
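
/*
 * Behavioural summary (added here for clarity, not in the original): this
 * copies len bytes from src + offset to dest without revealing offset
 * through the memory access pattern. Every candidate offset in
 * [offset_min, offset_max] is visited and mbedtls_ct_memcpy_if() performs
 * a full read/write pass each time; only the iteration where
 * offsetval == offset actually changes the contents of dest.
 */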
229
Jerome Forissier039e02d2022-08-09 17:10:15 +0200230#if defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT)
231
Tom Van Eyckc1633172024-04-09 18:44:13 +0200232void mbedtls_ct_zeroize_if(mbedtls_ct_condition_t condition, void *buf, size_t len)
Jerome Forissier039e02d2022-08-09 17:10:15 +0200233{
Tom Van Eyckc1633172024-04-09 18:44:13 +0200234 uint32_t mask = (uint32_t) ~condition;
235 uint8_t *p = (uint8_t *) buf;
236 size_t i = 0;
237#if defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)
238 for (; (i + 4) <= len; i += 4) {
239 mbedtls_put_unaligned_uint32((void *) (p + i),
240 mbedtls_get_unaligned_uint32((void *) (p + i)) & mask);
Jerome Forissier039e02d2022-08-09 17:10:15 +0200241 }
Tom Van Eyckc1633172024-04-09 18:44:13 +0200242#endif
243 for (; i < len; i++) {
244 p[i] = p[i] & mask;
Jens Wiklander32b31802023-10-06 16:59:46 +0200245 }
Jerome Forissier039e02d2022-08-09 17:10:15 +0200246}
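
/*
 * Behavioural note (illustrative, not from the original comments): when
 * condition is true (an all-ones mbedtls_ct_condition_t), mask = ~condition
 * is zero and every byte is cleared; when condition is false (zero), mask
 * is all-ones and the buffer is rewritten with its existing contents.
 * Either way the same loads and stores are performed.
 */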

#endif /* defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT) */