/*
 *  Message Processing Stack, Reader implementation
 *
 *  Copyright The Mbed TLS Contributors
 *  SPDX-License-Identifier: Apache-2.0
 *
 *  Licensed under the Apache License, Version 2.0 (the "License"); you may
 *  not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 *  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 *
 *  This file is part of Mbed TLS (https://tls.mbed.org)
 */

#include "common.h"

#if defined(MBEDTLS_SSL_PROTO_TLS1_3_EXPERIMENTAL)

# include "mps_reader.h"
# include "mps_common.h"
# include "mps_trace.h"

# include <string.h>

# if (defined(__ARMCC_VERSION) || defined(_MSC_VER)) && !defined(inline) && \
    !defined(__cplusplus)
# define inline __inline
# endif

# if defined(MBEDTLS_MPS_ENABLE_TRACE)
static int mbedtls_mps_trace_id = MBEDTLS_MPS_TRACE_BIT_READER;
# endif /* MBEDTLS_MPS_ENABLE_TRACE */

/*
 * GENERAL NOTE ON CODING STYLE
 *
 * The following code intentionally separates memory loads
 * and stores from other operations (arithmetic or branches).
 * This leads to the introduction of many local variables
 * and significantly increases the C-code line count, but
 * should not increase the size of the generated assembly.
 *
 * The reason for this is twofold:
 * (1) It will ease verification efforts using the VST
 *     (Verified Software Toolchain),
 *     whose program logic cannot directly reason
 *     about instructions containing a load or store in
 *     addition to other operations (e.g. *p = *q or
 *     tmp = *p + 42).
 * (2) Operating on local variables and writing the results
 *     back to the target contexts on success only
 *     makes it possible to maintain structure invariants
 *     even on failure - this in turn has two benefits:
 *     (2.a) If for some reason an error code is not caught
 *           and operation continues, functions are nonetheless
 *           called with sane contexts, reducing the risk
 *           of dangerous behavior.
 *     (2.b) Randomized testing is easier if structures
 *           remain intact even in the face of failing
 *           and/or nonsensical calls.
 *           Moreover, it might even reduce code size because
 *           the compiler need not write back temporary results
 *           to memory in case of failure.
 */

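/*
 * Illustrative sketch only (not part of the implementation): following the
 * style described above, an update such as `rd->end += desired;` is written
 * as
 *
 *     mbedtls_mps_size_t end = rd->end;    (load)
 *     end += desired;                      (arithmetic)
 *     rd->end = end;                       (store, on success paths only)
 *
 * so that no single statement mixes a memory access with other operations.
 */

/* A reader is accumulating if it owns an accumulator which is still
 * waiting for more data (acc_remaining > 0) to fulfil a previously
 * unsatisfied read request. */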
static inline int mps_reader_is_accumulating(mbedtls_mps_reader const *rd)
{
    mbedtls_mps_size_t acc_remaining;
    if (rd->acc == NULL)
        return 0;

    acc_remaining = rd->acc_share.acc_remaining;
    return acc_remaining > 0;
}

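/* A reader is in producing mode if it currently holds no fragment
 * and is waiting for new data via mbedtls_mps_reader_feed(). */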
static inline int mps_reader_is_producing(mbedtls_mps_reader const *rd)
{
    unsigned char *frag = rd->frag;
    return frag == NULL;
}

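/* A reader is in consuming mode if it holds a fragment from which
 * data can be fetched via mbedtls_mps_reader_get(). */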
static inline int mps_reader_is_consuming(mbedtls_mps_reader const *rd)
{
    return !mps_reader_is_producing(rd);
}

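/* The offset of the current fragment within the logical data stream:
 * the first `frag_offset` bytes are served from the accumulator, the
 * remainder from the fragment. This is 0 if no accumulator is in use. */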
static inline mbedtls_mps_size_t
mps_reader_get_fragment_offset(mbedtls_mps_reader const *rd)
{
    unsigned char *acc = rd->acc;
    mbedtls_mps_size_t frag_offset;

    if (acc == NULL)
        return 0;

    frag_offset = rd->acc_share.frag_offset;
    return frag_offset;
}

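/* Whether the next read is still served from the accumulator, i.e.
 * whether the current read position `end` lies strictly before the
 * start of the fragment within the logical data stream. */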
static inline mbedtls_mps_size_t
mps_reader_serving_from_accumulator(mbedtls_mps_reader const *rd)
{
    mbedtls_mps_size_t frag_offset, end;

    frag_offset = mps_reader_get_fragment_offset(rd);
    end = rd->end;

    return end < frag_offset;
}

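/* Reset all fields of the reader to their initial values. */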
static inline void mps_reader_zero(mbedtls_mps_reader *rd)
{
    /* A plain memset() would likely be more efficient,
     * but the current way of zeroing makes it harder
     * to overlook fields which should not be zero-initialized.
     * It's also more suitable for FV efforts since it
     * doesn't require reasoning about structs being
     * interpreted as unstructured binary blobs. */
    static mbedtls_mps_reader const zero = { .frag = NULL,
                                             .frag_len = 0,
                                             .commit = 0,
                                             .end = 0,
                                             .pending = 0,
                                             .acc = NULL,
                                             .acc_len = 0,
                                             .acc_available = 0,
                                             .acc_share = { .acc_remaining =
                                                                0 } };
    *rd = zero;
}

int mbedtls_mps_reader_init(mbedtls_mps_reader *rd,
                            unsigned char *acc,
                            mbedtls_mps_size_t acc_len)
{
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_init");
    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "* Accumulator size: %u bytes", (unsigned)acc_len);
    mps_reader_zero(rd);
    rd->acc = acc;
    rd->acc_len = acc_len;
    MBEDTLS_MPS_TRACE_RETURN(0);
}

int mbedtls_mps_reader_free(mbedtls_mps_reader *rd)
{
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_free");
    mps_reader_zero(rd);
    MBEDTLS_MPS_TRACE_RETURN(0);
}

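/*
 * Minimal usage sketch (illustrative only; buffer sizes and the `frag`,
 * `frag_len`, `desired` and `paused` variables are placeholders, and error
 * handling is omitted):
 *
 *     mbedtls_mps_reader rd;
 *     unsigned char acc[100];
 *     unsigned char *chunk;
 *     int paused;
 *
 *     mbedtls_mps_reader_init(&rd, acc, sizeof(acc));
 *     mbedtls_mps_reader_feed(&rd, frag, frag_len);
 *     mbedtls_mps_reader_get(&rd, desired, &chunk, NULL);
 *     mbedtls_mps_reader_commit(&rd);
 *     mbedtls_mps_reader_reclaim(&rd, &paused);
 *     mbedtls_mps_reader_free(&rd);
 *
 * The reader API used above is declared in mps_reader.h.
 */

/* Pass a new fragment of data to the reader. If the reader is accumulating,
 * the fragment first fills the remainder of the accumulator; once enough
 * data is available, the reader moves to consuming mode. */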
int mbedtls_mps_reader_feed(mbedtls_mps_reader *rd,
                            unsigned char *new_frag,
                            mbedtls_mps_size_t new_frag_len)
{
    mbedtls_mps_size_t copy_to_acc;
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_feed");
    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "* Fragment length: %u bytes", (unsigned)new_frag_len);

    if (new_frag == NULL)
        MBEDTLS_MPS_TRACE_RETURN(MBEDTLS_ERR_MPS_READER_INVALID_ARG);

    MBEDTLS_MPS_STATE_VALIDATE_RAW(
        mps_reader_is_producing(rd),
        "mbedtls_mps_reader_feed() requires reader to be in producing mode");

    if (mps_reader_is_accumulating(rd)) {
        unsigned char *acc = rd->acc;
        mbedtls_mps_size_t acc_remaining = rd->acc_share.acc_remaining;
        mbedtls_mps_size_t acc_available = rd->acc_available;

        /* Skip over parts of the accumulator that have already been filled. */
        acc += acc_available;

        copy_to_acc = acc_remaining;
        if (copy_to_acc > new_frag_len)
            copy_to_acc = new_frag_len;

        /* Copy new contents to accumulator. */
        memcpy(acc, new_frag, copy_to_acc);

        MBEDTLS_MPS_TRACE(
            MBEDTLS_MPS_TRACE_TYPE_COMMENT,
            "Copy new data of size %u of %u into accumulator at offset %u",
            (unsigned)copy_to_acc, (unsigned)new_frag_len,
            (unsigned)acc_available);

        /* Check if, with the new fragment, we have enough data. */
        acc_remaining -= copy_to_acc;
        if (acc_remaining > 0) {
            /* We need to accumulate more data. Stay in producing mode. */
            acc_available += copy_to_acc;
            rd->acc_share.acc_remaining = acc_remaining;
            rd->acc_available = acc_available;
            MBEDTLS_MPS_TRACE_RETURN(MBEDTLS_ERR_MPS_READER_NEED_MORE);
        }

        /* We have filled the accumulator: Move to consuming mode. */

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Enough data available to serve user request");

        /* Remember overlap of accumulator and fragment. */
        rd->acc_share.frag_offset = acc_available;
        acc_available += copy_to_acc;
        rd->acc_available = acc_available;
    } else /* Not accumulating */
    {
        rd->acc_share.frag_offset = 0;
    }

    rd->frag = new_frag;
    rd->frag_len = new_frag_len;
    rd->commit = 0;
    rd->end = 0;
    MBEDTLS_MPS_TRACE_RETURN(0);
}

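/* Fetch a pointer to the next `desired` bytes of the logical data stream
 * (accumulator contents first, then the current fragment). If `buflen` is
 * NULL, the request either succeeds in full or fails; otherwise fewer bytes
 * may be returned, with the actual amount stored in `*buflen`. */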
int mbedtls_mps_reader_get(mbedtls_mps_reader *rd,
                           mbedtls_mps_size_t desired,
                           unsigned char **buffer,
                           mbedtls_mps_size_t *buflen)
{
    unsigned char *frag;
    mbedtls_mps_size_t frag_len, frag_offset, end, frag_fetched, frag_remaining;
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_get");
    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT, "* Bytes requested: %u",
                      (unsigned)desired);

    MBEDTLS_MPS_STATE_VALIDATE_RAW(
        mps_reader_is_consuming(rd),
        "mbedtls_mps_reader_get() requires reader to be in consuming mode");

    end = rd->end;
    frag_offset = mps_reader_get_fragment_offset(rd);

    /* Check if we're still serving from the accumulator. */
    if (mps_reader_serving_from_accumulator(rd)) {
        /* Illustration of supported and unsupported cases:
         *
         * - Allowed #1
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *             end end+desired
         *              |       |
         *        +-----v-------v-------------+
         *        |            acc            |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset  acc_available
         *
         * - Allowed #2
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *                  end          end+desired
         *                   |                |
         *        +----------v----------------v
         *        |            acc            |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset  acc_available
         *
         * - Not allowed #1 (could be served, but we don't actually use it):
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *              end       end+desired
         *               |             |
         *        +------v-------------v------+
         *        |            acc            |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset  acc_available
         *
         *
         * - Not allowed #2 (can't be served with a contiguous buffer):
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *              end               end + desired
         *               |                      |
         *        +------v--------------------+ v
         *        |            acc            |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset  acc_available
         *
         * In case of Allowed #2 we're switching to serve from
         * `frag` starting from the next call to mbedtls_mps_reader_get().
         */

        unsigned char *acc;

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Serve the request from the accumulator");
        if (frag_offset - end < desired) {
            mbedtls_mps_size_t acc_available;
            acc_available = rd->acc_available;
            if (acc_available - end != desired) {
                /* It might be possible to serve some of these situations by
                 * making additional space in the accumulator, removing those
                 * parts that have already been committed.
                 * On the other hand, this brings additional complexity and
                 * enlarges the code size, while there doesn't seem to be a use
                 * case where we wouldn't attempt exactly the same `get` calls
                 * when resuming a reader as those we tried before pausing it.
                 * If we believe we adhere to this restricted usage throughout
                 * the library, this check is a good opportunity to
                 * validate this. */
                MBEDTLS_MPS_TRACE_RETURN(
                    MBEDTLS_ERR_MPS_READER_INCONSISTENT_REQUESTS);
            }
        }

        acc = rd->acc;
        acc += end;

        *buffer = acc;
        if (buflen != NULL)
            *buflen = desired;

        end += desired;
        rd->end = end;
        rd->pending = 0;

        MBEDTLS_MPS_TRACE_RETURN(0);
    }

    /* Attempt to serve the request from the current fragment */
    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "Serve the request from the current fragment.");

    frag_len = rd->frag_len;
    frag_fetched = end - frag_offset; /* The amount of data from the current
                                       * fragment that has already been passed
                                       * to the user. */
    frag_remaining = frag_len - frag_fetched; /* Remaining data in fragment */

    /* Check if we can serve the read request from the fragment. */
    if (frag_remaining < desired) {
        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "There's not enough data in the current fragment "
                          "to serve the request.");
        /* There's not enough data in the current fragment,
         * so either just return what we have or fail. */
        if (buflen == NULL) {
            if (frag_remaining > 0) {
                rd->pending = desired - frag_remaining;
                MBEDTLS_MPS_TRACE(
                    MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                    "Remember to collect %u bytes before re-opening",
                    (unsigned)rd->pending);
            }
            MBEDTLS_MPS_TRACE_RETURN(MBEDTLS_ERR_MPS_READER_OUT_OF_DATA);
        }

        desired = frag_remaining;
    }

    /* There's enough data in the current fragment to serve the
     * (potentially modified) read request. */

    frag = rd->frag;
    frag += frag_fetched;

    *buffer = frag;
    if (buflen != NULL)
        *buflen = desired;

    end += desired;
    rd->end = end;
    rd->pending = 0;
    MBEDTLS_MPS_TRACE_RETURN(0);
}

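/* Mark everything fetched since the last commit as processed. Only data
 * fetched after the last commit is backed up into the accumulator by
 * mbedtls_mps_reader_reclaim(). */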
int mbedtls_mps_reader_commit(mbedtls_mps_reader *rd)
{
    mbedtls_mps_size_t end;
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_commit");
    MBEDTLS_MPS_STATE_VALIDATE_RAW(
        mps_reader_is_consuming(rd),
        "mbedtls_mps_reader_commit() requires reader to be in consuming mode");

    end = rd->end;
    rd->commit = end;

    MBEDTLS_MPS_TRACE_RETURN(0);
}

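/* Detach the current fragment and return the reader to producing mode.
 * If the last read request could not be served in full, the uncommitted
 * data is backed up into the accumulator and the reader is marked as
 * paused until the missing data arrives via mbedtls_mps_reader_feed(). */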
int mbedtls_mps_reader_reclaim(mbedtls_mps_reader *rd, int *paused)
{
    unsigned char *frag, *acc;
    mbedtls_mps_size_t pending, commit;
    mbedtls_mps_size_t acc_len, frag_offset, frag_len;
    MBEDTLS_MPS_TRACE_INIT("mbedtls_mps_reader_reclaim");

    if (paused != NULL)
        *paused = 0;

    MBEDTLS_MPS_STATE_VALIDATE_RAW(
        mps_reader_is_consuming(rd),
        "mbedtls_mps_reader_reclaim() requires reader to be in consuming mode");

    frag = rd->frag;
    acc = rd->acc;
    pending = rd->pending;
    commit = rd->commit;
    frag_len = rd->frag_len;

    frag_offset = mps_reader_get_fragment_offset(rd);

    if (pending == 0) {
        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "No unsatisfied read-request has been logged.");

        /* Check if there's data left to be consumed. */
        if (commit < frag_offset || commit - frag_offset < frag_len) {
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                              "There is data left to be consumed.");
            rd->end = commit;
            MBEDTLS_MPS_TRACE_RETURN(MBEDTLS_ERR_MPS_READER_DATA_LEFT);
        }

        rd->acc_available = 0;
        rd->acc_share.acc_remaining = 0;

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Fragment has been fully processed and committed.");
    } else {
        int overflow;

        mbedtls_mps_size_t acc_backup_offset;
        mbedtls_mps_size_t acc_backup_len;
        mbedtls_mps_size_t frag_backup_offset;
        mbedtls_mps_size_t frag_backup_len;

        mbedtls_mps_size_t backup_len;
        mbedtls_mps_size_t acc_len_needed;

        MBEDTLS_MPS_TRACE(
            MBEDTLS_MPS_TRACE_TYPE_COMMENT,
            "There has been an unsatisfied read with %u bytes overhead.",
            (unsigned)pending);

        if (acc == NULL) {
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                              "No accumulator present");
            MBEDTLS_MPS_TRACE_RETURN(MBEDTLS_ERR_MPS_READER_NEED_ACCUMULATOR);
        }
        acc_len = rd->acc_len;

        /* Check if the upper layer has already fetched
         * and committed the contents of the accumulator. */
        if (commit < frag_offset) {
            /* No, accumulator is still being processed. */
            frag_backup_offset = 0;
            frag_backup_len = frag_len;
            acc_backup_offset = commit;
            acc_backup_len = frag_offset - commit;
        } else {
            /* Yes, the accumulator is already processed. */
            frag_backup_offset = commit - frag_offset;
            frag_backup_len = frag_len - frag_backup_offset;
            acc_backup_offset = 0;
            acc_backup_len = 0;
        }

        backup_len = acc_backup_len + frag_backup_len;
        acc_len_needed = backup_len + pending;

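        /* Check that the two sums above did not wrap around. */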
        overflow = 0;
        overflow |= (backup_len < acc_backup_len);
        overflow |= (acc_len_needed < backup_len);

        if (overflow || acc_len < acc_len_needed) {
            /* Except for the different return code, we behave as if
             * there hadn't been a call to mbedtls_mps_reader_get()
             * since the last commit. */
            rd->end = commit;
            rd->pending = 0;
            MBEDTLS_MPS_TRACE(
                MBEDTLS_MPS_TRACE_TYPE_ERROR,
                "The accumulator is too small to handle the backup.");
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_ERROR, "* Size: %u",
                              (unsigned)acc_len);
            MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_ERROR,
                              "* Needed: %u (%u + %u)",
                              (unsigned)acc_len_needed, (unsigned)backup_len,
                              (unsigned)pending);
            MBEDTLS_MPS_TRACE_RETURN(
                MBEDTLS_ERR_MPS_READER_ACCUMULATOR_TOO_SMALL);
        }

        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT, "Fragment backup: %u",
                          (unsigned)frag_backup_len);
        MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                          "Accumulator backup: %u", (unsigned)acc_backup_len);

        /* Move uncommitted parts from the accumulator to the front
         * of the accumulator. */
        memmove(acc, acc + acc_backup_offset, acc_backup_len);

        /* Copy uncommitted parts of the current fragment to the
         * accumulator. */
        memcpy(acc + acc_backup_len, frag + frag_backup_offset,
               frag_backup_len);

        rd->acc_available = backup_len;
        rd->acc_share.acc_remaining = pending;

        if (paused != NULL)
            *paused = 1;
    }

    rd->frag = NULL;
    rd->frag_len = 0;

    rd->commit = 0;
    rd->end = 0;
    rd->pending = 0;

    MBEDTLS_MPS_TRACE(MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                      "Final state: aa %u, al %u, ar %u",
                      (unsigned)rd->acc_available, (unsigned)rd->acc_len,
                      (unsigned)rd->acc_share.acc_remaining);
    MBEDTLS_MPS_TRACE_RETURN(0);
}

#endif /* MBEDTLS_SSL_PROTO_TLS1_3_EXPERIMENTAL */