/*
 *  Message Processing Stack, Reader implementation
 *
 *  Copyright The Mbed TLS Contributors
 *  SPDX-License-Identifier: Apache-2.0
 *
 *  Licensed under the Apache License, Version 2.0 (the "License"); you may
 *  not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 *  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 *
 *  This file is part of Mbed TLS (https://tls.mbed.org)
 */

#include "reader.h"
#include "common.h"
#include "trace.h"

#include <string.h>

#if ( defined(__ARMCC_VERSION) || defined(_MSC_VER) ) && \
    !defined(inline) && !defined(__cplusplus)
#define inline __inline
#endif

/*
 * GENERAL NOTE ON CODING STYLE
 *
 * The following code intentionally separates memory loads
 * and stores from other operations (arithmetic or branches).
 * This leads to the introduction of many local variables
 * and significantly increases the C-code line count, but
 * should not increase the size of generated assembly.
 *
 * The reason for this is twofold:
 * (1) It will ease verification efforts using the VST
 *     (Verified Software Toolchain),
 *     whose program logic cannot directly reason
 *     about instructions containing a load or store in
 *     addition to other operations (e.g. *p = *q or
 *     tmp = *p + 42).
 * (2) Operating on local variables and writing the results
 *     back to the target contexts on success only
 *     allows us to maintain structure invariants even
 *     on failure - this in turn has two benefits:
 *     (2.a) If for some reason an error code is not caught
 *           and operation continues, functions are nonetheless
 *           called with sane contexts, reducing the risk
 *           of dangerous behavior.
 *     (2.b) Randomized testing is easier if structures
 *           remain intact even in the face of failing
 *           and/or nonsensical calls.
 *     Moreover, it might even reduce code-size because
 *     the compiler need not write back temporary results
 *     to memory in case of failure.
 */
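
/*
 * Illustrative sketch of the style described above (not part of the
 * implementation; the names are simply those used elsewhere in this
 * file). Instead of combining a load, arithmetic and a store in one
 * statement, such as
 *
 *     rd->acc_avail += copy_to_acc;
 *
 * the code below loads into a local variable, operates on locals only,
 * and writes the result back once the operation is known to succeed:
 *
 *     mbedtls_mps_size_t aa;
 *     aa = rd->acc_avail;           <- load
 *     aa += copy_to_acc;            <- arithmetic on locals
 *     rd->acc_avail = aa;           <- store back on success
 */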

static inline void mps_reader_zero( mbedtls_reader *rd )
{
    /* A plain memset() would likely be more efficient,
     * but the current way of zeroing makes it harder
     * to overlook fields which should not be zero-initialized.
     * It's also more suitable for verification efforts since it
     * doesn't require reasoning about structs being
     * interpreted as unstructured binary blobs. */
    static mbedtls_reader const zero =
        { .frag      = NULL,
          .frag_len  = 0,
          .commit    = 0,
          .end       = 0,
          .pending   = 0,
          .acc       = NULL,
          .acc_len   = 0,
          .acc_avail = 0,
          .acc_share = { .acc_remaining = 0 }
        };
    *rd = zero;
}

int mbedtls_reader_init( mbedtls_reader *rd,
                         unsigned char *acc,
                         mbedtls_mps_size_t acc_len )
{
    TRACE_INIT( "reader_init, acc len %u", (unsigned) acc_len );
    mps_reader_zero( rd );
    rd->acc = acc;
    rd->acc_len = acc_len;
    RETURN( 0 );
}

int mbedtls_reader_free( mbedtls_reader *rd )
{
    TRACE_INIT( "reader_free" );
    mps_reader_zero( rd );
    RETURN( 0 );
}

int mbedtls_reader_feed( mbedtls_reader *rd,
                         unsigned char *new_frag,
                         mbedtls_mps_size_t new_frag_len )
{
    unsigned char *acc;
    mbedtls_mps_size_t copy_to_acc;
    TRACE_INIT( "reader_feed, frag %p, len %u",
                (void*) new_frag, (unsigned) new_frag_len );

    if( new_frag == NULL )
        RETURN( MBEDTLS_ERR_MPS_READER_INVALID_ARG );

    MBEDTLS_MPS_STATE_VALIDATE_RAW( rd->frag == NULL,
        "mbedtls_reader_feed() requires reader to be in producing mode" );

    acc = rd->acc;
    if( acc != NULL )
    {
        mbedtls_mps_size_t aa, ar;

        ar = rd->acc_share.acc_remaining;
        aa = rd->acc_avail;

        copy_to_acc = ar;
        if( copy_to_acc > new_frag_len )
            copy_to_acc = new_frag_len;

        acc += aa;

        if( copy_to_acc > 0 )
            memcpy( acc, new_frag, copy_to_acc );

        TRACE( trace_comment, "Copy new data of size %u of %u into accumulator at offset %u",
               (unsigned) copy_to_acc, (unsigned) new_frag_len, (unsigned) aa );

        /* Check if, with the new fragment, we have enough data. */
        ar -= copy_to_acc;
        if( ar > 0 )
        {
            /* Need more data */
            aa += copy_to_acc;
            rd->acc_share.acc_remaining = ar;
            rd->acc_avail = aa;
            RETURN( MBEDTLS_ERR_MPS_READER_NEED_MORE );
        }

        TRACE( trace_comment, "Enough data available to serve user request" );

        rd->acc_share.frag_offset = aa;
        aa += copy_to_acc;
        rd->acc_avail = aa;
    }
    else
    {
        rd->acc_share.frag_offset = 0;
    }

    rd->frag = new_frag;
    rd->frag_len = new_frag_len;
    rd->commit = 0;
    rd->end = 0;
    RETURN( 0 );
}
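
/*
 * Minimal usage sketch for the API implemented in this file (illustrative
 * only, not part of the implementation; `frag`, `frag_len` and process()
 * are hypothetical placeholders for the caller's I/O and parsing code):
 *
 *     unsigned char acc[100];
 *     unsigned char *chunk;
 *     mbedtls_reader rd;
 *
 *     mbedtls_reader_init( &rd, acc, sizeof( acc ) );
 *
 *     Producing mode: hand an incoming fragment to the reader.
 *
 *         mbedtls_reader_feed( &rd, frag, frag_len );
 *
 *     Consuming mode: request contiguous chunks and commit them.
 *
 *         if( mbedtls_reader_get( &rd, 4, &chunk, NULL ) == 0 )
 *         {
 *             process( chunk, 4 );
 *             mbedtls_reader_commit( &rd );
 *         }
 *
 *     Back to producing mode: reclaim the fragment, then feed more data.
 *
 *         mbedtls_reader_reclaim( &rd, NULL );
 *         mbedtls_reader_free( &rd );
 */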

int mbedtls_reader_get( mbedtls_reader *rd,
                        mbedtls_mps_size_t desired,
                        unsigned char **buffer,
                        mbedtls_mps_size_t *buflen )
{
    unsigned char *frag, *acc;
    mbedtls_mps_size_t end, fo, fl, frag_fetched, frag_remaining;
    TRACE_INIT( "reader_get %p, desired %u", (void*) rd, (unsigned) desired );

    frag = rd->frag;
    MBEDTLS_MPS_STATE_VALIDATE_RAW( frag != NULL,
        "mbedtls_reader_get() requires reader to be in consuming mode" );

    /* The fragment offset indicates the offset of the fragment
     * from the accumulator, if the latter is present. Use an offset
     * of \c 0 if no accumulator is used. */
    acc = rd->acc;
    if( acc == NULL )
        fo = 0;
    else
        fo = rd->acc_share.frag_offset;

    TRACE( trace_comment, "frag_off %u, end %u, acc_avail %d",
           (unsigned) fo, (unsigned) rd->end,
           acc == NULL ? -1 : (int) rd->acc_avail );

    /* Check if we're still serving from the accumulator. */
    end = rd->end;
    if( end < fo )
    {
        TRACE( trace_comment, "Serve the request from the accumulator" );
        if( fo - end < desired )
        {
            /* Illustration of supported and unsupported cases:
             *
             * - Allowed #1
             *
             *                       +-----------------------------------+
             *                       |               frag                |
             *                       +-----------------------------------+
             *
             *        end       end+desired
             *          |       |
             *    +-----v-------v-------------+
             *    |            acc            |
             *    +---------------------------+
             *                       |        |
             *          fo/frag_offset        aa/acc_avail
             *
             * - Allowed #2
             *
             *                       +-----------------------------------+
             *                       |               frag                |
             *                       +-----------------------------------+
             *
             *             end      end+desired
             *               |                |
             *    +----------v----------------v
             *    |            acc            |
             *    +---------------------------+
             *                       |        |
             *          fo/frag_offset        aa/acc_avail
             *
             * - Not allowed #1 (could be served, but we don't actually use it):
             *
             *                       +-----------------------------------+
             *                       |               frag                |
             *                       +-----------------------------------+
             *
             *         end   end+desired
             *           |             |
             *    +------v-------------v------+
             *    |            acc            |
             *    +---------------------------+
             *                       |        |
             *          fo/frag_offset        aa/acc_avail
             *
             * - Not allowed #2 (can't be served with a contiguous buffer):
             *
             *                       +-----------------------------------+
             *                       |               frag                |
             *                       +-----------------------------------+
             *
             *         end          end + desired
             *           |                      |
             *    +------v--------------------+ v
             *    |            acc            |
             *    +---------------------------+
             *                       |        |
             *          fo/frag_offset        aa/acc_avail
             *
             * In case of Allowed #1 and #2 we switch to serving from
             * `frag` starting from the next call to mbedtls_reader_get().
             */

            mbedtls_mps_size_t aa;
            aa = rd->acc_avail;
            if( aa - end != desired )
            {
                /* It might be possible to serve some of these situations by
                 * making additional space in the accumulator, removing those
                 * parts that have already been committed.
                 * On the other hand, this brings additional complexity and
                 * enlarges the code size, while there doesn't seem to be a use
                 * case where we wouldn't attempt exactly the same `get` calls
                 * when resuming a reader as we did before pausing it.
                 * If we believe we adhere to this restricted usage throughout
                 * the library, this check is a good opportunity to
                 * validate it. */
                RETURN( MBEDTLS_ERR_MPS_READER_INCONSISTENT_REQUESTS );
            }
        }

        acc += end;
        *buffer = acc;
        if( buflen != NULL )
            *buflen = desired;

        end += desired;
        rd->end = end;
        rd->pending = 0;

        RETURN( 0 );
    }

    /* Attempt to serve the request from the current fragment */
    TRACE( trace_comment, "Serve the request from the current fragment." );

    fl = rd->frag_len;
    frag_fetched = end - fo; /* The amount of data from the current fragment
                              * that has already been passed to the user. */
    frag += frag_fetched;
    frag_remaining = fl - frag_fetched; /* Remaining data in fragment */

    /* Check if we can serve the read request from the fragment. */
    if( frag_remaining < desired )
    {
        TRACE( trace_comment, "There's not enough data in the current fragment to serve the request." );
        /* There's not enough data in the current fragment,
         * so either just RETURN what we have or fail. */
        if( buflen == NULL )
        {
            if( frag_remaining > 0 )
            {
                rd->pending = desired - frag_remaining;
                TRACE( trace_comment, "Remember to collect %u bytes before re-opening",
                       (unsigned) rd->pending );
            }
            RETURN( MBEDTLS_ERR_MPS_READER_OUT_OF_DATA );
        }

        desired = frag_remaining;
    }

    /* There's enough data in the current fragment to serve the
     * (potentially modified) read request. */
    *buffer = frag;
    if( buflen != NULL )
        *buflen = desired;

    end += desired;
    rd->end = end;
    rd->pending = 0;
    RETURN( 0 );
}
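
/*
 * Illustrative sketch of the two request flavours accepted by
 * mbedtls_reader_get() above (not part of the implementation; `rd` is
 * assumed to be a caller's reader in consuming mode):
 *
 *     unsigned char *buf;
 *     mbedtls_mps_size_t len;
 *     int ret;
 *
 *     Exact request: with buflen == NULL, the call either returns exactly
 *     10 bytes or fails with MBEDTLS_ERR_MPS_READER_OUT_OF_DATA so that the
 *     request can be retried after more data has been fed:
 *
 *         ret = mbedtls_reader_get( &rd, 10, &buf, NULL );
 *
 *     Partial request: with buflen != NULL, the call may return fewer than
 *     the desired bytes and reports the actual amount in len:
 *
 *         ret = mbedtls_reader_get( &rd, 10, &buf, &len );
 */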

int mbedtls_reader_commit( mbedtls_reader *rd )
{
    unsigned char *acc;
    mbedtls_mps_size_t aa, end, fo, shift;
    TRACE_INIT( "reader_commit" );

    MBEDTLS_MPS_STATE_VALIDATE_RAW( rd->frag != NULL,
        "mbedtls_reader_commit() requires reader to be in consuming mode" );

    acc = rd->acc;
    end = rd->end;

    if( acc == NULL )
    {
        TRACE( trace_comment, "No accumulator, just shift end" );
        rd->commit = end;
        RETURN( 0 );
    }

    fo = rd->acc_share.frag_offset;
    if( end >= fo )
    {
        TRACE( trace_comment, "Started to serve fragment, get rid of accumulator" );
        shift = fo;
        aa = 0;
    }
    else
    {
        TRACE( trace_comment, "Still serving from accumulator" );
        aa = rd->acc_avail;
        shift = end;
        memmove( acc, acc + shift, aa - shift );
        aa -= shift;
    }

    end -= shift;
    fo -= shift;

    rd->acc_share.frag_offset = fo;
    rd->acc_avail = aa;
    rd->commit = end;
    rd->end = end;

    TRACE( trace_comment, "Final state: (end=commit,fo,avail) = (%u,%u,%u)",
           (unsigned) end, (unsigned) fo, (unsigned) aa );
    RETURN( 0 );
}
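
/*
 * Illustrative sketch of the commit semantics implemented above (not part
 * of the implementation; `rd` is assumed to be a reader in consuming mode
 * on a fragment of more than 8 bytes):
 *
 *     mbedtls_reader_get( &rd, 4, &buf, NULL );    4 bytes handed out
 *     mbedtls_reader_commit( &rd );                those 4 bytes committed
 *     mbedtls_reader_get( &rd, 4, &buf, NULL );    4 more bytes handed out,
 *                                                  but not yet committed
 *
 * Only fetched and committed data may be dropped: as long as part of the
 * fragment is unfetched or uncommitted (and no exact request has failed),
 * mbedtls_reader_reclaim() rejects the call with
 * MBEDTLS_ERR_MPS_READER_DATA_LEFT and rolls the read pointer back to the
 * last commit, so the uncommitted bytes are handed out again later.
 */
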
int mbedtls_reader_reclaim( mbedtls_reader *rd,
                            mbedtls_mps_size_t *paused )
{
    unsigned char *frag, *acc;
    mbedtls_mps_size_t pending, commit;
    mbedtls_mps_size_t al, fo, fl;
    TRACE_INIT( "reader_reclaim" );

    if( paused != NULL )
        *paused = 0;

    frag = rd->frag;
    MBEDTLS_MPS_STATE_VALIDATE_RAW( frag != NULL,
        "mbedtls_reader_reclaim() requires reader to be in consuming mode" );

    acc = rd->acc;
    pending = rd->pending;
    commit = rd->commit;
    fl = rd->frag_len;

    if( acc == NULL )
        fo = 0;
    else
        fo = rd->acc_share.frag_offset;

    if( pending == 0 )
    {
        TRACE( trace_comment, "No unsatisfied read-request has been logged." );
        /* Check if there's data left to be consumed. */
        if( commit < fo || commit - fo < fl )
        {
            TRACE( trace_comment, "There is data left to be consumed." );
            rd->end = commit;
            RETURN( MBEDTLS_ERR_MPS_READER_DATA_LEFT );
        }
        TRACE( trace_comment, "The fragment has been completely processed and committed." );
    }
    else
    {
        mbedtls_mps_size_t frag_backup_offset;
        mbedtls_mps_size_t frag_backup_len;
        TRACE( trace_comment, "There has been an unsatisfied read-request with %u bytes overhead.",
               (unsigned) pending );

        if( acc == NULL )
        {
            TRACE( trace_comment, "No accumulator present" );
            RETURN( MBEDTLS_ERR_MPS_READER_NEED_ACCUMULATOR );
        }
        al = rd->acc_len;

        /* Check if the upper layer has already fetched
         * and committed the contents of the accumulator. */
        if( commit < fo )
        {
            /* No, the accumulator is still being processed. */
            int overflow;
            TRACE( trace_comment, "Still processing data from the accumulator" );

            overflow =
                ( fo + fl < fo ) || ( fo + fl + pending < fo + fl );
            if( overflow || al < fo + fl + pending )
            {
                rd->end = commit;
                rd->pending = 0;
                TRACE( trace_error, "The accumulator is too small to handle the backup." );
                TRACE( trace_error, "* Remaining size: %u", (unsigned) al );
                TRACE( trace_error, "* Needed: %u (%u + %u + %u)",
                       (unsigned) ( fo + fl + pending ),
                       (unsigned) fo, (unsigned) fl, (unsigned) pending );
                RETURN( MBEDTLS_ERR_MPS_READER_ACCUMULATOR_TOO_SMALL );
            }
            frag_backup_offset = 0;
            frag_backup_len = fl;
        }
        else
        {
            /* Yes, the accumulator has already been processed. */
            int overflow;
            TRACE( trace_comment, "The accumulator has already been processed" );

            frag_backup_offset = commit;
            frag_backup_len = fl - commit;
            overflow = ( frag_backup_len + pending < pending );

            if( overflow ||
                al - fo < frag_backup_len + pending )
            {
                rd->end = commit;
                rd->pending = 0;
                TRACE( trace_error, "The accumulator is too small to handle the backup." );
                TRACE( trace_error, "* Remaining size: %u", (unsigned) ( al - fo ) );
                TRACE( trace_error, "* Needed: %u (%u + %u)",
                       (unsigned) ( frag_backup_len + pending ),
                       (unsigned) frag_backup_len, (unsigned) pending );
                RETURN( MBEDTLS_ERR_MPS_READER_ACCUMULATOR_TOO_SMALL );
            }
        }

        frag += frag_backup_offset;
        acc += fo;
        memcpy( acc, frag, frag_backup_len );

        TRACE( trace_comment, "Backup %u bytes into accumulator",
               (unsigned) frag_backup_len );

        rd->acc_avail = fo + frag_backup_len;
        rd->acc_share.acc_remaining = pending;

        if( paused != NULL )
            *paused = 1;
    }

    rd->frag = NULL;
    rd->frag_len = 0;

    rd->commit = 0;
    rd->end = 0;
    rd->pending = 0;

    TRACE( trace_comment, "Final state: aa %u, al %u, ar %u",
           (unsigned) rd->acc_avail, (unsigned) rd->acc_len,
           (unsigned) rd->acc_share.acc_remaining );
    RETURN( 0 );
}
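
/*
 * Illustrative sketch of the pause/resume flow implemented by
 * mbedtls_reader_reclaim() and mbedtls_reader_feed() above (not part of
 * the implementation; error handling is elided, and `frag`, `frag_len`,
 * `buf` and `paused` are hypothetical caller-side variables):
 *
 *     mbedtls_reader_feed( &rd, frag, frag_len );
 *
 *     if( mbedtls_reader_get( &rd, 100, &buf, NULL ) ==
 *         MBEDTLS_ERR_MPS_READER_OUT_OF_DATA )
 *     {
 *         The exact request could not be served. reclaim() backs up the
 *         unprocessed part of the fragment into the accumulator and records
 *         how much data is still missing, reporting paused == 1 when data
 *         had to be backed up:
 *
 *             mbedtls_reader_reclaim( &rd, &paused );
 *
 *         Subsequent feed() calls return MBEDTLS_ERR_MPS_READER_NEED_MORE
 *         until enough data has accumulated. The caller is then expected to
 *         repeat the same request, mbedtls_reader_get( &rd, 100, ... ),
 *         which is now served from the accumulator; cf. the
 *         MBEDTLS_ERR_MPS_READER_INCONSISTENT_REQUESTS check in
 *         mbedtls_reader_get().
 *     }
 */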