/*
 * Message Processing Stack, Reader implementation
 *
 * Copyright The Mbed TLS Contributors
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * This file is part of Mbed TLS (https://tls.mbed.org)
 */
21
22 #include "common.h"
23
24 #if defined(MBEDTLS_SSL_PROTO_TLS1_3_EXPERIMENTAL)
25
26 #include "mps_reader.h"
27 #include "mps_common.h"
28 #include "mps_trace.h"
29
30 #include <string.h>
31
32 #if ( defined(__ARMCC_VERSION) || defined(_MSC_VER) ) && \
33 !defined(inline) && !defined(__cplusplus)
34 #define inline __inline
35 #endif
36
#if defined(MBEDTLS_MPS_ENABLE_TRACE)
/* File-local trace identifier; presumably consumed by the
 * MBEDTLS_MPS_TRACE_* macros to tag output from this module
 * (see mps_trace.h — TODO confirm). */
static int mbedtls_mps_trace_id = MBEDTLS_MPS_TRACE_BIT_READER;
#endif /* MBEDTLS_MPS_ENABLE_TRACE */
40
/*
 * GENERAL NOTE ON CODING STYLE
 *
 * The following code intentionally separates memory loads
 * and stores from other operations (arithmetic or branches).
 * This leads to the introduction of many local variables
 * and significantly increases the C-code line count, but
 * should not increase the size of generated assembly.
 *
 * The reason for this is twofold:
 * (1) It will ease verification efforts using the VST
 *     (Verified Software Toolchain)
 *     whose program logic cannot directly reason
 *     about instructions containing a load or store in
 *     addition to other operations (e.g. *p = *q or
 *     tmp = *p + 42).
 * (2) Operating on local variables and writing the results
 *     back to the target contexts on success only
 *     allows to maintain structure invariants even
 *     on failure - this in turn has two benefits:
 *     (2.a) If for some reason an error code is not caught
 *           and operation continues, functions are nonetheless
 *           called with sane contexts, reducing the risk
 *           of dangerous behavior.
 *     (2.b) Randomized testing is easier if structures
 *           remain intact even in the face of failing
 *           and/or non-sensical calls.
 *     Moreover, it might even reduce code-size because
 *     the compiler need not write back temporary results
 *     to memory in case of failure.
 *
 */
73
/* Return 1 if the reader still owes data to a paused read request,
 * i.e. an accumulator is present and bytes remain to be gathered
 * into it; return 0 otherwise. */
static inline int mps_reader_is_accumulating(
    mbedtls_mps_reader const *rd )
{
    unsigned char *accumulator;
    mbedtls_mps_size_t remaining;

    accumulator = rd->acc;
    if( accumulator == NULL )
        return( 0 );

    remaining = rd->acc_share.acc_remaining;
    return( remaining > 0 );
}
84
/* Return 1 if the reader is in producing mode (no fragment is
 * currently installed, so it is waiting for mbedtls_mps_reader_feed()),
 * and 0 otherwise. */
static inline int mps_reader_is_producing(
    mbedtls_mps_reader const *rd )
{
    unsigned char *current_frag = rd->frag;
    return( current_frag == NULL );
}
91
/* Return 1 if the reader is in consuming mode (a fragment is
 * installed and data can be fetched from it) — the exact opposite
 * of mps_reader_is_producing(). */
static inline int mps_reader_is_consuming(
    mbedtls_mps_reader const *rd )
{
    return( mps_reader_is_producing( rd ) == 0 );
}
97
mps_reader_get_fragment_offset(mbedtls_mps_reader const * rd)98 static inline mbedtls_mps_size_t mps_reader_get_fragment_offset(
99 mbedtls_mps_reader const *rd )
100 {
101 unsigned char *acc = rd->acc;
102 mbedtls_mps_size_t frag_offset;
103
104 if( acc == NULL )
105 return( 0 );
106
107 frag_offset = rd->acc_share.frag_offset;
108 return( frag_offset );
109 }
110
mps_reader_serving_from_accumulator(mbedtls_mps_reader const * rd)111 static inline mbedtls_mps_size_t mps_reader_serving_from_accumulator(
112 mbedtls_mps_reader const *rd )
113 {
114 mbedtls_mps_size_t frag_offset, end;
115
116 frag_offset = mps_reader_get_fragment_offset( rd );
117 end = rd->end;
118
119 return( end < frag_offset );
120 }
121
/* Reset every field of the reader to its initial value.
 *
 * The field-by-field designated initializer is used instead of a
 * plain memset() on purpose: it makes it harder to overlook fields
 * that should not be zero-initialized, and it is friendlier to
 * formal-verification efforts, which would otherwise need to reason
 * about the struct as an unstructured binary blob. */
static inline void mps_reader_zero( mbedtls_mps_reader *rd )
{
    *rd = (mbedtls_mps_reader) { .frag          = NULL,
                                 .frag_len      = 0,
                                 .commit        = 0,
                                 .end           = 0,
                                 .pending       = 0,
                                 .acc           = NULL,
                                 .acc_len       = 0,
                                 .acc_available = 0,
                                 .acc_share     = { .acc_remaining = 0 } };
}
143
/* Initialize a reader.
 *
 * \param rd      The reader context to initialize.
 * \param acc     Optional accumulator buffer used to back up data
 *                across pausing; may be NULL if pausing isn't needed.
 * \param acc_len Length of \p acc in bytes (0 if \p acc is NULL).
 *
 * \return 0 on success. */
int mbedtls_mps_reader_init( mbedtls_mps_reader *rd,
                             unsigned char *acc,
                             mbedtls_mps_size_t acc_len )
{
    MBEDTLS_MPS_TRACE_INIT( "mbedtls_mps_reader_init" );
    MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                       "* Accumulator size: %u bytes", (unsigned) acc_len );

    /* Start from a fully zeroed state, then install the accumulator. */
    mps_reader_zero( rd );
    rd->acc_len = acc_len;
    rd->acc = acc;

    MBEDTLS_MPS_TRACE_RETURN( 0 );
}
156
/* Free a reader context. The reader does not own any heap memory,
 * so this merely clears all fields back to their initial state.
 *
 * \return 0 on success. */
int mbedtls_mps_reader_free( mbedtls_mps_reader *rd )
{
    MBEDTLS_MPS_TRACE_INIT( "mbedtls_mps_reader_free" );

    mps_reader_zero( rd );

    MBEDTLS_MPS_TRACE_RETURN( 0 );
}
163
/* Feed a new data fragment to the reader and switch it from producing
 * to consuming mode.
 *
 * \param rd           The reader context; must be in producing mode.
 * \param new_frag     The incoming data fragment. Must not be NULL.
 * \param new_frag_len Length of \p new_frag in bytes.
 *
 * \return 0 on success;
 *         MBEDTLS_ERR_MPS_READER_INVALID_ARG if \p new_frag is NULL;
 *         MBEDTLS_ERR_MPS_READER_NEED_MORE if the reader is still
 *         accumulating data for a previously paused read request and
 *         \p new_frag did not complete it (the reader then stays in
 *         producing mode and consumes the whole fragment).
 */
int mbedtls_mps_reader_feed( mbedtls_mps_reader *rd,
                             unsigned char *new_frag,
                             mbedtls_mps_size_t new_frag_len )
{
    mbedtls_mps_size_t copy_to_acc;
    MBEDTLS_MPS_TRACE_INIT( "mbedtls_mps_reader_feed" );
    MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                       "* Fragment length: %u bytes", (unsigned) new_frag_len );

    if( new_frag == NULL )
        MBEDTLS_MPS_TRACE_RETURN( MBEDTLS_ERR_MPS_READER_INVALID_ARG );

    MBEDTLS_MPS_STATE_VALIDATE_RAW( mps_reader_is_producing( rd ),
        "mbedtls_mps_reader_feed() requires reader to be in producing mode" );

    if( mps_reader_is_accumulating( rd ) )
    {
        /* A previous reclaim left an unsatisfied read request:
         * route (part of) the new fragment into the accumulator. */
        unsigned char *acc = rd->acc;
        mbedtls_mps_size_t acc_remaining = rd->acc_share.acc_remaining;
        mbedtls_mps_size_t acc_available = rd->acc_available;

        /* Skip over parts of the accumulator that have already been filled. */
        acc += acc_available;

        /* Copy at most the missing amount, and at most the fragment size. */
        copy_to_acc = acc_remaining;
        if( copy_to_acc > new_frag_len )
            copy_to_acc = new_frag_len;

        /* Copy new contents to accumulator. */
        memcpy( acc, new_frag, copy_to_acc );

        MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
            "Copy new data of size %u of %u into accumulator at offset %u",
            (unsigned) copy_to_acc, (unsigned) new_frag_len, (unsigned) acc_available );

        /* Check if, with the new fragment, we have enough data. */
        acc_remaining -= copy_to_acc;
        if( acc_remaining > 0 )
        {
            /* We need to accumulate more data. Stay in producing mode.
             * Note: rd->frag is deliberately not set here — the entire
             * fragment was absorbed into the accumulator. */
            acc_available += copy_to_acc;
            rd->acc_share.acc_remaining = acc_remaining;
            rd->acc_available = acc_available;
            MBEDTLS_MPS_TRACE_RETURN( MBEDTLS_ERR_MPS_READER_NEED_MORE );
        }

        /* We have filled the accumulator: Move to consuming mode. */

        MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
            "Enough data available to serve user request" );

        /* Remember overlap of accumulator and fragment: the first
         * copy_to_acc bytes of the fragment are duplicated at the end
         * of the accumulator, so the fragment's logical offset is the
         * amount of accumulator data that precedes the copy. */
        rd->acc_share.frag_offset = acc_available;
        acc_available += copy_to_acc;
        rd->acc_available = acc_available;
    }
    else /* Not accumulating */
    {
        /* Fragment is served directly; no accumulator data precedes it. */
        rd->acc_share.frag_offset = 0;
    }

    /* Install the fragment and reset the read/commit positions. */
    rd->frag = new_frag;
    rd->frag_len = new_frag_len;
    rd->commit = 0;
    rd->end = 0;
    MBEDTLS_MPS_TRACE_RETURN( 0 );
}
231
232
/* Request a contiguous buffer of incoming data from the reader.
 *
 * The request is served either from the accumulator (data backed up
 * from previous fragments) or from the current fragment, depending on
 * the current read position.
 *
 * \param rd      The reader context; must be in consuming mode.
 * \param desired Number of bytes requested.
 * \param buffer  Output: points into the accumulator or fragment.
 * \param buflen  If NULL, the request is all-or-nothing; if non-NULL,
 *                a shorter buffer may be returned and its actual
 *                length is written here.
 *
 * \return 0 on success;
 *         MBEDTLS_ERR_MPS_READER_OUT_OF_DATA if \p buflen is NULL and
 *         the fragment cannot fully serve the request (the shortfall
 *         is remembered in rd->pending for the next reclaim);
 *         MBEDTLS_ERR_MPS_READER_INCONSISTENT_REQUESTS if a request
 *         spanning the accumulator doesn't match the pre-pause
 *         request pattern (see "Not allowed" cases below).
 */
int mbedtls_mps_reader_get( mbedtls_mps_reader *rd,
                            mbedtls_mps_size_t desired,
                            unsigned char **buffer,
                            mbedtls_mps_size_t *buflen )
{
    unsigned char *frag;
    mbedtls_mps_size_t frag_len, frag_offset, end, frag_fetched, frag_remaining;
    MBEDTLS_MPS_TRACE_INIT( "mbedtls_mps_reader_get" );
    MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                       "* Bytes requested: %u", (unsigned) desired );

    MBEDTLS_MPS_STATE_VALIDATE_RAW( mps_reader_is_consuming( rd ),
        "mbedtls_mps_reader_get() requires reader to be in consuming mode" );

    end = rd->end;
    frag_offset = mps_reader_get_fragment_offset( rd );

    /* Check if we're still serving from the accumulator. */
    if( mps_reader_serving_from_accumulator( rd ) )
    {
        /* Illustration of supported and unsupported cases:
         *
         * - Allowed #1
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *             end end+desired
         *              |       |
         *        +-----v-------v-------------+
         *        |          acc              |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset  acc_available
         *
         * - Allowed #2
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *                  end          end+desired
         *                   |                |
         *        +----------v----------------v
         *        |          acc              |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset  acc_available
         *
         * - Not allowed #1 (could be served, but we don't actually use it):
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *            end        end+desired
         *             |             |
         *        +------v-------------v------+
         *        |          acc              |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset  acc_available
         *
         *
         * - Not allowed #2 (can't be served with a contiguous buffer):
         *
         *                          +-----------------------------------+
         *                          |               frag                |
         *                          +-----------------------------------+
         *
         *            end                 end + desired
         *             |                        |
         *        +------v--------------------+ v
         *        |          acc              |
         *        +---------------------------+
         *                          |         |
         *                     frag_offset  acc_available
         *
         * In case of Allowed #2 we're switching to serve from
         * `frag` starting from the next call to mbedtls_mps_reader_get().
         */

        unsigned char *acc;

        MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                           "Serve the request from the accumulator" );
        if( frag_offset - end < desired )
        {
            /* The request extends past the accumulator/fragment
             * boundary; only the exact "Allowed #2" case is served. */
            mbedtls_mps_size_t acc_available;
            acc_available = rd->acc_available;
            if( acc_available - end != desired )
            {
                /* It might be possible to serve some of these situations by
                 * making additional space in the accumulator, removing those
                 * parts that have already been committed.
                 * On the other hand, this brings additional complexity and
                 * enlarges the code size, while there doesn't seem to be a use
                 * case where we don't attempt exactly the same `get` calls when
                 * resuming on a reader than what we tried before pausing it.
                 * If we believe we adhere to this restricted usage throughout
                 * the library, this check is a good opportunity to
                 * validate this. */
                MBEDTLS_MPS_TRACE_RETURN(
                    MBEDTLS_ERR_MPS_READER_INCONSISTENT_REQUESTS );
            }
        }

        /* Hand out a window into the accumulator at the read position. */
        acc = rd->acc;
        acc += end;

        *buffer = acc;
        if( buflen != NULL )
            *buflen = desired;

        /* Advance the read position; commit happens separately. */
        end += desired;
        rd->end = end;
        rd->pending = 0;

        MBEDTLS_MPS_TRACE_RETURN( 0 );
    }

    /* Attempt to serve the request from the current fragment */
    MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                       "Serve the request from the current fragment." );

    frag_len = rd->frag_len;
    frag_fetched = end - frag_offset; /* The amount of data from the current
                                       * fragment that has already been passed
                                       * to the user. */
    frag_remaining = frag_len - frag_fetched; /* Remaining data in fragment */

    /* Check if we can serve the read request from the fragment. */
    if( frag_remaining < desired )
    {
        MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                           "There's not enough data in the current fragment "
                           "to serve the request." );
        /* There's not enough data in the current fragment,
         * so either just RETURN what we have or fail. */
        if( buflen == NULL )
        {
            if( frag_remaining > 0 )
            {
                /* Record the shortfall so that reclaim knows how much
                 * data to request before the reader can be resumed. */
                rd->pending = desired - frag_remaining;
                MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                       "Remember to collect %u bytes before re-opening",
                       (unsigned) rd->pending );
            }
            MBEDTLS_MPS_TRACE_RETURN( MBEDTLS_ERR_MPS_READER_OUT_OF_DATA );
        }

        /* Caller accepts partial data: shrink the request to fit. */
        desired = frag_remaining;
    }

    /* There's enough data in the current fragment to serve the
     * (potentially modified) read request. */

    frag = rd->frag;
    frag += frag_fetched;

    *buffer = frag;
    if( buflen != NULL )
        *buflen = desired;

    /* Advance the read position past the handed-out data. */
    end += desired;
    rd->end = end;
    rd->pending = 0;
    MBEDTLS_MPS_TRACE_RETURN( 0 );
}
403
/* Mark all data fetched so far as processed.
 *
 * Committed data is never handed out again and need not be backed up
 * to the accumulator when the reader is paused; the commit marker is
 * simply advanced to the current read position.
 *
 * \param rd The reader context; must be in consuming mode.
 * \return 0 on success. */
int mbedtls_mps_reader_commit( mbedtls_mps_reader *rd )
{
    mbedtls_mps_size_t new_commit_position;
    MBEDTLS_MPS_TRACE_INIT( "mbedtls_mps_reader_commit" );
    MBEDTLS_MPS_STATE_VALIDATE_RAW( mps_reader_is_consuming( rd ),
        "mbedtls_mps_reader_commit() requires reader to be in consuming mode" );

    new_commit_position = rd->end;
    rd->commit = new_commit_position;

    MBEDTLS_MPS_TRACE_RETURN( 0 );
}
416
/* Reclaim the current fragment and return the reader to producing mode.
 *
 * If an unsatisfied read request was logged (rd->pending != 0), all
 * uncommitted data from the accumulator and the fragment is backed up
 * into the accumulator so that the request can be re-served after more
 * data has been fed ("pausing" the reader).
 *
 * \param rd     The reader context; must be in consuming mode.
 * \param paused If non-NULL, set to 1 if the reader was paused (data
 *               was backed up for a pending request), 0 otherwise.
 *
 * \return 0 on success;
 *         MBEDTLS_ERR_MPS_READER_DATA_LEFT if uncommitted data remains
 *         and no read request is pending (the read position is rolled
 *         back to the commit marker);
 *         MBEDTLS_ERR_MPS_READER_NEED_ACCUMULATOR if pausing is needed
 *         but no accumulator was configured;
 *         MBEDTLS_ERR_MPS_READER_ACCUMULATOR_TOO_SMALL if the
 *         accumulator cannot hold the backup plus the pending bytes.
 */
int mbedtls_mps_reader_reclaim( mbedtls_mps_reader *rd,
                                int *paused )
{
    unsigned char *frag, *acc;
    mbedtls_mps_size_t pending, commit;
    mbedtls_mps_size_t acc_len, frag_offset, frag_len;
    MBEDTLS_MPS_TRACE_INIT( "mbedtls_mps_reader_reclaim" );

    if( paused != NULL )
        *paused = 0;

    MBEDTLS_MPS_STATE_VALIDATE_RAW( mps_reader_is_consuming( rd ),
        "mbedtls_mps_reader_reclaim() requires reader to be in consuming mode" );

    frag = rd->frag;
    acc = rd->acc;
    pending = rd->pending;
    commit = rd->commit;
    frag_len = rd->frag_len;

    frag_offset = mps_reader_get_fragment_offset( rd );

    if( pending == 0 )
    {
        MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                           "No unsatisfied read-request has been logged." );

        /* Check if there's data left to be consumed. The first clause
         * covers a commit marker still inside the accumulator; the
         * second covers uncommitted bytes in the fragment. */
        if( commit < frag_offset || commit - frag_offset < frag_len )
        {
            MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                               "There is data left to be consumed." );
            rd->end = commit;
            MBEDTLS_MPS_TRACE_RETURN( MBEDTLS_ERR_MPS_READER_DATA_LEFT );
        }

        /* Everything was consumed and committed: the accumulator
         * contents are no longer needed. */
        rd->acc_available = 0;
        rd->acc_share.acc_remaining = 0;

        MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                           "Fragment has been fully processed and committed." );
    }
    else
    {
        int overflow;

        mbedtls_mps_size_t acc_backup_offset;
        mbedtls_mps_size_t acc_backup_len;
        mbedtls_mps_size_t frag_backup_offset;
        mbedtls_mps_size_t frag_backup_len;

        mbedtls_mps_size_t backup_len;
        mbedtls_mps_size_t acc_len_needed;

        MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
               "There has been an unsatisfied read with %u bytes overhead.",
               (unsigned) pending );

        if( acc == NULL )
        {
            /* Pausing requires an accumulator to back up data into. */
            MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                               "No accumulator present" );
            MBEDTLS_MPS_TRACE_RETURN(
                MBEDTLS_ERR_MPS_READER_NEED_ACCUMULATOR );
        }
        acc_len = rd->acc_len;

        /* Check if the upper layer has already fetched
         * and committed the contents of the accumulator. */
        if( commit < frag_offset )
        {
            /* No, accumulator is still being processed: back up its
             * uncommitted tail plus the entire fragment. */
            frag_backup_offset = 0;
            frag_backup_len = frag_len;
            acc_backup_offset = commit;
            acc_backup_len = frag_offset - commit;
        }
        else
        {
            /* Yes, the accumulator is already processed: only the
             * uncommitted tail of the fragment needs backing up. */
            frag_backup_offset = commit - frag_offset;
            frag_backup_len = frag_len - frag_backup_offset;
            acc_backup_offset = 0;
            acc_backup_len = 0;
        }

        backup_len = acc_backup_len + frag_backup_len;
        acc_len_needed = backup_len + pending;

        /* Detect wraparound in the two unsigned additions above. */
        overflow = 0;
        overflow |= ( backup_len < acc_backup_len );
        overflow |= ( acc_len_needed < backup_len );

        if( overflow || acc_len < acc_len_needed )
        {
            /* Except for the different return code, we behave as if
             * there hadn't been a call to mbedtls_mps_reader_get()
             * since the last commit. */
            rd->end = commit;
            rd->pending = 0;
            MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_ERROR,
                               "The accumulator is too small to handle the backup." );
            MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_ERROR,
                               "* Size: %u", (unsigned) acc_len );
            MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_ERROR,
                               "* Needed: %u (%u + %u)",
                               (unsigned) acc_len_needed,
                               (unsigned) backup_len, (unsigned) pending );
            MBEDTLS_MPS_TRACE_RETURN(
                MBEDTLS_ERR_MPS_READER_ACCUMULATOR_TOO_SMALL );
        }

        MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
            "Fragment backup: %u", (unsigned) frag_backup_len );
        MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
            "Accumulator backup: %u", (unsigned) acc_backup_len );

        /* Move uncommitted parts from the accumulator to the front
         * of the accumulator. memmove() because source and destination
         * ranges within the accumulator may overlap. */
        memmove( acc, acc + acc_backup_offset, acc_backup_len );

        /* Copy uncommitted parts of the current fragment to the
         * accumulator. */
        memcpy( acc + acc_backup_len,
                frag + frag_backup_offset, frag_backup_len );

        rd->acc_available = backup_len;
        rd->acc_share.acc_remaining = pending;

        if( paused != NULL )
            *paused = 1;
    }

    /* Uninstall the fragment and reset positions: back to producing mode. */
    rd->frag = NULL;
    rd->frag_len = 0;

    rd->commit = 0;
    rd->end = 0;
    rd->pending = 0;

    MBEDTLS_MPS_TRACE( MBEDTLS_MPS_TRACE_TYPE_COMMENT,
                       "Final state: aa %u, al %u, ar %u",
                       (unsigned) rd->acc_available, (unsigned) rd->acc_len,
                       (unsigned) rd->acc_share.acc_remaining );
    MBEDTLS_MPS_TRACE_RETURN( 0 );
}
563
564 #endif /* MBEDTLS_SSL_PROTO_TLS1_3_EXPERIMENTAL */
565