/*
 * FreeRTOS Kernel <DEVELOPMENT BRANCH>
 * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * https://www.FreeRTOS.org
 * https://github.com/FreeRTOS
 *
 */

/*
 * A sample implementation of pvPortMalloc() and vPortFree() that combines
 * (coalesces) adjacent memory blocks as they are freed, and in so doing
 * limits memory fragmentation.
 *
 * See heap_1.c, heap_2.c and heap_3.c for alternative implementations, and the
 * memory management pages of https://www.FreeRTOS.org for more information.
 */
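
/*
 * Example usage (an illustrative sketch only; the task function, the 100 byte
 * buffer size and the use of memset() are assumptions for the example, not
 * part of this file).  Returning the buffer with vPortFree() allows this
 * implementation to coalesce it with any adjacent free blocks:
 *
 *     void vAnExampleTask( void * pvParameters )
 *     {
 *         uint8_t * pucBuffer = ( uint8_t * ) pvPortMalloc( 100 );
 *
 *         if( pucBuffer != NULL )
 *         {
 *             memset( pucBuffer, 0x00, 100 );
 *             vPortFree( pucBuffer );
 *         }
 *
 *         vTaskDelete( NULL );
 *     }
 */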
#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
 * all the API functions to use the MPU wrappers. That should only be done when
 * task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"

#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#if ( configSUPPORT_DYNAMIC_ALLOCATION == 0 )
    #error This file must not be used if configSUPPORT_DYNAMIC_ALLOCATION is 0
#endif

#ifndef configHEAP_CLEAR_MEMORY_ON_FREE
    #define configHEAP_CLEAR_MEMORY_ON_FREE    0
#endif
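
/* Setting configHEAP_CLEAR_MEMORY_ON_FREE to 1 in FreeRTOSConfig.h causes
 * vPortFree() to zero a block's payload before returning it to the free list,
 * at the cost of one memset() per free.  For example (application
 * configuration, shown here only as a sketch):
 *
 *     #define configHEAP_CLEAR_MEMORY_ON_FREE    1
 */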

/* Block sizes must not get too small. */
#define heapMINIMUM_BLOCK_SIZE    ( ( size_t ) ( xHeapStructSize << 1 ) )

/* Assumes 8bit bytes! */
#define heapBITS_PER_BYTE         ( ( size_t ) 8 )

/* Max value that fits in a size_t type. */
#define heapSIZE_MAX              ( ~( ( size_t ) 0 ) )

/* Check if multiplying a and b will result in overflow. */
#define heapMULTIPLY_WILL_OVERFLOW( a, b )     ( ( ( a ) > 0 ) && ( ( b ) > ( heapSIZE_MAX / ( a ) ) ) )

/* Check if adding a and b will result in overflow. */
#define heapADD_WILL_OVERFLOW( a, b )          ( ( a ) > ( heapSIZE_MAX - ( b ) ) )

/* Check if the subtraction operation ( a - b ) will result in underflow. */
#define heapSUBTRACT_WILL_UNDERFLOW( a, b )    ( ( a ) < ( b ) )
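
/* As a worked example of the addition check: on a port where size_t is 16 bits
 * ( heapSIZE_MAX is 65535 ), heapADD_WILL_OVERFLOW( 65000, 1000 ) is true
 * because 65000 > 65535 - 1000, so the sum 65000 + 1000 cannot be represented
 * in a size_t.  The values used here are illustrative only. */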

/* The MSB of the xBlockSize member of a BlockLink_t structure is used to track
 * the allocation status of a block.  When the MSB of the xBlockSize member of
 * a BlockLink_t structure is set then the block belongs to the application.
 * When the bit is clear the block is still part of the free heap space. */
#define heapBLOCK_ALLOCATED_BITMASK    ( ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * heapBITS_PER_BYTE ) - 1 ) )
#define heapBLOCK_SIZE_IS_VALID( xBlockSize )    ( ( ( xBlockSize ) & heapBLOCK_ALLOCATED_BITMASK ) == 0 )
#define heapBLOCK_IS_ALLOCATED( pxBlock )        ( ( ( pxBlock->xBlockSize ) & heapBLOCK_ALLOCATED_BITMASK ) != 0 )
#define heapALLOCATE_BLOCK( pxBlock )            ( ( pxBlock->xBlockSize ) |= heapBLOCK_ALLOCATED_BITMASK )
#define heapFREE_BLOCK( pxBlock )                ( ( pxBlock->xBlockSize ) &= ~heapBLOCK_ALLOCATED_BITMASK )
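
/* As a concrete illustration ( assuming a port with a 32 bit size_t ),
 * heapBLOCK_ALLOCATED_BITMASK is 0x80000000.  A block whose xBlockSize is
 * 200 bytes ( 0xC8 ) therefore holds 0x800000C8 while allocated, and
 * heapFREE_BLOCK() restores it to 0x000000C8 when the block is returned to
 * the free list. */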

/*-----------------------------------------------------------*/

/* Allocate the memory for the heap. */
#if ( configAPPLICATION_ALLOCATED_HEAP == 1 )

/* The application writer has already defined the array used for the RTOS
 * heap - probably so it can be placed in a special segment or address. */
    extern uint8_t ucHeap[ configTOTAL_HEAP_SIZE ];
#else
    PRIVILEGED_DATA static uint8_t ucHeap[ configTOTAL_HEAP_SIZE ];
#endif /* configAPPLICATION_ALLOCATED_HEAP */
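
/* When configAPPLICATION_ALLOCATED_HEAP is 1 the application must define the
 * heap array itself.  A sketch for a GCC build that places the heap in a named
 * linker section ( the section name ".rtos_heap" is an assumption for the
 * example and must match the application's linker script ):
 *
 *     uint8_t ucHeap[ configTOTAL_HEAP_SIZE ] __attribute__( ( section( ".rtos_heap" ) ) );
 */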

/* Define the linked list structure.  This is used to link free blocks in order
 * of their memory address. */
typedef struct A_BLOCK_LINK
{
    struct A_BLOCK_LINK * pxNextFreeBlock; /**< The next free block in the list. */
    size_t xBlockSize;                     /**< The size of the free block. */
} BlockLink_t;

/* Setting configENABLE_HEAP_PROTECTOR to 1 enables protection of the heap's
 * internal block pointers using an application-supplied canary value, to help
 * catch heap corruption should a heap buffer overflow occur.
 */
#if ( configENABLE_HEAP_PROTECTOR == 1 )

/**
 * @brief Application provided function to get a random value to be used as canary.
 *
 * @param pxHeapCanary [out] Output parameter to return the canary value.
 */
    extern void vApplicationGetRandomHeapCanary( portPOINTER_SIZE_TYPE * pxHeapCanary );

/* Canary value for protecting internal heap pointers. */
    PRIVILEGED_DATA static portPOINTER_SIZE_TYPE xHeapCanary;

/* Macro to load/store BlockLink_t pointers to memory. By XORing the
 * pointers with a random canary value, heap overflows will result
 * in randomly unpredictable pointer values which will be caught by
 * the heapVALIDATE_BLOCK_POINTER assert. */
    #define heapPROTECT_BLOCK_POINTER( pxBlock )    ( ( BlockLink_t * ) ( ( ( portPOINTER_SIZE_TYPE ) ( pxBlock ) ) ^ xHeapCanary ) )

#else

    #define heapPROTECT_BLOCK_POINTER( pxBlock )    ( pxBlock )

#endif /* configENABLE_HEAP_PROTECTOR */
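
/* When configENABLE_HEAP_PROTECTOR is 1 the application must supply
 * vApplicationGetRandomHeapCanary().  A minimal sketch, assuming no hardware
 * random number generator is available and using the C library rand() purely
 * as a placeholder ( a real application should derive the canary from a TRNG
 * or another entropy source ):
 *
 *     void vApplicationGetRandomHeapCanary( portPOINTER_SIZE_TYPE * pxHeapCanary )
 *     {
 *         *pxHeapCanary = ( portPOINTER_SIZE_TYPE ) rand();
 *     }
 */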

/* Assert that a heap block pointer is within the heap bounds. */
#define heapVALIDATE_BLOCK_POINTER( pxBlock )                              \
    configASSERT( ( ( uint8_t * ) ( pxBlock ) >= &( ucHeap[ 0 ] ) ) &&     \
                  ( ( uint8_t * ) ( pxBlock ) <= &( ucHeap[ configTOTAL_HEAP_SIZE - 1 ] ) ) )

/*-----------------------------------------------------------*/

/*
 * Inserts a block of memory that is being freed into the correct position in
 * the list of free memory blocks.  The block being freed will be merged with
 * the block in front of it and/or the block behind it if the memory blocks are
 * adjacent to each other.
 */
static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) PRIVILEGED_FUNCTION;

/*
 * Called automatically to set up the required heap structures the first time
 * pvPortMalloc() is called.
 */
static void prvHeapInit( void ) PRIVILEGED_FUNCTION;

/*-----------------------------------------------------------*/

/* The size of the structure placed at the beginning of each allocated memory
 * block must be correctly byte aligned. */
static const size_t xHeapStructSize = ( sizeof( BlockLink_t ) + ( ( size_t ) ( portBYTE_ALIGNMENT - 1 ) ) ) & ~( ( size_t ) portBYTE_ALIGNMENT_MASK );
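
/* For example, on a port where pointers and size_t are both 4 bytes and
 * portBYTE_ALIGNMENT is 8, sizeof( BlockLink_t ) is 8, so
 * xHeapStructSize = ( 8 + 7 ) & ~7 = 8.  The exact value is port dependent;
 * the figures here are illustrative only. */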

/* Create a couple of list links to mark the start and end of the list. */
PRIVILEGED_DATA static BlockLink_t xStart;
PRIVILEGED_DATA static BlockLink_t * pxEnd = NULL;

/* Keeps track of the number of calls to allocate and free memory as well as the
 * number of free bytes remaining, but says nothing about fragmentation. */
PRIVILEGED_DATA static size_t xFreeBytesRemaining = ( size_t ) 0U;
PRIVILEGED_DATA static size_t xMinimumEverFreeBytesRemaining = ( size_t ) 0U;
PRIVILEGED_DATA static size_t xNumberOfSuccessfulAllocations = ( size_t ) 0U;
PRIVILEGED_DATA static size_t xNumberOfSuccessfulFrees = ( size_t ) 0U;

/*-----------------------------------------------------------*/

void * pvPortMalloc( size_t xWantedSize )
{
    BlockLink_t * pxBlock;
    BlockLink_t * pxPreviousBlock;
    BlockLink_t * pxNewBlockLink;
    void * pvReturn = NULL;
    size_t xAdditionalRequiredSize;
    size_t xAllocatedBlockSize = 0;

    if( xWantedSize > 0 )
    {
        /* The wanted size must be increased so it can contain a BlockLink_t
         * structure in addition to the requested amount of bytes. */
        if( heapADD_WILL_OVERFLOW( xWantedSize, xHeapStructSize ) == 0 )
        {
            xWantedSize += xHeapStructSize;

            /* Ensure that blocks are always aligned to the required number
             * of bytes. */
            if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 )
            {
                /* Byte alignment required. */
                xAdditionalRequiredSize = portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK );

                if( heapADD_WILL_OVERFLOW( xWantedSize, xAdditionalRequiredSize ) == 0 )
                {
                    xWantedSize += xAdditionalRequiredSize;
                }
                else
                {
                    xWantedSize = 0;
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            xWantedSize = 0;
        }
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    vTaskSuspendAll();
    {
        /* If this is the first call to malloc then the heap will require
         * initialisation to set up the list of free blocks. */
        if( pxEnd == NULL )
        {
            prvHeapInit();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        /* Check the block size we are trying to allocate is not so large that the
         * top bit is set.  The top bit of the block size member of the BlockLink_t
         * structure is used to determine who owns the block - the application or
         * the kernel, so it must be free. */
        if( heapBLOCK_SIZE_IS_VALID( xWantedSize ) != 0 )
        {
            if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) )
            {
                /* Traverse the list from the start (lowest address) block until
                 * one of adequate size is found. */
                pxPreviousBlock = &xStart;
                pxBlock = heapPROTECT_BLOCK_POINTER( xStart.pxNextFreeBlock );
                heapVALIDATE_BLOCK_POINTER( pxBlock );

                while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != heapPROTECT_BLOCK_POINTER( NULL ) ) )
                {
                    pxPreviousBlock = pxBlock;
                    pxBlock = heapPROTECT_BLOCK_POINTER( pxBlock->pxNextFreeBlock );
                    heapVALIDATE_BLOCK_POINTER( pxBlock );
                }

                /* If the end marker was reached then a block of adequate size
                 * was not found. */
                if( pxBlock != pxEnd )
                {
                    /* Return the memory space pointed to - jumping over the
                     * BlockLink_t structure at its start. */
                    pvReturn = ( void * ) ( ( ( uint8_t * ) heapPROTECT_BLOCK_POINTER( pxPreviousBlock->pxNextFreeBlock ) ) + xHeapStructSize );
                    heapVALIDATE_BLOCK_POINTER( pvReturn );

                    /* This block is being returned for use so must be taken out
                     * of the list of free blocks. */
                    pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;

                    /* If the block is larger than required it can be split into
                     * two. */
                    configASSERT( heapSUBTRACT_WILL_UNDERFLOW( pxBlock->xBlockSize, xWantedSize ) == 0 );

                    if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE )
                    {
                        /* This block is to be split into two.  Create a new
                         * block following the number of bytes requested. The void
                         * cast is used to prevent byte alignment warnings from the
                         * compiler. */
                        pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize );
                        configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 );

                        /* Calculate the sizes of two blocks split from the
                         * single block. */
                        pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
                        pxBlock->xBlockSize = xWantedSize;

                        /* Insert the new block into the list of free blocks. */
                        pxNewBlockLink->pxNextFreeBlock = pxPreviousBlock->pxNextFreeBlock;
                        pxPreviousBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( pxNewBlockLink );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    xFreeBytesRemaining -= pxBlock->xBlockSize;

                    if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining )
                    {
                        xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    xAllocatedBlockSize = pxBlock->xBlockSize;

                    /* The block is being returned - it is allocated and owned
                     * by the application and has no "next" block. */
                    heapALLOCATE_BLOCK( pxBlock );
                    pxBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL );
                    xNumberOfSuccessfulAllocations++;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        traceMALLOC( pvReturn, xAllocatedBlockSize );

        /* Prevent compiler warnings when trace macros are not used. */
        ( void ) xAllocatedBlockSize;
    }
    ( void ) xTaskResumeAll();

    #if ( configUSE_MALLOC_FAILED_HOOK == 1 )
    {
        if( pvReturn == NULL )
        {
            vApplicationMallocFailedHook();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    #endif /* if ( configUSE_MALLOC_FAILED_HOOK == 1 ) */

    configASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) portBYTE_ALIGNMENT_MASK ) == 0 );
    return pvReturn;
}
/*-----------------------------------------------------------*/
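
/* If configUSE_MALLOC_FAILED_HOOK is 1 the application must provide
 * vApplicationMallocFailedHook(), which pvPortMalloc() above calls whenever an
 * allocation request cannot be satisfied.  A minimal sketch ( the action taken
 * in the hook is only an example; an application might instead log the event
 * or reset the device ):
 *
 *     void vApplicationMallocFailedHook( void )
 *     {
 *         taskDISABLE_INTERRUPTS();
 *
 *         for( ;; )
 *         {
 *         }
 *     }
 */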

void vPortFree( void * pv )
{
    uint8_t * puc = ( uint8_t * ) pv;
    BlockLink_t * pxLink;

    if( pv != NULL )
    {
        /* The memory being freed will have a BlockLink_t structure immediately
         * before it. */
        puc -= xHeapStructSize;

        /* This casting is to keep the compiler from issuing warnings. */
        pxLink = ( void * ) puc;

        heapVALIDATE_BLOCK_POINTER( pxLink );
        configASSERT( heapBLOCK_IS_ALLOCATED( pxLink ) != 0 );
        configASSERT( pxLink->pxNextFreeBlock == heapPROTECT_BLOCK_POINTER( NULL ) );

        if( heapBLOCK_IS_ALLOCATED( pxLink ) != 0 )
        {
            if( pxLink->pxNextFreeBlock == heapPROTECT_BLOCK_POINTER( NULL ) )
            {
                /* The block is being returned to the heap - it is no longer
                 * allocated. */
                heapFREE_BLOCK( pxLink );
                #if ( configHEAP_CLEAR_MEMORY_ON_FREE == 1 )
                {
                    /* Check for underflow as this can occur if xBlockSize is
                     * overwritten in a heap block. */
                    if( heapSUBTRACT_WILL_UNDERFLOW( pxLink->xBlockSize, xHeapStructSize ) == 0 )
                    {
                        ( void ) memset( puc + xHeapStructSize, 0, pxLink->xBlockSize - xHeapStructSize );
                    }
                }
                #endif

                vTaskSuspendAll();
                {
                    /* Add this block to the list of free blocks. */
                    xFreeBytesRemaining += pxLink->xBlockSize;
                    traceFREE( pv, pxLink->xBlockSize );
                    prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) );
                    xNumberOfSuccessfulFrees++;
                }
                ( void ) xTaskResumeAll();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
}
/*-----------------------------------------------------------*/

size_t xPortGetFreeHeapSize( void )
{
    return xFreeBytesRemaining;
}
/*-----------------------------------------------------------*/

size_t xPortGetMinimumEverFreeHeapSize( void )
{
    return xMinimumEverFreeBytesRemaining;
}
/*-----------------------------------------------------------*/

void xPortResetHeapMinimumEverFreeHeapSize( void )
{
    xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
}
/*-----------------------------------------------------------*/

void vPortInitialiseBlocks( void )
{
    /* This just exists to keep the linker quiet. */
}
/*-----------------------------------------------------------*/

void * pvPortCalloc( size_t xNum,
                     size_t xSize )
{
    void * pv = NULL;

    if( heapMULTIPLY_WILL_OVERFLOW( xNum, xSize ) == 0 )
    {
        pv = pvPortMalloc( xNum * xSize );

        if( pv != NULL )
        {
            ( void ) memset( pv, 0, xNum * xSize );
        }
    }

    return pv;
}
/*-----------------------------------------------------------*/

static void prvHeapInit( void ) /* PRIVILEGED_FUNCTION */
{
    BlockLink_t * pxFirstFreeBlock;
    portPOINTER_SIZE_TYPE uxStartAddress, uxEndAddress;
    size_t xTotalHeapSize = configTOTAL_HEAP_SIZE;

    /* Ensure the heap starts on a correctly aligned boundary. */
    uxStartAddress = ( portPOINTER_SIZE_TYPE ) ucHeap;

    if( ( uxStartAddress & portBYTE_ALIGNMENT_MASK ) != 0 )
    {
        uxStartAddress += ( portBYTE_ALIGNMENT - 1 );
        uxStartAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK );
        xTotalHeapSize -= ( size_t ) ( uxStartAddress - ( portPOINTER_SIZE_TYPE ) ucHeap );
    }

    #if ( configENABLE_HEAP_PROTECTOR == 1 )
    {
        vApplicationGetRandomHeapCanary( &( xHeapCanary ) );
    }
    #endif

    /* xStart is used to hold a pointer to the first item in the list of free
     * blocks.  The void cast is used to prevent compiler warnings. */
    xStart.pxNextFreeBlock = ( void * ) heapPROTECT_BLOCK_POINTER( uxStartAddress );
    xStart.xBlockSize = ( size_t ) 0;

    /* pxEnd is used to mark the end of the list of free blocks and is inserted
     * at the end of the heap space. */
    uxEndAddress = uxStartAddress + ( portPOINTER_SIZE_TYPE ) xTotalHeapSize;
    uxEndAddress -= ( portPOINTER_SIZE_TYPE ) xHeapStructSize;
    uxEndAddress &= ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK );
    pxEnd = ( BlockLink_t * ) uxEndAddress;
    pxEnd->xBlockSize = 0;
    pxEnd->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( NULL );

    /* To start with there is a single free block that is sized to take up the
     * entire heap space, minus the space taken by pxEnd. */
    pxFirstFreeBlock = ( BlockLink_t * ) uxStartAddress;
    pxFirstFreeBlock->xBlockSize = ( size_t ) ( uxEndAddress - ( portPOINTER_SIZE_TYPE ) pxFirstFreeBlock );
    pxFirstFreeBlock->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( pxEnd );

    /* Only one block exists - and it covers the entire usable heap space. */
    xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
    xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
}
/*-----------------------------------------------------------*/

static void prvInsertBlockIntoFreeList( BlockLink_t * pxBlockToInsert ) /* PRIVILEGED_FUNCTION */
{
    BlockLink_t * pxIterator;
    uint8_t * puc;

    /* Iterate through the list until a block is found that has a higher address
     * than the block being inserted. */
    for( pxIterator = &xStart; heapPROTECT_BLOCK_POINTER( pxIterator->pxNextFreeBlock ) < pxBlockToInsert; pxIterator = heapPROTECT_BLOCK_POINTER( pxIterator->pxNextFreeBlock ) )
    {
        /* Nothing to do here, just iterate to the right position. */
    }

    if( pxIterator != &xStart )
    {
        heapVALIDATE_BLOCK_POINTER( pxIterator );
    }

    /* Do the block being inserted, and the block it is being inserted after
     * make a contiguous block of memory? */
    puc = ( uint8_t * ) pxIterator;

    if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert )
    {
        pxIterator->xBlockSize += pxBlockToInsert->xBlockSize;
        pxBlockToInsert = pxIterator;
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    /* Do the block being inserted, and the block it is being inserted before
     * make a contiguous block of memory? */
    puc = ( uint8_t * ) pxBlockToInsert;

    if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) heapPROTECT_BLOCK_POINTER( pxIterator->pxNextFreeBlock ) )
    {
        if( heapPROTECT_BLOCK_POINTER( pxIterator->pxNextFreeBlock ) != pxEnd )
        {
            /* Form one big block from the two blocks. */
            pxBlockToInsert->xBlockSize += heapPROTECT_BLOCK_POINTER( pxIterator->pxNextFreeBlock )->xBlockSize;
            pxBlockToInsert->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( pxIterator->pxNextFreeBlock )->pxNextFreeBlock;
        }
        else
        {
            pxBlockToInsert->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( pxEnd );
        }
    }
    else
    {
        pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock;
    }

    /* If the block being inserted plugged a gap, and so was merged with both
     * the block before it and the block after it, then its pxNextFreeBlock
     * pointer will have already been set and should not be set here, as that
     * would make it point to itself. */
    if( pxIterator != pxBlockToInsert )
    {
        pxIterator->pxNextFreeBlock = heapPROTECT_BLOCK_POINTER( pxBlockToInsert );
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }
}
/*-----------------------------------------------------------*/

void vPortGetHeapStats( HeapStats_t * pxHeapStats )
{
    BlockLink_t * pxBlock;
    size_t xBlocks = 0, xMaxSize = 0, xMinSize = SIZE_MAX;

    vTaskSuspendAll();
    {
        pxBlock = heapPROTECT_BLOCK_POINTER( xStart.pxNextFreeBlock );

        /* pxBlock will be NULL if the heap has not been initialised.  The heap
         * is initialised automatically when the first allocation is made. */
        if( pxBlock != NULL )
        {
            while( pxBlock != pxEnd )
            {
                /* Increment the number of blocks and record the largest block seen
                 * so far. */
                xBlocks++;

                if( pxBlock->xBlockSize > xMaxSize )
                {
                    xMaxSize = pxBlock->xBlockSize;
                }

                if( pxBlock->xBlockSize < xMinSize )
                {
                    xMinSize = pxBlock->xBlockSize;
                }

                /* Move to the next block in the chain until the last block is
                 * reached. */
                pxBlock = heapPROTECT_BLOCK_POINTER( pxBlock->pxNextFreeBlock );
            }
        }
    }
    ( void ) xTaskResumeAll();

    pxHeapStats->xSizeOfLargestFreeBlockInBytes = xMaxSize;
    pxHeapStats->xSizeOfSmallestFreeBlockInBytes = xMinSize;
    pxHeapStats->xNumberOfFreeBlocks = xBlocks;

    taskENTER_CRITICAL();
    {
        pxHeapStats->xAvailableHeapSpaceInBytes = xFreeBytesRemaining;
        pxHeapStats->xNumberOfSuccessfulAllocations = xNumberOfSuccessfulAllocations;
        pxHeapStats->xNumberOfSuccessfulFrees = xNumberOfSuccessfulFrees;
        pxHeapStats->xMinimumEverFreeBytesRemaining = xMinimumEverFreeBytesRemaining;
    }
    taskEXIT_CRITICAL();
}
/*-----------------------------------------------------------*/
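
/* Example usage of vPortGetHeapStats() from application code ( a sketch only;
 * printf() is used purely for illustration and should be replaced with
 * whatever output mechanism the application provides ):
 *
 *     HeapStats_t xHeapStats;
 *
 *     vPortGetHeapStats( &xHeapStats );
 *     printf( "Free heap: %u bytes in %u block(s)\r\n",
 *             ( unsigned ) xHeapStats.xAvailableHeapSpaceInBytes,
 *             ( unsigned ) xHeapStats.xNumberOfFreeBlocks );
 */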

/*
 * Reset the state in this file.  This state is normally initialized at start up.
 * This function must be called by the application before restarting the
 * scheduler.
 */
void vPortHeapResetState( void )
{
    pxEnd = NULL;

    xFreeBytesRemaining = ( size_t ) 0U;
    xMinimumEverFreeBytesRemaining = ( size_t ) 0U;
    xNumberOfSuccessfulAllocations = ( size_t ) 0U;
    xNumberOfSuccessfulFrees = ( size_t ) 0U;
}
/*-----------------------------------------------------------*/
