1 /*
2  * FreeRTOS Kernel <DEVELOPMENT BRANCH>
3  * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
4  *
5  * SPDX-License-Identifier: MIT
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy of
8  * this software and associated documentation files (the "Software"), to deal in
9  * the Software without restriction, including without limitation the rights to
10  * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
11  * the Software, and to permit persons to whom the Software is furnished to do so,
12  * subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in all
15  * copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
19  * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
20  * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
21  * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
22  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * https://www.FreeRTOS.org
25  * https://github.com/FreeRTOS
26  *
27  */
28 
29 #include <stdlib.h>
30 #include <string.h>
31 
32 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
33  * all the API functions to use the MPU wrappers.  That should only be done when
34  * task.h is included from an application file. */
35 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
36 
37 #include "FreeRTOS.h"
38 #include "task.h"
39 #include "queue.h"
40 
41 #if ( configUSE_CO_ROUTINES == 1 )
42     #include "croutine.h"
43 #endif
44 
45 /* The MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
46  * for the header files above, but not in this file, in order to generate the
47  * correct privileged vs unprivileged linkage and placement. */
48 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
49 
50 
51 /* Constants used with the cRxLock and cTxLock structure members. */
52 #define queueUNLOCKED             ( ( int8_t ) -1 )
53 #define queueLOCKED_UNMODIFIED    ( ( int8_t ) 0 )
54 #define queueINT8_MAX             ( ( int8_t ) 127 )
55 
56 /* When the Queue_t structure is used to represent a base queue its pcHead and
57  * pcTail members are used as pointers into the queue storage area.  When the
58  * Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
59  * not necessary, and the pcHead pointer is set to NULL to indicate that the
60  * structure instead holds a pointer to the mutex holder (if any).  Map an alternative
61  * name to the pcHead structure member to ensure the readability of the code
62  * is maintained.  The QueuePointers_t and SemaphoreData_t types are used to form
63  * a union as their usage is mutually exclusive dependent on what the queue is
64  * being used for. */
65 #define uxQueueType               pcHead
66 #define queueQUEUE_IS_MUTEX       NULL
67 
68 typedef struct QueuePointers
69 {
70     int8_t * pcTail;     /**< Points to the byte at the end of the queue storage area.  One more byte is allocated than necessary to store the queue items; this extra byte is used as a marker. */
71     int8_t * pcReadFrom; /**< Points to the last place that a queued item was read from when the structure is used as a queue. */
72 } QueuePointers_t;
73 
74 typedef struct SemaphoreData
75 {
76     TaskHandle_t xMutexHolder;        /**< The handle of the task that holds the mutex. */
77     UBaseType_t uxRecursiveCallCount; /**< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
78 } SemaphoreData_t;
79 
80 /* Semaphores do not actually store or copy data, so have an item size of
81  * zero. */
82 #define queueSEMAPHORE_QUEUE_ITEM_LENGTH    ( ( UBaseType_t ) 0 )
83 #define queueMUTEX_GIVE_BLOCK_TIME          ( ( TickType_t ) 0U )
84 
85 #if ( configUSE_PREEMPTION == 0 )
86 
87 /* If the cooperative scheduler is being used then a yield should not be
88  * performed just because a higher priority task has been woken. */
89     #define queueYIELD_IF_USING_PREEMPTION()
90 #else
91     #if ( configNUMBER_OF_CORES == 1 )
92         #define queueYIELD_IF_USING_PREEMPTION()    portYIELD_WITHIN_API()
93     #else /* #if ( configNUMBER_OF_CORES == 1 ) */
94         #define queueYIELD_IF_USING_PREEMPTION()    vTaskYieldWithinAPI()
95     #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
96 #endif
97 
98 /*
99  * Definition of the queue used by the scheduler.
100  * Items are queued by copy, not reference.  See the following link for the
101  * rationale: https://www.FreeRTOS.org/Embedded-RTOS-Queues.html
102  */
103 typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */
104 {
105     int8_t * pcHead;           /**< Points to the beginning of the queue storage area. */
106     int8_t * pcWriteTo;        /**< Points to the next free place in the storage area. */
107 
108     union
109     {
110         QueuePointers_t xQueue;     /**< Data required exclusively when this structure is used as a queue. */
111         SemaphoreData_t xSemaphore; /**< Data required exclusively when this structure is used as a semaphore. */
112     } u;
113 
114     List_t xTasksWaitingToSend;             /**< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
115     List_t xTasksWaitingToReceive;          /**< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */
116 
117     volatile UBaseType_t uxMessagesWaiting; /**< The number of items currently in the queue. */
118     UBaseType_t uxLength;                   /**< The length of the queue defined as the number of items it will hold, not the number of bytes. */
119     UBaseType_t uxItemSize;                 /**< The size, in bytes, of each item that the queue will hold. */
120 
121     volatile int8_t cRxLock;                /**< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
122     volatile int8_t cTxLock;                /**< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
123 
124     #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
125         uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
126     #endif
127 
128     #if ( configUSE_QUEUE_SETS == 1 )
129         struct QueueDefinition * pxQueueSetContainer;
130     #endif
131 
132     #if ( configUSE_TRACE_FACILITY == 1 )
133         UBaseType_t uxQueueNumber;
134         uint8_t ucQueueType;
135     #endif
136 } xQUEUE;
137 
138 /* The old xQUEUE name is maintained above then typedefed to the new Queue_t
139  * name below to enable the use of older kernel aware debuggers. */
140 typedef xQUEUE Queue_t;
141 
142 /*-----------------------------------------------------------*/
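/*
 * Illustrative note (not part of the kernel source): because items are queued
 * by copy, not reference, the memory that holds the value passed to
 * xQueueSend() can be reused as soon as the call returns.  A minimal sketch
 * using the public queue.h API; the message type, task and queue handle below
 * are hypothetical.
 *
 *  typedef struct { uint32_t ulId; uint32_t ulValue; } ExampleMessage_t;
 *
 *  void vProducerTask( void * pvParameters )
 *  {
 *      QueueHandle_t xQueue = ( QueueHandle_t ) pvParameters;
 *      ExampleMessage_t xMessage = { 0 };
 *
 *      for( ; ; )
 *      {
 *          xMessage.ulValue++;
 *
 *          // The queue stores its own copy of xMessage, so the local
 *          // variable can safely be modified again on the next iteration.
 *          ( void ) xQueueSend( xQueue, &xMessage, portMAX_DELAY );
 *      }
 *  }
 */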
143 
144 /*
145  * The queue registry is just a means for kernel aware debuggers to locate
146  * queue structures.  It has no other purpose so is an optional component.
147  */
148 #if ( configQUEUE_REGISTRY_SIZE > 0 )
149 
150 /* The type stored within the queue registry array.  This allows a name
151  * to be assigned to each queue making kernel aware debugging a little
152  * more user friendly. */
153     typedef struct QUEUE_REGISTRY_ITEM
154     {
155         const char * pcQueueName;
156         QueueHandle_t xHandle;
157     } xQueueRegistryItem;
158 
159 /* The old xQueueRegistryItem name is maintained above then typedefed to the
160  * new QueueRegistryItem_t name below to enable the use of older kernel aware
161  * debuggers. */
162     typedef xQueueRegistryItem QueueRegistryItem_t;
163 
164 /* The queue registry is simply an array of QueueRegistryItem_t structures.
165  * The pcQueueName member of a structure being NULL is indicative of the
166  * array position being vacant. */
167 
168 /* MISRA Ref 8.4.2 [Declaration shall be visible] */
169 /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-84 */
170 /* coverity[misra_c_2012_rule_8_4_violation] */
171     PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];
172 
173 #endif /* configQUEUE_REGISTRY_SIZE */
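/*
 * Illustrative example (not part of the kernel source): registering a queue so
 * that a kernel aware debugger can display it by name.  A minimal sketch using
 * the public vQueueAddToRegistry() API from queue.h; the queue name and length
 * below are hypothetical, and configQUEUE_REGISTRY_SIZE must be greater than
 * zero in FreeRTOSConfig.h for the registry to exist.
 *
 *  void vCreateAndRegisterQueue( void )
 *  {
 *      QueueHandle_t xQueue = xQueueCreate( 10, sizeof( uint32_t ) );
 *
 *      if( xQueue != NULL )
 *      {
 *          // The registry stores only the pointer to the name, so the string
 *          // must remain valid while the queue is registered.
 *          vQueueAddToRegistry( xQueue, "ExampleQueue" );
 *      }
 *  }
 */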
174 
175 /*
176  * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
177  * prevent an ISR from adding items to or removing items from the queue, but does prevent
178  * an ISR from removing tasks from the queue event lists.  If an ISR finds a
179  * queue is locked it will instead increment the appropriate queue lock count
180  * to indicate that a task may require unblocking.  When the queue is unlocked
181  * these lock counts are inspected, and the appropriate action taken.
182  */
183 static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
184 
185 /*
186  * Uses a critical section to determine if there is any data in a queue.
187  *
188  * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
189  */
190 static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
191 
192 /*
193  * Uses a critical section to determine if there is any space in a queue.
194  *
195  * @return pdTRUE if there is no space, otherwise pdFALSE.
196  */
197 static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
198 
199 /*
200  * Copies an item into the queue, either at the front of the queue or the
201  * back of the queue.
202  */
203 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
204                                       const void * pvItemToQueue,
205                                       const BaseType_t xPosition ) PRIVILEGED_FUNCTION;
206 
207 /*
208  * Copies an item out of a queue.
209  */
210 static void prvCopyDataFromQueue( Queue_t * const pxQueue,
211                                   void * const pvBuffer ) PRIVILEGED_FUNCTION;
212 
213 #if ( configUSE_QUEUE_SETS == 1 )
214 
215 /*
216  * Checks to see if a queue is a member of a queue set, and if so, notifies
217  * the queue set that the queue contains data.
218  */
219     static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
220 #endif
221 
222 /*
223  * Called after a Queue_t structure has been allocated either statically or
224  * dynamically to fill in the structure's members.
225  */
226 static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
227                                    const UBaseType_t uxItemSize,
228                                    uint8_t * pucQueueStorage,
229                                    const uint8_t ucQueueType,
230                                    Queue_t * pxNewQueue ) PRIVILEGED_FUNCTION;
231 
232 /*
233  * Mutexes are a special type of queue.  When a mutex is created, first the
234  * queue is created, then prvInitialiseMutex() is called to configure the queue
235  * as a mutex.
236  */
237 #if ( configUSE_MUTEXES == 1 )
238     static void prvInitialiseMutex( Queue_t * pxNewQueue ) PRIVILEGED_FUNCTION;
239 #endif
240 
241 #if ( configUSE_MUTEXES == 1 )
242 
243 /*
244  * If a task waiting for a mutex causes the mutex holder to inherit a
245  * priority, but the waiting task times out, then the holder should
246  * disinherit the priority - but only down to the highest priority of any
247  * other tasks that are waiting for the same mutex.  This function returns
248  * that priority.
249  */
250     static UBaseType_t prvGetHighestPriorityOfWaitToReceiveList( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
251 #endif
252 /*-----------------------------------------------------------*/
253 
254 /*
255  * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
256  * accessing the queue event lists.
257  */
258 #define prvLockQueue( pxQueue )                            \
259     taskENTER_CRITICAL();                                  \
260     {                                                      \
261         if( ( pxQueue )->cRxLock == queueUNLOCKED )        \
262         {                                                  \
263             ( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED; \
264         }                                                  \
265         if( ( pxQueue )->cTxLock == queueUNLOCKED )        \
266         {                                                  \
267             ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \
268         }                                                  \
269     }                                                      \
270     taskEXIT_CRITICAL()
271 
272 /*
273  * Macro to increment cTxLock member of the queue data structure. It is
274  * capped at the number of tasks in the system as we cannot unblock more
275  * tasks than the number of tasks in the system.
276  */
277 #define prvIncrementQueueTxLock( pxQueue, cTxLock )                           \
278     do {                                                                      \
279         const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks();         \
280         if( ( UBaseType_t ) ( cTxLock ) < uxNumberOfTasks )                   \
281         {                                                                     \
282             configASSERT( ( cTxLock ) != queueINT8_MAX );                     \
283             ( pxQueue )->cTxLock = ( int8_t ) ( ( cTxLock ) + ( int8_t ) 1 ); \
284         }                                                                     \
285     } while( 0 )
286 
287 /*
288  * Macro to increment cRxLock member of the queue data structure. It is
289  * capped at the number of tasks in the system as we cannot unblock more
290  * tasks than the number of tasks in the system.
291  */
292 #define prvIncrementQueueRxLock( pxQueue, cRxLock )                           \
293     do {                                                                      \
294         const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks();         \
295         if( ( UBaseType_t ) ( cRxLock ) < uxNumberOfTasks )                   \
296         {                                                                     \
297             configASSERT( ( cRxLock ) != queueINT8_MAX );                     \
298             ( pxQueue )->cRxLock = ( int8_t ) ( ( cRxLock ) + ( int8_t ) 1 ); \
299         }                                                                     \
300     } while( 0 )
301 /*-----------------------------------------------------------*/
302 
303 BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
304                                BaseType_t xNewQueue )
305 {
306     BaseType_t xReturn = pdPASS;
307     Queue_t * const pxQueue = xQueue;
308 
309     traceENTER_xQueueGenericReset( xQueue, xNewQueue );
310 
311     configASSERT( pxQueue );
312 
313     if( ( pxQueue != NULL ) &&
314         ( pxQueue->uxLength >= 1U ) &&
315         /* Check for multiplication overflow. */
316         ( ( SIZE_MAX / pxQueue->uxLength ) >= pxQueue->uxItemSize ) )
317     {
318         taskENTER_CRITICAL();
319         {
320             pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
321             pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
322             pxQueue->pcWriteTo = pxQueue->pcHead;
323             pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize );
324             pxQueue->cRxLock = queueUNLOCKED;
325             pxQueue->cTxLock = queueUNLOCKED;
326 
327             if( xNewQueue == pdFALSE )
328             {
329                 /* If there are tasks blocked waiting to read from the queue, then
330                  * the tasks will remain blocked as after this function exits the queue
331                  * will still be empty.  If there are tasks blocked waiting to write to
332                  * the queue, then one should be unblocked as after this function exits
333                  * it will be possible to write to it. */
334                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
335                 {
336                     if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
337                     {
338                         queueYIELD_IF_USING_PREEMPTION();
339                     }
340                     else
341                     {
342                         mtCOVERAGE_TEST_MARKER();
343                     }
344                 }
345                 else
346                 {
347                     mtCOVERAGE_TEST_MARKER();
348                 }
349             }
350             else
351             {
352                 /* Ensure the event queues start in the correct state. */
353                 vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
354                 vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
355             }
356         }
357         taskEXIT_CRITICAL();
358     }
359     else
360     {
361         xReturn = pdFAIL;
362     }
363 
364     configASSERT( xReturn != pdFAIL );
365 
366     /* A value is returned for calling semantic consistency with previous
367      * versions. */
368     traceRETURN_xQueueGenericReset( xReturn );
369 
370     return xReturn;
371 }
372 /*-----------------------------------------------------------*/
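/*
 * Illustrative example (not part of the kernel source): application code
 * normally reaches xQueueGenericReset() through the xQueueReset() macro
 * declared in queue.h.  A minimal sketch; the function name and queue handle
 * below are hypothetical.
 *
 *  void vFlushQueue( QueueHandle_t xQueue )
 *  {
 *      // Discard any data currently held in the queue.  If a task was
 *      // blocked waiting to send, it is unblocked because space becomes
 *      // available.
 *      ( void ) xQueueReset( xQueue );
 *  }
 */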
373 
374 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
375 
376     QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength,
377                                              const UBaseType_t uxItemSize,
378                                              uint8_t * pucQueueStorage,
379                                              StaticQueue_t * pxStaticQueue,
380                                              const uint8_t ucQueueType )
381     {
382         Queue_t * pxNewQueue = NULL;
383 
384         traceENTER_xQueueGenericCreateStatic( uxQueueLength, uxItemSize, pucQueueStorage, pxStaticQueue, ucQueueType );
385 
386         /* The StaticQueue_t structure and the queue storage area must be
387          * supplied. */
388         configASSERT( pxStaticQueue );
389 
390         if( ( uxQueueLength > ( UBaseType_t ) 0 ) &&
391             ( pxStaticQueue != NULL ) &&
392 
393             /* A queue storage area should be provided if the item size is not 0, and
394              * should not be provided if the item size is 0. */
395             ( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0U ) ) ) &&
396             ( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0U ) ) ) )
397         {
398             #if ( configASSERT_DEFINED == 1 )
399             {
400                 /* Sanity check that the size of the structure used to declare a
401                  * variable of type StaticQueue_t or StaticSemaphore_t equals the size of
402                  * the real queue and semaphore structures. */
403                 volatile size_t xSize = sizeof( StaticQueue_t );
404 
405                 /* This assertion cannot be branch covered in unit tests */
406                 configASSERT( xSize == sizeof( Queue_t ) ); /* LCOV_EXCL_BR_LINE */
407                 ( void ) xSize;                             /* Prevent unused variable warning when configASSERT() is not defined. */
408             }
409             #endif /* configASSERT_DEFINED */
410 
411             /* The address of a statically allocated queue was passed in, use it.
412              * The address of a statically allocated storage area was also passed in
413              * but is already set. */
414             /* MISRA Ref 11.3.1 [Misaligned access] */
415             /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-113 */
416             /* coverity[misra_c_2012_rule_11_3_violation] */
417             pxNewQueue = ( Queue_t * ) pxStaticQueue;
418 
419             #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
420             {
421                 /* Queues can be allocated either statically or dynamically, so
422                  * note this queue was allocated statically in case the queue is
423                  * later deleted. */
424                 pxNewQueue->ucStaticallyAllocated = pdTRUE;
425             }
426             #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
427 
428             prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
429         }
430         else
431         {
432             configASSERT( pxNewQueue );
433             mtCOVERAGE_TEST_MARKER();
434         }
435 
436         traceRETURN_xQueueGenericCreateStatic( pxNewQueue );
437 
438         return pxNewQueue;
439     }
440 
441 #endif /* configSUPPORT_STATIC_ALLOCATION */
442 /*-----------------------------------------------------------*/
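/*
 * Illustrative example (not part of the kernel source): creating a queue from
 * statically allocated buffers with the xQueueCreateStatic() macro from
 * queue.h, which calls xQueueGenericCreateStatic().  A minimal sketch; the
 * sizes and names below are hypothetical.
 *
 *  #define EXAMPLE_QUEUE_LENGTH    5
 *  #define EXAMPLE_ITEM_SIZE       sizeof( uint32_t )
 *
 *  // The storage area and control structure must remain valid for the
 *  // lifetime of the queue, so they are declared static here.
 *  static uint8_t ucQueueStorage[ EXAMPLE_QUEUE_LENGTH * EXAMPLE_ITEM_SIZE ];
 *  static StaticQueue_t xQueueBuffer;
 *
 *  void vCreateStaticQueue( void )
 *  {
 *      QueueHandle_t xQueue = xQueueCreateStatic( EXAMPLE_QUEUE_LENGTH,
 *                                                 EXAMPLE_ITEM_SIZE,
 *                                                 ucQueueStorage,
 *                                                 &xQueueBuffer );
 *      configASSERT( xQueue != NULL );
 *  }
 *
 * No heap is used, so creation cannot fail through memory exhaustion provided
 * the parameters are valid.
 */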
443 
444 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
445 
446     BaseType_t xQueueGenericGetStaticBuffers( QueueHandle_t xQueue,
447                                               uint8_t ** ppucQueueStorage,
448                                               StaticQueue_t ** ppxStaticQueue )
449     {
450         BaseType_t xReturn;
451         Queue_t * const pxQueue = xQueue;
452 
453         traceENTER_xQueueGenericGetStaticBuffers( xQueue, ppucQueueStorage, ppxStaticQueue );
454 
455         configASSERT( pxQueue );
456         configASSERT( ppxStaticQueue );
457 
458         #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
459         {
460             /* Check if the queue was statically allocated. */
461             if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdTRUE )
462             {
463                 if( ppucQueueStorage != NULL )
464                 {
465                     *ppucQueueStorage = ( uint8_t * ) pxQueue->pcHead;
466                 }
467 
468                 /* MISRA Ref 11.3.1 [Misaligned access] */
469                 /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-113 */
470                 /* coverity[misra_c_2012_rule_11_3_violation] */
471                 *ppxStaticQueue = ( StaticQueue_t * ) pxQueue;
472                 xReturn = pdTRUE;
473             }
474             else
475             {
476                 xReturn = pdFALSE;
477             }
478         }
479         #else /* configSUPPORT_DYNAMIC_ALLOCATION */
480         {
481             /* Queue must have been statically allocated. */
482             if( ppucQueueStorage != NULL )
483             {
484                 *ppucQueueStorage = ( uint8_t * ) pxQueue->pcHead;
485             }
486 
487             *ppxStaticQueue = ( StaticQueue_t * ) pxQueue;
488             xReturn = pdTRUE;
489         }
490         #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
491 
492         traceRETURN_xQueueGenericGetStaticBuffers( xReturn );
493 
494         return xReturn;
495     }
496 
497 #endif /* configSUPPORT_STATIC_ALLOCATION */
498 /*-----------------------------------------------------------*/
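/*
 * Illustrative example (not part of the kernel source): retrieving the buffers
 * of a statically created queue through the xQueueGetStaticBuffers() macro in
 * queue.h, which calls xQueueGenericGetStaticBuffers().  A minimal sketch; the
 * handle is assumed to have been created with xQueueCreateStatic().
 *
 *  void vInspectStaticQueue( QueueHandle_t xQueue )
 *  {
 *      uint8_t * pucStorage = NULL;
 *      StaticQueue_t * pxQueueBuffer = NULL;
 *
 *      if( xQueueGetStaticBuffers( xQueue, &pucStorage, &pxQueueBuffer ) == pdTRUE )
 *      {
 *          // pucStorage now points to the queue storage area and
 *          // pxQueueBuffer to the StaticQueue_t supplied at creation time.
 *      }
 *  }
 */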
499 
500 #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
501 
502     QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength,
503                                        const UBaseType_t uxItemSize,
504                                        const uint8_t ucQueueType )
505     {
506         Queue_t * pxNewQueue = NULL;
507         size_t xQueueSizeInBytes;
508         uint8_t * pucQueueStorage;
509 
510         traceENTER_xQueueGenericCreate( uxQueueLength, uxItemSize, ucQueueType );
511 
512         if( ( uxQueueLength > ( UBaseType_t ) 0 ) &&
513             /* Check for multiplication overflow. */
514             ( ( SIZE_MAX / uxQueueLength ) >= uxItemSize ) &&
515             /* Check for addition overflow. */
516             /* MISRA Ref 14.3.1 [Configuration dependent invariant] */
517             /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-143. */
518             /* coverity[misra_c_2012_rule_14_3_violation] */
519             ( ( SIZE_MAX - sizeof( Queue_t ) ) >= ( size_t ) ( ( size_t ) uxQueueLength * ( size_t ) uxItemSize ) ) )
520         {
521             /* Allocate enough space to hold the maximum number of items that
522              * can be in the queue at any time.  It is valid for uxItemSize to be
523              * zero in the case the queue is used as a semaphore. */
524             xQueueSizeInBytes = ( size_t ) ( ( size_t ) uxQueueLength * ( size_t ) uxItemSize );
525 
526             /* MISRA Ref 11.5.1 [Malloc memory assignment] */
527             /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
528             /* coverity[misra_c_2012_rule_11_5_violation] */
529             pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );
530 
531             if( pxNewQueue != NULL )
532             {
533                 /* Jump past the queue structure to find the location of the queue
534                  * storage area. */
535                 pucQueueStorage = ( uint8_t * ) pxNewQueue;
536                 pucQueueStorage += sizeof( Queue_t );
537 
538                 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
539                 {
540                     /* Queues can be created either statically or dynamically, so
541                      * note this queue was created dynamically in case it is later
542                      * deleted. */
543                     pxNewQueue->ucStaticallyAllocated = pdFALSE;
544                 }
545                 #endif /* configSUPPORT_STATIC_ALLOCATION */
546 
547                 prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
548             }
549             else
550             {
551                 traceQUEUE_CREATE_FAILED( ucQueueType );
552                 mtCOVERAGE_TEST_MARKER();
553             }
554         }
555         else
556         {
557             configASSERT( pxNewQueue );
558             mtCOVERAGE_TEST_MARKER();
559         }
560 
561         traceRETURN_xQueueGenericCreate( pxNewQueue );
562 
563         return pxNewQueue;
564     }
565 
566 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
567 /*-----------------------------------------------------------*/
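/*
 * Illustrative example (not part of the kernel source): application code
 * normally creates a dynamically allocated queue through the xQueueCreate()
 * macro in queue.h, which calls xQueueGenericCreate().  A minimal sketch; the
 * length and item type below are hypothetical.
 *
 *  void vCreateDynamicQueue( void )
 *  {
 *      // Space for the Queue_t structure and the storage area is obtained
 *      // from the FreeRTOS heap in a single pvPortMalloc() call.
 *      QueueHandle_t xQueue = xQueueCreate( 8, sizeof( uint32_t ) );
 *
 *      if( xQueue == NULL )
 *      {
 *          // There was not enough heap memory available to create the queue.
 *      }
 *  }
 */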
568 
569 static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
570                                    const UBaseType_t uxItemSize,
571                                    uint8_t * pucQueueStorage,
572                                    const uint8_t ucQueueType,
573                                    Queue_t * pxNewQueue )
574 {
575     /* Remove compiler warnings about unused parameters should
576      * configUSE_TRACE_FACILITY not be set to 1. */
577     ( void ) ucQueueType;
578 
579     if( uxItemSize == ( UBaseType_t ) 0 )
580     {
581         /* No RAM was allocated for the queue storage area, but pcHead cannot
582          * be set to NULL because NULL is used as a key to say the queue is used as
583          * a mutex.  Therefore just set pcHead to point to the queue as a benign
584          * value that is known to be within the memory map. */
585         pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
586     }
587     else
588     {
589         /* Set the head to the start of the queue storage area. */
590         pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
591     }
592 
593     /* Initialise the queue members as described where the queue type is
594      * defined. */
595     pxNewQueue->uxLength = uxQueueLength;
596     pxNewQueue->uxItemSize = uxItemSize;
597     ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );
598 
599     #if ( configUSE_TRACE_FACILITY == 1 )
600     {
601         pxNewQueue->ucQueueType = ucQueueType;
602     }
603     #endif /* configUSE_TRACE_FACILITY */
604 
605     #if ( configUSE_QUEUE_SETS == 1 )
606     {
607         pxNewQueue->pxQueueSetContainer = NULL;
608     }
609     #endif /* configUSE_QUEUE_SETS */
610 
611     traceQUEUE_CREATE( pxNewQueue );
612 }
613 /*-----------------------------------------------------------*/
614 
615 #if ( configUSE_MUTEXES == 1 )
616 
617     static void prvInitialiseMutex( Queue_t * pxNewQueue )
618     {
619         if( pxNewQueue != NULL )
620         {
621             /* The queue create function will set all the queue structure members
622             * correctly for a generic queue, but this function is creating a
623             * mutex.  Overwrite those members that need to be set differently -
624             * in particular the information required for priority inheritance. */
625             pxNewQueue->u.xSemaphore.xMutexHolder = NULL;
626             pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;
627 
628             /* In case this is a recursive mutex. */
629             pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0;
630 
631             traceCREATE_MUTEX( pxNewQueue );
632 
633             /* Start with the semaphore in the expected state. */
634             ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
635         }
636         else
637         {
638             traceCREATE_MUTEX_FAILED();
639         }
640     }
641 
642 #endif /* configUSE_MUTEXES */
643 /*-----------------------------------------------------------*/
644 
645 #if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
646 
647     QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
648     {
649         QueueHandle_t xNewQueue;
650         const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;
651 
652         traceENTER_xQueueCreateMutex( ucQueueType );
653 
654         xNewQueue = xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType );
655         prvInitialiseMutex( ( Queue_t * ) xNewQueue );
656 
657         traceRETURN_xQueueCreateMutex( xNewQueue );
658 
659         return xNewQueue;
660     }
661 
662 #endif /* configUSE_MUTEXES */
663 /*-----------------------------------------------------------*/
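/*
 * Illustrative example (not part of the kernel source): application code
 * creates a mutex with the xSemaphoreCreateMutex() macro from semphr.h, which
 * calls xQueueCreateMutex().  A minimal sketch assuming semphr.h is included;
 * the protected resource and function names below are hypothetical.
 *
 *  static SemaphoreHandle_t xResourceMutex = NULL;
 *
 *  void vSetupMutex( void )
 *  {
 *      xResourceMutex = xSemaphoreCreateMutex();
 *      configASSERT( xResourceMutex != NULL );
 *  }
 *
 *  void vUseSharedResource( void )
 *  {
 *      if( xSemaphoreTake( xResourceMutex, pdMS_TO_TICKS( 10 ) ) == pdTRUE )
 *      {
 *          // Access the shared resource here, then release the mutex.
 *          ( void ) xSemaphoreGive( xResourceMutex );
 *      }
 *  }
 */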
664 
665 #if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
666 
667     QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType,
668                                            StaticQueue_t * pxStaticQueue )
669     {
670         QueueHandle_t xNewQueue;
671         const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;
672 
673         traceENTER_xQueueCreateMutexStatic( ucQueueType, pxStaticQueue );
674 
675         /* Prevent compiler warnings about unused parameters if
676          * configUSE_TRACE_FACILITY does not equal 1. */
677         ( void ) ucQueueType;
678 
679         xNewQueue = xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType );
680         prvInitialiseMutex( ( Queue_t * ) xNewQueue );
681 
682         traceRETURN_xQueueCreateMutexStatic( xNewQueue );
683 
684         return xNewQueue;
685     }
686 
687 #endif /* configUSE_MUTEXES */
688 /*-----------------------------------------------------------*/
689 
690 #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
691 
692     TaskHandle_t xQueueGetMutexHolder( QueueHandle_t xSemaphore )
693     {
694         TaskHandle_t pxReturn;
695         Queue_t * const pxSemaphore = ( Queue_t * ) xSemaphore;
696 
697         traceENTER_xQueueGetMutexHolder( xSemaphore );
698 
699         configASSERT( xSemaphore );
700 
701         /* This function is called by xSemaphoreGetMutexHolder(), and should not
702          * be called directly.  Note:  This is a good way of determining if the
703          * calling task is the mutex holder, but not a good way of determining the
704          * identity of the mutex holder, as the holder may change between the
705          * following critical section exiting and the function returning. */
706         taskENTER_CRITICAL();
707         {
708             if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX )
709             {
710                 pxReturn = pxSemaphore->u.xSemaphore.xMutexHolder;
711             }
712             else
713             {
714                 pxReturn = NULL;
715             }
716         }
717         taskEXIT_CRITICAL();
718 
719         traceRETURN_xQueueGetMutexHolder( pxReturn );
720 
721         return pxReturn;
722     }
723 
724 #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
725 /*-----------------------------------------------------------*/
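/*
 * Illustrative example (not part of the kernel source): as noted above, the
 * holder returned by xSemaphoreGetMutexHolder() (semphr.h) is only reliable
 * for checking whether the calling task itself holds the mutex.  A minimal
 * sketch assuming semphr.h and task.h are included; the function name and
 * mutex handle below are hypothetical.
 *
 *  BaseType_t xCallerHoldsMutex( SemaphoreHandle_t xMutex )
 *  {
 *      // The result is only meaningful for the calling task - any other
 *      // task's holdership can change before this function returns.
 *      return ( xSemaphoreGetMutexHolder( xMutex ) == xTaskGetCurrentTaskHandle() ) ? pdTRUE : pdFALSE;
 *  }
 */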
726 
727 #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
728 
729     TaskHandle_t xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore )
730     {
731         TaskHandle_t pxReturn;
732 
733         traceENTER_xQueueGetMutexHolderFromISR( xSemaphore );
734 
735         configASSERT( xSemaphore );
736 
737         /* Mutexes cannot be used in interrupt service routines, so the mutex
738          * holder should not change in an ISR, and therefore a critical section is
739          * not required here. */
740         if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
741         {
742             pxReturn = ( ( Queue_t * ) xSemaphore )->u.xSemaphore.xMutexHolder;
743         }
744         else
745         {
746             pxReturn = NULL;
747         }
748 
749         traceRETURN_xQueueGetMutexHolderFromISR( pxReturn );
750 
751         return pxReturn;
752     }
753 
754 #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
755 /*-----------------------------------------------------------*/
756 
757 #if ( configUSE_RECURSIVE_MUTEXES == 1 )
758 
759     BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
760     {
761         BaseType_t xReturn;
762         Queue_t * const pxMutex = ( Queue_t * ) xMutex;
763 
764         traceENTER_xQueueGiveMutexRecursive( xMutex );
765 
766         configASSERT( pxMutex );
767 
768         /* If this is the task that holds the mutex then xMutexHolder will not
769          * change outside of this task.  If this task does not hold the mutex then
770          * pxMutexHolder can never coincidentally equal the tasks handle, and as
771          * this is the only condition we are interested in it does not matter if
772          * pxMutexHolder is accessed simultaneously by another task.  Therefore no
773          * mutual exclusion is required to test the pxMutexHolder variable. */
774         if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() )
775         {
776             traceGIVE_MUTEX_RECURSIVE( pxMutex );
777 
778             /* uxRecursiveCallCount cannot be zero if xMutexHolder is equal to
779              * the task handle, therefore no underflow check is required.  Also,
780              * uxRecursiveCallCount is only modified by the mutex holder, and as
781              * there can only be one, no mutual exclusion is required to modify the
782              * uxRecursiveCallCount member. */
783             ( pxMutex->u.xSemaphore.uxRecursiveCallCount )--;
784 
785             /* Has the recursive call count unwound to 0? */
786             if( pxMutex->u.xSemaphore.uxRecursiveCallCount == ( UBaseType_t ) 0 )
787             {
788                 /* Return the mutex.  This will automatically unblock any other
789                  * task that might be waiting to access the mutex. */
790                 ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
791             }
792             else
793             {
794                 mtCOVERAGE_TEST_MARKER();
795             }
796 
797             xReturn = pdPASS;
798         }
799         else
800         {
801             /* The mutex cannot be given because the calling task is not the
802              * holder. */
803             xReturn = pdFAIL;
804 
805             traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
806         }
807 
808         traceRETURN_xQueueGiveMutexRecursive( xReturn );
809 
810         return xReturn;
811     }
812 
813 #endif /* configUSE_RECURSIVE_MUTEXES */
814 /*-----------------------------------------------------------*/
815 
816 #if ( configUSE_RECURSIVE_MUTEXES == 1 )
817 
818     BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex,
819                                          TickType_t xTicksToWait )
820     {
821         BaseType_t xReturn;
822         Queue_t * const pxMutex = ( Queue_t * ) xMutex;
823 
824         traceENTER_xQueueTakeMutexRecursive( xMutex, xTicksToWait );
825 
826         configASSERT( pxMutex );
827 
828         /* Comments regarding mutual exclusion as per those within
829          * xQueueGiveMutexRecursive(). */
830 
831         traceTAKE_MUTEX_RECURSIVE( pxMutex );
832 
833         if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() )
834         {
835             ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++;
836 
837             /* Check if an overflow occurred. */
838             configASSERT( pxMutex->u.xSemaphore.uxRecursiveCallCount );
839 
840             xReturn = pdPASS;
841         }
842         else
843         {
844             xReturn = xQueueSemaphoreTake( pxMutex, xTicksToWait );
845 
846             /* pdPASS will only be returned if the mutex was successfully
847              * obtained.  The calling task may have entered the Blocked state
848              * before reaching here. */
849             if( xReturn != pdFAIL )
850             {
851                 ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++;
852 
853                 /* Check if an overflow occurred. */
854                 configASSERT( pxMutex->u.xSemaphore.uxRecursiveCallCount );
855             }
856             else
857             {
858                 traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
859             }
860         }
861 
862         traceRETURN_xQueueTakeMutexRecursive( xReturn );
863 
864         return xReturn;
865     }
866 
867 #endif /* configUSE_RECURSIVE_MUTEXES */
868 /*-----------------------------------------------------------*/
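/*
 * Illustrative example (not part of the kernel source): a recursive mutex must
 * be given once for every time it was taken before it becomes available to
 * other tasks.  A minimal sketch using the semphr.h recursive API; the
 * function names are hypothetical and the handle is assumed to have been
 * created with xSemaphoreCreateRecursiveMutex().
 *
 *  void vInner( SemaphoreHandle_t xRecursiveMutex );
 *
 *  void vOuter( SemaphoreHandle_t xRecursiveMutex )
 *  {
 *      if( xSemaphoreTakeRecursive( xRecursiveMutex, portMAX_DELAY ) == pdTRUE )
 *      {
 *          vInner( xRecursiveMutex );                            // Takes the mutex a second time.
 *          ( void ) xSemaphoreGiveRecursive( xRecursiveMutex );  // Call count unwinds to zero here.
 *      }
 *  }
 *
 *  void vInner( SemaphoreHandle_t xRecursiveMutex )
 *  {
 *      // Succeeds immediately because the calling task already holds the mutex.
 *      if( xSemaphoreTakeRecursive( xRecursiveMutex, 0 ) == pdTRUE )
 *      {
 *          ( void ) xSemaphoreGiveRecursive( xRecursiveMutex );
 *      }
 *  }
 */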
869 
870 #if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
871 
872     QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount,
873                                                        const UBaseType_t uxInitialCount,
874                                                        StaticQueue_t * pxStaticQueue )
875     {
876         QueueHandle_t xHandle = NULL;
877 
878         traceENTER_xQueueCreateCountingSemaphoreStatic( uxMaxCount, uxInitialCount, pxStaticQueue );
879 
880         if( ( uxMaxCount != 0U ) &&
881             ( uxInitialCount <= uxMaxCount ) )
882         {
883             xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE );
884 
885             if( xHandle != NULL )
886             {
887                 ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;
888 
889                 traceCREATE_COUNTING_SEMAPHORE();
890             }
891             else
892             {
893                 traceCREATE_COUNTING_SEMAPHORE_FAILED();
894             }
895         }
896         else
897         {
898             configASSERT( xHandle );
899             mtCOVERAGE_TEST_MARKER();
900         }
901 
902         traceRETURN_xQueueCreateCountingSemaphoreStatic( xHandle );
903 
904         return xHandle;
905     }
906 
907 #endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
908 /*-----------------------------------------------------------*/
909 
910 #if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
911 
912     QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount,
913                                                  const UBaseType_t uxInitialCount )
914     {
915         QueueHandle_t xHandle = NULL;
916 
917         traceENTER_xQueueCreateCountingSemaphore( uxMaxCount, uxInitialCount );
918 
919         if( ( uxMaxCount != 0U ) &&
920             ( uxInitialCount <= uxMaxCount ) )
921         {
922             xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );
923 
924             if( xHandle != NULL )
925             {
926                 ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;
927 
928                 traceCREATE_COUNTING_SEMAPHORE();
929             }
930             else
931             {
932                 traceCREATE_COUNTING_SEMAPHORE_FAILED();
933             }
934         }
935         else
936         {
937             configASSERT( xHandle );
938             mtCOVERAGE_TEST_MARKER();
939         }
940 
941         traceRETURN_xQueueCreateCountingSemaphore( xHandle );
942 
943         return xHandle;
944     }
945 
946 #endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
947 /*-----------------------------------------------------------*/
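/*
 * Illustrative example (not part of the kernel source): a counting semaphore
 * created with xSemaphoreCreateCounting() (semphr.h) starts with
 * uxInitialCount available tokens and can never hold more than uxMaxCount.  A
 * minimal sketch assuming semphr.h is included; the counts and function name
 * below are hypothetical.
 *
 *  void vCreateCountingSemaphore( void )
 *  {
 *      // Up to 5 'give' operations can be outstanding; none are available
 *      // initially.
 *      SemaphoreHandle_t xCountingSemaphore = xSemaphoreCreateCounting( 5, 0 );
 *
 *      if( xCountingSemaphore != NULL )
 *      {
 *          ( void ) xSemaphoreGive( xCountingSemaphore );       // Count becomes 1.
 *          ( void ) xSemaphoreTake( xCountingSemaphore, 0 );    // Count returns to 0.
 *      }
 *  }
 */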
948 
949 BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
950                               const void * const pvItemToQueue,
951                               TickType_t xTicksToWait,
952                               const BaseType_t xCopyPosition )
953 {
954     BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
955     TimeOut_t xTimeOut;
956     Queue_t * const pxQueue = xQueue;
957 
958     traceENTER_xQueueGenericSend( xQueue, pvItemToQueue, xTicksToWait, xCopyPosition );
959 
960     configASSERT( pxQueue );
961     configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
962     configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
963     #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
964     {
965         configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
966     }
967     #endif
968 
969     for( ; ; )
970     {
971         taskENTER_CRITICAL();
972         {
973             /* Is there room on the queue now?  The running task must be the
974              * highest priority task wanting to access the queue.  If the head item
975              * in the queue is to be overwritten then it does not matter if the
976              * queue is full. */
977             if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
978             {
979                 traceQUEUE_SEND( pxQueue );
980 
981                 #if ( configUSE_QUEUE_SETS == 1 )
982                 {
983                     const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;
984 
985                     xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
986 
987                     if( pxQueue->pxQueueSetContainer != NULL )
988                     {
989                         if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
990                         {
991                             /* Do not notify the queue set as an existing item
992                              * was overwritten in the queue so the number of items
993                              * in the queue has not changed. */
994                             mtCOVERAGE_TEST_MARKER();
995                         }
996                         else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
997                         {
998                             /* The queue is a member of a queue set, and posting
999                              * to the queue set caused a higher priority task to
1000                              * unblock. A context switch is required. */
1001                             queueYIELD_IF_USING_PREEMPTION();
1002                         }
1003                         else
1004                         {
1005                             mtCOVERAGE_TEST_MARKER();
1006                         }
1007                     }
1008                     else
1009                     {
1010                         /* If there was a task waiting for data to arrive on the
1011                          * queue then unblock it now. */
1012                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1013                         {
1014                             if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1015                             {
1016                                 /* The unblocked task has a priority higher than
1017                                  * our own so yield immediately.  Yes it is ok to
1018                                  * do this from within the critical section - the
1019                                  * kernel takes care of that. */
1020                                 queueYIELD_IF_USING_PREEMPTION();
1021                             }
1022                             else
1023                             {
1024                                 mtCOVERAGE_TEST_MARKER();
1025                             }
1026                         }
1027                         else if( xYieldRequired != pdFALSE )
1028                         {
1029                             /* This path is a special case that will only get
1030                              * executed if the task was holding multiple mutexes
1031                              * and the mutexes were given back in an order that is
1032                              * different to that in which they were taken. */
1033                             queueYIELD_IF_USING_PREEMPTION();
1034                         }
1035                         else
1036                         {
1037                             mtCOVERAGE_TEST_MARKER();
1038                         }
1039                     }
1040                 }
1041                 #else /* configUSE_QUEUE_SETS */
1042                 {
1043                     xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
1044 
1045                     /* If there was a task waiting for data to arrive on the
1046                      * queue then unblock it now. */
1047                     if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1048                     {
1049                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1050                         {
1051                             /* The unblocked task has a priority higher than
1052                              * our own so yield immediately.  Yes it is ok to do
1053                              * this from within the critical section - the kernel
1054                              * takes care of that. */
1055                             queueYIELD_IF_USING_PREEMPTION();
1056                         }
1057                         else
1058                         {
1059                             mtCOVERAGE_TEST_MARKER();
1060                         }
1061                     }
1062                     else if( xYieldRequired != pdFALSE )
1063                     {
1064                         /* This path is a special case that will only get
1065                          * executed if the task was holding multiple mutexes and
1066                          * the mutexes were given back in an order that is
1067                          * different to that in which they were taken. */
1068                         queueYIELD_IF_USING_PREEMPTION();
1069                     }
1070                     else
1071                     {
1072                         mtCOVERAGE_TEST_MARKER();
1073                     }
1074                 }
1075                 #endif /* configUSE_QUEUE_SETS */
1076 
1077                 taskEXIT_CRITICAL();
1078 
1079                 traceRETURN_xQueueGenericSend( pdPASS );
1080 
1081                 return pdPASS;
1082             }
1083             else
1084             {
1085                 if( xTicksToWait == ( TickType_t ) 0 )
1086                 {
1087                     /* The queue was full and no block time is specified (or
1088                      * the block time has expired) so leave now. */
1089                     taskEXIT_CRITICAL();
1090 
1091                     /* Return to the original privilege level before exiting
1092                      * the function. */
1093                     traceQUEUE_SEND_FAILED( pxQueue );
1094                     traceRETURN_xQueueGenericSend( errQUEUE_FULL );
1095 
1096                     return errQUEUE_FULL;
1097                 }
1098                 else if( xEntryTimeSet == pdFALSE )
1099                 {
1100                     /* The queue was full and a block time was specified so
1101                      * configure the timeout structure. */
1102                     vTaskInternalSetTimeOutState( &xTimeOut );
1103                     xEntryTimeSet = pdTRUE;
1104                 }
1105                 else
1106                 {
1107                     /* Entry time was already set. */
1108                     mtCOVERAGE_TEST_MARKER();
1109                 }
1110             }
1111         }
1112         taskEXIT_CRITICAL();
1113 
1114         /* Interrupts and other tasks can send to and receive from the queue
1115          * now the critical section has been exited. */
1116 
1117         vTaskSuspendAll();
1118         prvLockQueue( pxQueue );
1119 
1120         /* Update the timeout state to see if it has expired yet. */
1121         if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1122         {
1123             if( prvIsQueueFull( pxQueue ) != pdFALSE )
1124             {
1125                 traceBLOCKING_ON_QUEUE_SEND( pxQueue );
1126                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
1127 
1128                 /* Unlocking the queue means queue events can affect the
1129                  * event list. It is possible that interrupts occurring now
1130                  * remove this task from the event list again - but as the
1131                  * scheduler is suspended the task will go onto the pending
1132                  * ready list instead of the actual ready list. */
1133                 prvUnlockQueue( pxQueue );
1134 
1135                 /* Resuming the scheduler will move tasks from the pending
1136                  * ready list into the ready list - so it is feasible that this
1137                  * task is already in the ready list before it yields - in which
1138                  * case the yield will not cause a context switch unless there
1139                  * is also a higher priority task in the pending ready list. */
1140                 if( xTaskResumeAll() == pdFALSE )
1141                 {
1142                     taskYIELD_WITHIN_API();
1143                 }
1144             }
1145             else
1146             {
1147                 /* Try again. */
1148                 prvUnlockQueue( pxQueue );
1149                 ( void ) xTaskResumeAll();
1150             }
1151         }
1152         else
1153         {
1154             /* The timeout has expired. */
1155             prvUnlockQueue( pxQueue );
1156             ( void ) xTaskResumeAll();
1157 
1158             traceQUEUE_SEND_FAILED( pxQueue );
1159             traceRETURN_xQueueGenericSend( errQUEUE_FULL );
1160 
1161             return errQUEUE_FULL;
1162         }
1163     }
1164 }
1165 /*-----------------------------------------------------------*/
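/*
 * Illustrative example (not part of the kernel source): application code
 * reaches xQueueGenericSend() through the xQueueSend(), xQueueSendToBack(),
 * xQueueSendToFront() and xQueueOverwrite() macros in queue.h.  A minimal
 * sketch; the queues below are hypothetical, are assumed to hold uint32_t
 * items, and the overwrite target is assumed to have a length of one.
 *
 *  void vSendExamples( QueueHandle_t xQueue, QueueHandle_t xLengthOneQueue )
 *  {
 *      uint32_t ulValue = 10;
 *
 *      // Block for up to 100 ms for space to become available.
 *      if( xQueueSend( xQueue, &ulValue, pdMS_TO_TICKS( 100 ) ) != pdPASS )
 *      {
 *          // errQUEUE_FULL: the queue remained full for the whole block time.
 *      }
 *
 *      // Overwrite always succeeds, replacing any existing item, and is
 *      // intended only for queues with a length of one.
 *      ( void ) xQueueOverwrite( xLengthOneQueue, &ulValue );
 *  }
 */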
1166 
1167 BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
1168                                      const void * const pvItemToQueue,
1169                                      BaseType_t * const pxHigherPriorityTaskWoken,
1170                                      const BaseType_t xCopyPosition )
1171 {
1172     BaseType_t xReturn;
1173     UBaseType_t uxSavedInterruptStatus;
1174     Queue_t * const pxQueue = xQueue;
1175 
1176     traceENTER_xQueueGenericSendFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken, xCopyPosition );
1177 
1178     configASSERT( ( pxQueue != NULL ) && !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
1179     configASSERT( ( pxQueue != NULL ) && !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
1180 
1181     /* RTOS ports that support interrupt nesting have the concept of a maximum
1182      * system call (or maximum API call) interrupt priority.  Interrupts that are
1183      * above the maximum system call priority are kept permanently enabled, even
1184      * when the RTOS kernel is in a critical section, but cannot make any calls to
1185      * FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
1186      * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1187      * failure if a FreeRTOS API function is called from an interrupt that has been
1188      * assigned a priority above the configured maximum system call priority.
1189      * Only FreeRTOS functions that end in FromISR can be called from interrupts
1190      * that have been assigned a priority at or (logically) below the maximum
1191      * system call interrupt priority.  FreeRTOS maintains a separate interrupt
1192      * safe API to ensure interrupt entry is as fast and as simple as possible.
1193      * More information (albeit Cortex-M specific) is provided on the following
1194      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
1195     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
1196 
1197     /* Similar to xQueueGenericSend, except without blocking if there is no room
1198      * in the queue.  Also don't directly wake a task that was blocked on a queue
1199      * read, instead return a flag to say whether a context switch is required or
1200      * not (i.e. has a task with a higher priority than us been woken by this
1201      * post). */
1202     /* MISRA Ref 4.7.1 [Return value shall be checked] */
1203     /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
1204     /* coverity[misra_c_2012_directive_4_7_violation] */
1205     uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
1206     {
1207         if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
1208         {
1209             const int8_t cTxLock = pxQueue->cTxLock;
1210             const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;
1211 
1212             traceQUEUE_SEND_FROM_ISR( pxQueue );
1213 
1214             /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
1215              *  semaphore or mutex.  That means prvCopyDataToQueue() cannot result
1216              *  in a task disinheriting a priority and prvCopyDataToQueue() can be
1217              *  called here even though the disinherit function does not check if
1218              *  the scheduler is suspended before accessing the ready lists. */
1219             ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
1220 
1221             /* The event list is not altered if the queue is locked.  This will
1222              * be done when the queue is unlocked later. */
1223             if( cTxLock == queueUNLOCKED )
1224             {
1225                 #if ( configUSE_QUEUE_SETS == 1 )
1226                 {
1227                     if( pxQueue->pxQueueSetContainer != NULL )
1228                     {
1229                         if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
1230                         {
1231                             /* Do not notify the queue set as an existing item
1232                              * was overwritten in the queue so the number of items
1233                              * in the queue has not changed. */
1234                             mtCOVERAGE_TEST_MARKER();
1235                         }
1236                         else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
1237                         {
1238                             /* The queue is a member of a queue set, and posting
1239                              * to the queue set caused a higher priority task to
1240                              * unblock.  A context switch is required. */
1241                             if( pxHigherPriorityTaskWoken != NULL )
1242                             {
1243                                 *pxHigherPriorityTaskWoken = pdTRUE;
1244                             }
1245                             else
1246                             {
1247                                 mtCOVERAGE_TEST_MARKER();
1248                             }
1249                         }
1250                         else
1251                         {
1252                             mtCOVERAGE_TEST_MARKER();
1253                         }
1254                     }
1255                     else
1256                     {
1257                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1258                         {
1259                             if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1260                             {
1261                                 /* The task waiting has a higher priority so
1262                                  *  record that a context switch is required. */
1263                                 if( pxHigherPriorityTaskWoken != NULL )
1264                                 {
1265                                     *pxHigherPriorityTaskWoken = pdTRUE;
1266                                 }
1267                                 else
1268                                 {
1269                                     mtCOVERAGE_TEST_MARKER();
1270                                 }
1271                             }
1272                             else
1273                             {
1274                                 mtCOVERAGE_TEST_MARKER();
1275                             }
1276                         }
1277                         else
1278                         {
1279                             mtCOVERAGE_TEST_MARKER();
1280                         }
1281                     }
1282                 }
1283                 #else /* configUSE_QUEUE_SETS */
1284                 {
1285                     if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1286                     {
1287                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1288                         {
1289                             /* The task waiting has a higher priority so record that a
1290                              * context switch is required. */
1291                             if( pxHigherPriorityTaskWoken != NULL )
1292                             {
1293                                 *pxHigherPriorityTaskWoken = pdTRUE;
1294                             }
1295                             else
1296                             {
1297                                 mtCOVERAGE_TEST_MARKER();
1298                             }
1299                         }
1300                         else
1301                         {
1302                             mtCOVERAGE_TEST_MARKER();
1303                         }
1304                     }
1305                     else
1306                     {
1307                         mtCOVERAGE_TEST_MARKER();
1308                     }
1309 
1310                     /* Not used in this path. */
1311                     ( void ) uxPreviousMessagesWaiting;
1312                 }
1313                 #endif /* configUSE_QUEUE_SETS */
1314             }
1315             else
1316             {
1317                 /* Increment the lock count so the task that unlocks the queue
1318                  * knows that data was posted while it was locked. */
1319                 prvIncrementQueueTxLock( pxQueue, cTxLock );
1320             }
1321 
1322             xReturn = pdPASS;
1323         }
1324         else
1325         {
1326             traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
1327             xReturn = errQUEUE_FULL;
1328         }
1329     }
1330     taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
1331 
1332     traceRETURN_xQueueGenericSendFromISR( xReturn );
1333 
1334     return xReturn;
1335 }
1336 /*-----------------------------------------------------------*/
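/* Illustrative usage sketch, not part of the kernel implementation: applications
 * normally reach xQueueGenericSendFromISR() through the xQueueSendFromISR(),
 * xQueueSendToBackFromISR(), xQueueSendToFrontFromISR() and
 * xQueueOverwriteFromISR() macros in queue.h.  The handler, queue handle and
 * peripheral helper below are hypothetical.
 *
 *  void vRxInterruptHandler( void )
 *  {
 *      BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *      char cByteReceived = ucReadByteFromPeripheral();   // hypothetical helper
 *
 *      // Post the received byte to the back of the (hypothetical) xRxQueue.
 *      xQueueSendFromISR( xRxQueue, &cByteReceived, &xHigherPriorityTaskWoken );
 *
 *      // Request a context switch on exit if a higher priority task was woken.
 *      portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *  }
 */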
1337 
1338 BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
1339                               BaseType_t * const pxHigherPriorityTaskWoken )
1340 {
1341     BaseType_t xReturn;
1342     UBaseType_t uxSavedInterruptStatus;
1343     Queue_t * const pxQueue = xQueue;
1344 
1345     traceENTER_xQueueGiveFromISR( xQueue, pxHigherPriorityTaskWoken );
1346 
1347     /* Similar to xQueueGenericSendFromISR() but used with semaphores where the
1348      * item size is 0.  Don't directly wake a task that was blocked on a queue
1349      * read, instead return a flag to say whether a context switch is required or
1350      * not (i.e. has a task with a higher priority than us been woken by this
1351      * post). */
1352 
1353     /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
1354      * if the item size is not 0. */
1355     configASSERT( ( pxQueue != NULL ) && ( pxQueue->uxItemSize == 0 ) );
1356 
1357     /* Normally a mutex would not be given from an interrupt, especially if
1358      * there is a mutex holder, as priority inheritance makes no sense for an
1359      * interrupt, only tasks. */
1360     configASSERT( ( pxQueue != NULL ) && !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->u.xSemaphore.xMutexHolder != NULL ) ) );
1361 
1362     /* RTOS ports that support interrupt nesting have the concept of a maximum
1363      * system call (or maximum API call) interrupt priority.  Interrupts that are
1364      * above the maximum system call priority are kept permanently enabled, even
1365      * when the RTOS kernel is in a critical section, but cannot make any calls to
1366      * FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
1367      * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1368      * failure if a FreeRTOS API function is called from an interrupt that has been
1369      * assigned a priority above the configured maximum system call priority.
1370      * Only FreeRTOS functions that end in FromISR can be called from interrupts
1371      * that have been assigned a priority at or (logically) below the maximum
1372      * system call interrupt priority.  FreeRTOS maintains a separate interrupt
1373      * safe API to ensure interrupt entry is as fast and as simple as possible.
1374      * More information (albeit Cortex-M specific) is provided on the following
1375      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
1376     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
1377 
1378     /* MISRA Ref 4.7.1 [Return value shall be checked] */
1379     /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
1380     /* coverity[misra_c_2012_directive_4_7_violation] */
1381     uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
1382     {
1383         const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
1384 
1385         /* When the queue is used to implement a semaphore no data is ever
1386          * moved through the queue but it is still valid to see if the queue 'has
1387          * space'. */
1388         if( uxMessagesWaiting < pxQueue->uxLength )
1389         {
1390             const int8_t cTxLock = pxQueue->cTxLock;
1391 
1392             traceQUEUE_SEND_FROM_ISR( pxQueue );
1393 
1394             /* A task can only have an inherited priority if it is a mutex
1395              * holder - and if there is a mutex holder then the mutex cannot be
1396              * given from an ISR.  As this is the ISR version of the function it
1397              * can be assumed there is no mutex holder and no need to determine if
1398              * priority disinheritance is needed.  Simply increase the count of
1399              * messages (semaphores) available. */
1400             pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting + ( UBaseType_t ) 1 );
1401 
1402             /* The event list is not altered if the queue is locked.  This will
1403              * be done when the queue is unlocked later. */
1404             if( cTxLock == queueUNLOCKED )
1405             {
1406                 #if ( configUSE_QUEUE_SETS == 1 )
1407                 {
1408                     if( pxQueue->pxQueueSetContainer != NULL )
1409                     {
1410                         if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
1411                         {
1412                             /* The semaphore is a member of a queue set, and
1413                              * posting to the queue set caused a higher priority
1414                              * task to unblock.  A context switch is required. */
1415                             if( pxHigherPriorityTaskWoken != NULL )
1416                             {
1417                                 *pxHigherPriorityTaskWoken = pdTRUE;
1418                             }
1419                             else
1420                             {
1421                                 mtCOVERAGE_TEST_MARKER();
1422                             }
1423                         }
1424                         else
1425                         {
1426                             mtCOVERAGE_TEST_MARKER();
1427                         }
1428                     }
1429                     else
1430                     {
1431                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1432                         {
1433                             if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1434                             {
1435                                 /* The task waiting has a higher priority so
1436                                  *  record that a context switch is required. */
1437                                 if( pxHigherPriorityTaskWoken != NULL )
1438                                 {
1439                                     *pxHigherPriorityTaskWoken = pdTRUE;
1440                                 }
1441                                 else
1442                                 {
1443                                     mtCOVERAGE_TEST_MARKER();
1444                                 }
1445                             }
1446                             else
1447                             {
1448                                 mtCOVERAGE_TEST_MARKER();
1449                             }
1450                         }
1451                         else
1452                         {
1453                             mtCOVERAGE_TEST_MARKER();
1454                         }
1455                     }
1456                 }
1457                 #else /* configUSE_QUEUE_SETS */
1458                 {
1459                     if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1460                     {
1461                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1462                         {
1463                             /* The task waiting has a higher priority so record that a
1464                              * context switch is required. */
1465                             if( pxHigherPriorityTaskWoken != NULL )
1466                             {
1467                                 *pxHigherPriorityTaskWoken = pdTRUE;
1468                             }
1469                             else
1470                             {
1471                                 mtCOVERAGE_TEST_MARKER();
1472                             }
1473                         }
1474                         else
1475                         {
1476                             mtCOVERAGE_TEST_MARKER();
1477                         }
1478                     }
1479                     else
1480                     {
1481                         mtCOVERAGE_TEST_MARKER();
1482                     }
1483                 }
1484                 #endif /* configUSE_QUEUE_SETS */
1485             }
1486             else
1487             {
1488                 /* Increment the lock count so the task that unlocks the queue
1489                  * knows that data was posted while it was locked. */
1490                 prvIncrementQueueTxLock( pxQueue, cTxLock );
1491             }
1492 
1493             xReturn = pdPASS;
1494         }
1495         else
1496         {
1497             traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
1498             xReturn = errQUEUE_FULL;
1499         }
1500     }
1501     taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
1502 
1503     traceRETURN_xQueueGiveFromISR( xReturn );
1504 
1505     return xReturn;
1506 }
1507 /*-----------------------------------------------------------*/
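/* Illustrative usage sketch, not part of the kernel implementation: this
 * function is the target of the xSemaphoreGiveFromISR() macro in semphr.h.
 * The handler and semaphore handle below are hypothetical.
 *
 *  void vTimerCaptureISR( void )
 *  {
 *      BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *
 *      // 'Give' the (hypothetical) binary semaphore to unblock the task that
 *      // performs the deferred interrupt processing.
 *      xSemaphoreGiveFromISR( xCaptureSemaphore, &xHigherPriorityTaskWoken );
 *
 *      portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *  }
 */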
1508 
1509 BaseType_t xQueueReceive( QueueHandle_t xQueue,
1510                           void * const pvBuffer,
1511                           TickType_t xTicksToWait )
1512 {
1513     BaseType_t xEntryTimeSet = pdFALSE;
1514     TimeOut_t xTimeOut;
1515     Queue_t * const pxQueue = xQueue;
1516 
1517     traceENTER_xQueueReceive( xQueue, pvBuffer, xTicksToWait );
1518 
1519     /* Check the pointer is not NULL. */
1520     configASSERT( ( pxQueue ) );
1521 
1522     /* The buffer into which data is received can only be NULL if the data size
1523      * is zero (so no data is copied into the buffer). */
1524     configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );
1525 
1526     /* Cannot block if the scheduler is suspended. */
1527     #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
1528     {
1529         configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
1530     }
1531     #endif
1532 
1533     for( ; ; )
1534     {
1535         taskENTER_CRITICAL();
1536         {
1537             const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
1538 
1539             /* Is there data in the queue now?  To be running the calling task
1540              * must be the highest priority task wanting to access the queue. */
1541             if( uxMessagesWaiting > ( UBaseType_t ) 0 )
1542             {
1543                 /* Data available, remove one item. */
1544                 prvCopyDataFromQueue( pxQueue, pvBuffer );
1545                 traceQUEUE_RECEIVE( pxQueue );
1546                 pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting - ( UBaseType_t ) 1 );
1547 
1548                 /* There is now space in the queue, were any tasks waiting to
1549                  * post to the queue?  If so, unblock the highest priority waiting
1550                  * task. */
1551                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
1552                 {
1553                     if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
1554                     {
1555                         queueYIELD_IF_USING_PREEMPTION();
1556                     }
1557                     else
1558                     {
1559                         mtCOVERAGE_TEST_MARKER();
1560                     }
1561                 }
1562                 else
1563                 {
1564                     mtCOVERAGE_TEST_MARKER();
1565                 }
1566 
1567                 taskEXIT_CRITICAL();
1568 
1569                 traceRETURN_xQueueReceive( pdPASS );
1570 
1571                 return pdPASS;
1572             }
1573             else
1574             {
1575                 if( xTicksToWait == ( TickType_t ) 0 )
1576                 {
1577                     /* The queue was empty and no block time is specified (or
1578                      * the block time has expired) so leave now. */
1579                     taskEXIT_CRITICAL();
1580 
1581                     traceQUEUE_RECEIVE_FAILED( pxQueue );
1582                     traceRETURN_xQueueReceive( errQUEUE_EMPTY );
1583 
1584                     return errQUEUE_EMPTY;
1585                 }
1586                 else if( xEntryTimeSet == pdFALSE )
1587                 {
1588                     /* The queue was empty and a block time was specified so
1589                      * configure the timeout structure. */
1590                     vTaskInternalSetTimeOutState( &xTimeOut );
1591                     xEntryTimeSet = pdTRUE;
1592                 }
1593                 else
1594                 {
1595                     /* Entry time was already set. */
1596                     mtCOVERAGE_TEST_MARKER();
1597                 }
1598             }
1599         }
1600         taskEXIT_CRITICAL();
1601 
1602         /* Interrupts and other tasks can send to and receive from the queue
1603          * now the critical section has been exited. */
1604 
1605         vTaskSuspendAll();
1606         prvLockQueue( pxQueue );
1607 
1608         /* Update the timeout state to see if it has expired yet. */
1609         if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1610         {
1611             /* The timeout has not expired.  If the queue is still empty place
1612              * the task on the list of tasks waiting to receive from the queue. */
1613             if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1614             {
1615                 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
1616                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
1617                 prvUnlockQueue( pxQueue );
1618 
1619                 if( xTaskResumeAll() == pdFALSE )
1620                 {
1621                     taskYIELD_WITHIN_API();
1622                 }
1623                 else
1624                 {
1625                     mtCOVERAGE_TEST_MARKER();
1626                 }
1627             }
1628             else
1629             {
1630                 /* The queue contains data again.  Loop back to try and read the
1631                  * data. */
1632                 prvUnlockQueue( pxQueue );
1633                 ( void ) xTaskResumeAll();
1634             }
1635         }
1636         else
1637         {
1638             /* Timed out.  If there is no data in the queue exit, otherwise loop
1639              * back and attempt to read the data. */
1640             prvUnlockQueue( pxQueue );
1641             ( void ) xTaskResumeAll();
1642 
1643             if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1644             {
1645                 traceQUEUE_RECEIVE_FAILED( pxQueue );
1646                 traceRETURN_xQueueReceive( errQUEUE_EMPTY );
1647 
1648                 return errQUEUE_EMPTY;
1649             }
1650             else
1651             {
1652                 mtCOVERAGE_TEST_MARKER();
1653             }
1654         }
1655     }
1656 }
1657 /*-----------------------------------------------------------*/
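/* Illustrative usage sketch, not part of the kernel implementation, assuming a
 * hypothetical queue named xPointerQueue that was created to hold char pointers:
 *
 *  void vConsumerTask( void * pvParameters )
 *  {
 *      char * pcReceivedString;
 *
 *      for( ;; )
 *      {
 *          // Block for up to 100 ticks waiting for a pointer to arrive.
 *          if( xQueueReceive( xPointerQueue, &pcReceivedString, ( TickType_t ) 100 ) == pdPASS )
 *          {
 *              // An item was copied into pcReceivedString - process it here.
 *          }
 *          else
 *          {
 *              // Nothing arrived within 100 ticks (the errQUEUE_EMPTY path).
 *          }
 *      }
 *  }
 */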
1658 
1659 BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
1660                                 TickType_t xTicksToWait )
1661 {
1662     BaseType_t xEntryTimeSet = pdFALSE;
1663     TimeOut_t xTimeOut;
1664     Queue_t * const pxQueue = xQueue;
1665 
1666     #if ( configUSE_MUTEXES == 1 )
1667         BaseType_t xInheritanceOccurred = pdFALSE;
1668     #endif
1669 
1670     traceENTER_xQueueSemaphoreTake( xQueue, xTicksToWait );
1671 
1672     /* Check the queue pointer is not NULL. */
1673     configASSERT( ( pxQueue ) );
1674 
1675     /* Check this really is a semaphore, in which case the item size will be
1676      * 0. */
1677     configASSERT( pxQueue->uxItemSize == 0 );
1678 
1679     /* Cannot block if the scheduler is suspended. */
1680     #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
1681     {
1682         configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
1683     }
1684     #endif
1685 
1686     for( ; ; )
1687     {
1688         taskENTER_CRITICAL();
1689         {
1690             /* Semaphores are queues with an item size of 0, and where the
1691              * number of messages in the queue is the semaphore's count value. */
1692             const UBaseType_t uxSemaphoreCount = pxQueue->uxMessagesWaiting;
1693 
1694             /* Is there data in the queue now?  To be running the calling task
1695              * must be the highest priority task wanting to access the queue. */
1696             if( uxSemaphoreCount > ( UBaseType_t ) 0 )
1697             {
1698                 traceQUEUE_RECEIVE( pxQueue );
1699 
1700                 /* Semaphores are queues with a data size of zero and where the
1701                  * messages waiting is the semaphore's count.  Reduce the count. */
1702                 pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxSemaphoreCount - ( UBaseType_t ) 1 );
1703 
1704                 #if ( configUSE_MUTEXES == 1 )
1705                 {
1706                     if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1707                     {
1708                         /* Record the information required to implement
1709                          * priority inheritance should it become necessary. */
1710                         pxQueue->u.xSemaphore.xMutexHolder = pvTaskIncrementMutexHeldCount();
1711                     }
1712                     else
1713                     {
1714                         mtCOVERAGE_TEST_MARKER();
1715                     }
1716                 }
1717                 #endif /* configUSE_MUTEXES */
1718 
1719                 /* Check to see if other tasks are blocked waiting to give the
1720                  * semaphore, and if so, unblock the highest priority such task. */
1721                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
1722                 {
1723                     if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
1724                     {
1725                         queueYIELD_IF_USING_PREEMPTION();
1726                     }
1727                     else
1728                     {
1729                         mtCOVERAGE_TEST_MARKER();
1730                     }
1731                 }
1732                 else
1733                 {
1734                     mtCOVERAGE_TEST_MARKER();
1735                 }
1736 
1737                 taskEXIT_CRITICAL();
1738 
1739                 traceRETURN_xQueueSemaphoreTake( pdPASS );
1740 
1741                 return pdPASS;
1742             }
1743             else
1744             {
1745                 if( xTicksToWait == ( TickType_t ) 0 )
1746                 {
1747                     /* The semaphore count was 0 and no block time is specified
1748                      * (or the block time has expired) so exit now. */
1749                     taskEXIT_CRITICAL();
1750 
1751                     traceQUEUE_RECEIVE_FAILED( pxQueue );
1752                     traceRETURN_xQueueSemaphoreTake( errQUEUE_EMPTY );
1753 
1754                     return errQUEUE_EMPTY;
1755                 }
1756                 else if( xEntryTimeSet == pdFALSE )
1757                 {
1758                     /* The semaphore count was 0 and a block time was specified
1759                      * so configure the timeout structure ready to block. */
1760                     vTaskInternalSetTimeOutState( &xTimeOut );
1761                     xEntryTimeSet = pdTRUE;
1762                 }
1763                 else
1764                 {
1765                     /* Entry time was already set. */
1766                     mtCOVERAGE_TEST_MARKER();
1767                 }
1768             }
1769         }
1770         taskEXIT_CRITICAL();
1771 
1772         /* Interrupts and other tasks can give to and take from the semaphore
1773          * now the critical section has been exited. */
1774 
1775         vTaskSuspendAll();
1776         prvLockQueue( pxQueue );
1777 
1778         /* Update the timeout state to see if it has expired yet. */
1779         if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1780         {
1781             /* A block time is specified and not expired.  If the semaphore
1782              * count is 0 then enter the Blocked state to wait for a semaphore to
1783              * become available.  As semaphores are implemented with queues the
1784              * queue being empty is equivalent to the semaphore count being 0. */
1785             if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1786             {
1787                 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
1788 
1789                 #if ( configUSE_MUTEXES == 1 )
1790                 {
1791                     if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1792                     {
1793                         taskENTER_CRITICAL();
1794                         {
1795                             xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder );
1796                         }
1797                         taskEXIT_CRITICAL();
1798                     }
1799                     else
1800                     {
1801                         mtCOVERAGE_TEST_MARKER();
1802                     }
1803                 }
1804                 #endif /* if ( configUSE_MUTEXES == 1 ) */
1805 
1806                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
1807                 prvUnlockQueue( pxQueue );
1808 
1809                 if( xTaskResumeAll() == pdFALSE )
1810                 {
1811                     taskYIELD_WITHIN_API();
1812                 }
1813                 else
1814                 {
1815                     mtCOVERAGE_TEST_MARKER();
1816                 }
1817             }
1818             else
1819             {
1820                 /* There was no timeout and the semaphore count was not 0, so
1821                  * attempt to take the semaphore again. */
1822                 prvUnlockQueue( pxQueue );
1823                 ( void ) xTaskResumeAll();
1824             }
1825         }
1826         else
1827         {
1828             /* Timed out. */
1829             prvUnlockQueue( pxQueue );
1830             ( void ) xTaskResumeAll();
1831 
1832             /* If the semaphore count is 0 exit now as the timeout has
1833              * expired.  Otherwise return to attempt to take the semaphore that is
1834              * known to be available.  As semaphores are implemented by queues the
1835              * queue being empty is equivalent to the semaphore count being 0. */
1836             if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1837             {
1838                 #if ( configUSE_MUTEXES == 1 )
1839                 {
1840                     /* xInheritanceOccurred could only have been set if
1841                      * pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to
1842                      * test the mutex type again to check it is actually a mutex. */
1843                     if( xInheritanceOccurred != pdFALSE )
1844                     {
1845                         taskENTER_CRITICAL();
1846                         {
1847                             UBaseType_t uxHighestWaitingPriority;
1848 
1849                             /* This task blocking on the mutex caused another
1850                              * task to inherit this task's priority.  Now this task
1851                              * has timed out the priority should be disinherited
1852                              * again, but only as low as the next highest priority
1853                              * task that is waiting for the same mutex. */
1854                             uxHighestWaitingPriority = prvGetHighestPriorityOfWaitToReceiveList( pxQueue );
1855 
1856                             /* vTaskPriorityDisinheritAfterTimeout uses the uxHighestWaitingPriority
1857                              * parameter to index pxReadyTasksLists when adding the task holding
1858                              * mutex to the ready list for its new priority. Coverity thinks that
1859                              * it can result in out-of-bounds access which is not true because
1860                              * uxHighestWaitingPriority, as returned by prvGetHighestPriorityOfWaitToReceiveList,
1861                              * is capped at ( configMAX_PRIORITIES - 1 ). */
1862                             /* coverity[overrun] */
1863                             vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority );
1864                         }
1865                         taskEXIT_CRITICAL();
1866                     }
1867                 }
1868                 #endif /* configUSE_MUTEXES */
1869 
1870                 traceQUEUE_RECEIVE_FAILED( pxQueue );
1871                 traceRETURN_xQueueSemaphoreTake( errQUEUE_EMPTY );
1872 
1873                 return errQUEUE_EMPTY;
1874             }
1875             else
1876             {
1877                 mtCOVERAGE_TEST_MARKER();
1878             }
1879         }
1880     }
1881 }
1882 /*-----------------------------------------------------------*/
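/* Illustrative usage sketch, not part of the kernel implementation: the
 * xSemaphoreTake() macro in semphr.h maps onto this function.  The mutex
 * handle and helper below are hypothetical.
 *
 *  void vGuardedWrite( const char * pcText )
 *  {
 *      // Wait up to 10 ticks for the mutex.  While this task is blocked,
 *      // priority inheritance is applied to the current mutex holder.
 *      if( xSemaphoreTake( xConsoleMutex, ( TickType_t ) 10 ) == pdPASS )
 *      {
 *          // Access the shared resource, then give the mutex back.
 *          vWriteToConsole( pcText );          // hypothetical helper
 *          xSemaphoreGive( xConsoleMutex );
 *      }
 *  }
 */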
1883 
1884 BaseType_t xQueuePeek( QueueHandle_t xQueue,
1885                        void * const pvBuffer,
1886                        TickType_t xTicksToWait )
1887 {
1888     BaseType_t xEntryTimeSet = pdFALSE;
1889     TimeOut_t xTimeOut;
1890     int8_t * pcOriginalReadPosition;
1891     Queue_t * const pxQueue = xQueue;
1892 
1893     traceENTER_xQueuePeek( xQueue, pvBuffer, xTicksToWait );
1894 
1895     /* The buffer into which data is received can only be NULL if the data size
1896      * is zero (so no data is copied into the buffer). */
1897     configASSERT( ( pxQueue != NULL ) && !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );
1898 
1899     /* Cannot block if the scheduler is suspended. */
1900     #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
1901     {
1902         configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
1903     }
1904     #endif
1905 
1906     for( ; ; )
1907     {
1908         taskENTER_CRITICAL();
1909         {
1910             const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
1911 
1912             /* Is there data in the queue now?  To be running the calling task
1913              * must be the highest priority task wanting to access the queue. */
1914             if( uxMessagesWaiting > ( UBaseType_t ) 0 )
1915             {
1916                 /* Remember the read position so it can be reset after the data
1917                  * is read from the queue as this function is only peeking the
1918                  * data, not removing it. */
1919                 pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
1920 
1921                 prvCopyDataFromQueue( pxQueue, pvBuffer );
1922                 traceQUEUE_PEEK( pxQueue );
1923 
1924                 /* The data is not being removed, so reset the read pointer. */
1925                 pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;
1926 
1927                 /* The data is being left in the queue, so see if there are
1928                  * any other tasks waiting for the data. */
1929                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1930                 {
1931                     if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1932                     {
1933                         /* The task waiting has a higher priority than this task. */
1934                         queueYIELD_IF_USING_PREEMPTION();
1935                     }
1936                     else
1937                     {
1938                         mtCOVERAGE_TEST_MARKER();
1939                     }
1940                 }
1941                 else
1942                 {
1943                     mtCOVERAGE_TEST_MARKER();
1944                 }
1945 
1946                 taskEXIT_CRITICAL();
1947 
1948                 traceRETURN_xQueuePeek( pdPASS );
1949 
1950                 return pdPASS;
1951             }
1952             else
1953             {
1954                 if( xTicksToWait == ( TickType_t ) 0 )
1955                 {
1956                     /* The queue was empty and no block time is specified (or
1957                      * the block time has expired) so leave now. */
1958                     taskEXIT_CRITICAL();
1959 
1960                     traceQUEUE_PEEK_FAILED( pxQueue );
1961                     traceRETURN_xQueuePeek( errQUEUE_EMPTY );
1962 
1963                     return errQUEUE_EMPTY;
1964                 }
1965                 else if( xEntryTimeSet == pdFALSE )
1966                 {
1967                     /* The queue was empty and a block time was specified so
1968                      * configure the timeout structure ready to enter the blocked
1969                      * state. */
1970                     vTaskInternalSetTimeOutState( &xTimeOut );
1971                     xEntryTimeSet = pdTRUE;
1972                 }
1973                 else
1974                 {
1975                     /* Entry time was already set. */
1976                     mtCOVERAGE_TEST_MARKER();
1977                 }
1978             }
1979         }
1980         taskEXIT_CRITICAL();
1981 
1982         /* Interrupts and other tasks can send to and receive from the queue
1983          * now that the critical section has been exited. */
1984 
1985         vTaskSuspendAll();
1986         prvLockQueue( pxQueue );
1987 
1988         /* Update the timeout state to see if it has expired yet. */
1989         if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1990         {
1991             /* Timeout has not expired yet, check to see if there is data in the
1992              * queue now, and if not enter the Blocked state to wait for data. */
1993             if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1994             {
1995                 traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
1996                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
1997                 prvUnlockQueue( pxQueue );
1998 
1999                 if( xTaskResumeAll() == pdFALSE )
2000                 {
2001                     taskYIELD_WITHIN_API();
2002                 }
2003                 else
2004                 {
2005                     mtCOVERAGE_TEST_MARKER();
2006                 }
2007             }
2008             else
2009             {
2010                 /* There is data in the queue now, so don't enter the blocked
2011                  * state, instead return to try and obtain the data. */
2012                 prvUnlockQueue( pxQueue );
2013                 ( void ) xTaskResumeAll();
2014             }
2015         }
2016         else
2017         {
2018             /* The timeout has expired.  If there is still no data in the queue
2019              * exit, otherwise go back and try to read the data again. */
2020             prvUnlockQueue( pxQueue );
2021             ( void ) xTaskResumeAll();
2022 
2023             if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
2024             {
2025                 traceQUEUE_PEEK_FAILED( pxQueue );
2026                 traceRETURN_xQueuePeek( errQUEUE_EMPTY );
2027 
2028                 return errQUEUE_EMPTY;
2029             }
2030             else
2031             {
2032                 mtCOVERAGE_TEST_MARKER();
2033             }
2034         }
2035     }
2036 }
2037 /*-----------------------------------------------------------*/
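/* Illustrative usage sketch, not part of the kernel implementation, using the
 * same hypothetical xPointerQueue as in the xQueueReceive() sketch above:
 *
 *  char * pcPeekedString;
 *
 *  // Look at the item at the head of the queue without removing it, waiting
 *  // up to 50 ticks for data to become available.
 *  if( xQueuePeek( xPointerQueue, &pcPeekedString, ( TickType_t ) 50 ) == pdPASS )
 *  {
 *      // pcPeekedString now holds a copy of the head item; the item itself
 *      // remains in the queue for a later xQueueReceive().
 *  }
 */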
2038 
2039 BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
2040                                  void * const pvBuffer,
2041                                  BaseType_t * const pxHigherPriorityTaskWoken )
2042 {
2043     BaseType_t xReturn;
2044     UBaseType_t uxSavedInterruptStatus;
2045     Queue_t * const pxQueue = xQueue;
2046 
2047     traceENTER_xQueueReceiveFromISR( xQueue, pvBuffer, pxHigherPriorityTaskWoken );
2048 
2049     configASSERT( pxQueue );
2050     configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
2051 
2052     /* RTOS ports that support interrupt nesting have the concept of a maximum
2053      * system call (or maximum API call) interrupt priority.  Interrupts that are
2054      * above the maximum system call priority are kept permanently enabled, even
2055      * when the RTOS kernel is in a critical section, but cannot make any calls to
2056      * FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
2057      * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2058      * failure if a FreeRTOS API function is called from an interrupt that has been
2059      * assigned a priority above the configured maximum system call priority.
2060      * Only FreeRTOS functions that end in FromISR can be called from interrupts
2061      * that have been assigned a priority at or (logically) below the maximum
2062      * system call interrupt priority.  FreeRTOS maintains a separate interrupt
2063      * safe API to ensure interrupt entry is as fast and as simple as possible.
2064      * More information (albeit Cortex-M specific) is provided on the following
2065      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
2066     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
2067 
2068     /* MISRA Ref 4.7.1 [Return value shall be checked] */
2069     /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
2070     /* coverity[misra_c_2012_directive_4_7_violation] */
2071     uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
2072     {
2073         const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
2074 
2075         /* Cannot block in an ISR, so check there is data available. */
2076         if( uxMessagesWaiting > ( UBaseType_t ) 0 )
2077         {
2078             const int8_t cRxLock = pxQueue->cRxLock;
2079 
2080             traceQUEUE_RECEIVE_FROM_ISR( pxQueue );
2081 
2082             prvCopyDataFromQueue( pxQueue, pvBuffer );
2083             pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting - ( UBaseType_t ) 1 );
2084 
2085             /* If the queue is locked the event list will not be modified.
2086              * Instead update the lock count so the task that unlocks the queue
2087              * will know that an ISR has removed data while the queue was
2088              * locked. */
2089             if( cRxLock == queueUNLOCKED )
2090             {
2091                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2092                 {
2093                     if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2094                     {
2095                         /* The task waiting has a higher priority than us so
2096                          * force a context switch. */
2097                         if( pxHigherPriorityTaskWoken != NULL )
2098                         {
2099                             *pxHigherPriorityTaskWoken = pdTRUE;
2100                         }
2101                         else
2102                         {
2103                             mtCOVERAGE_TEST_MARKER();
2104                         }
2105                     }
2106                     else
2107                     {
2108                         mtCOVERAGE_TEST_MARKER();
2109                     }
2110                 }
2111                 else
2112                 {
2113                     mtCOVERAGE_TEST_MARKER();
2114                 }
2115             }
2116             else
2117             {
2118                 /* Increment the lock count so the task that unlocks the queue
2119                  * knows that data was removed while it was locked. */
2120                 prvIncrementQueueRxLock( pxQueue, cRxLock );
2121             }
2122 
2123             xReturn = pdPASS;
2124         }
2125         else
2126         {
2127             xReturn = pdFAIL;
2128             traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
2129         }
2130     }
2131     taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
2132 
2133     traceRETURN_xQueueReceiveFromISR( xReturn );
2134 
2135     return xReturn;
2136 }
2137 /*-----------------------------------------------------------*/
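/* Illustrative usage sketch, not part of the kernel implementation: draining a
 * queue from an interrupt, for example to feed a transmit register.  The
 * handler, queue handle and peripheral helper are hypothetical.
 *
 *  void vTxInterruptHandler( void )
 *  {
 *      BaseType_t xTaskWoken = pdFALSE;
 *      char cByteToSend;
 *
 *      // Blocking is not possible in an ISR, so just keep taking items while
 *      // data is available.
 *      while( xQueueReceiveFromISR( xTxQueue, &cByteToSend, &xTaskWoken ) == pdPASS )
 *      {
 *          vWriteByteToPeripheral( cByteToSend );   // hypothetical helper
 *      }
 *
 *      portYIELD_FROM_ISR( xTaskWoken );
 *  }
 */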
2138 
2139 BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
2140                               void * const pvBuffer )
2141 {
2142     BaseType_t xReturn;
2143     UBaseType_t uxSavedInterruptStatus;
2144     int8_t * pcOriginalReadPosition;
2145     Queue_t * const pxQueue = xQueue;
2146 
2147     traceENTER_xQueuePeekFromISR( xQueue, pvBuffer );
2148 
2149     configASSERT( ( pxQueue != NULL ) && !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
2150     configASSERT( ( pxQueue != NULL ) && ( pxQueue->uxItemSize != 0 ) ); /* Can't peek a semaphore. */
2151 
2152     /* RTOS ports that support interrupt nesting have the concept of a maximum
2153      * system call (or maximum API call) interrupt priority.  Interrupts that are
2154      * above the maximum system call priority are kept permanently enabled, even
2155      * when the RTOS kernel is in a critical section, but cannot make any calls to
2156      * FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
2157      * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2158      * failure if a FreeRTOS API function is called from an interrupt that has been
2159      * assigned a priority above the configured maximum system call priority.
2160      * Only FreeRTOS functions that end in FromISR can be called from interrupts
2161      * that have been assigned a priority at or (logically) below the maximum
2162      * system call interrupt priority.  FreeRTOS maintains a separate interrupt
2163      * safe API to ensure interrupt entry is as fast and as simple as possible.
2164      * More information (albeit Cortex-M specific) is provided on the following
2165      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
2166     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
2167 
2168     /* MISRA Ref 4.7.1 [Return value shall be checked] */
2169     /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
2170     /* coverity[misra_c_2012_directive_4_7_violation] */
2171     uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
2172     {
2173         /* Cannot block in an ISR, so check there is data available. */
2174         if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2175         {
2176             traceQUEUE_PEEK_FROM_ISR( pxQueue );
2177 
2178             /* Remember the read position so it can be reset as nothing is
2179              * actually being removed from the queue. */
2180             pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
2181             prvCopyDataFromQueue( pxQueue, pvBuffer );
2182             pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;
2183 
2184             xReturn = pdPASS;
2185         }
2186         else
2187         {
2188             xReturn = pdFAIL;
2189             traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
2190         }
2191     }
2192     taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
2193 
2194     traceRETURN_xQueuePeekFromISR( xReturn );
2195 
2196     return xReturn;
2197 }
2198 /*-----------------------------------------------------------*/
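/* Illustrative usage sketch, not part of the kernel implementation: peeking
 * from an ISR never blocks and never removes the item.  The type, queue handle
 * and handler below are hypothetical.
 *
 *  void vStatusISR( void )
 *  {
 *      StatusMessage_t xStatus;    // hypothetical item type
 *
 *      if( xQueuePeekFromISR( xStatusQueue, &xStatus ) == pdPASS )
 *      {
 *          // xStatus is a copy of the head item; the queue itself is unchanged.
 *      }
 *  }
 */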
2199 
2200 UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
2201 {
2202     UBaseType_t uxReturn;
2203 
2204     traceENTER_uxQueueMessagesWaiting( xQueue );
2205 
2206     configASSERT( xQueue );
2207 
2208     portBASE_TYPE_ENTER_CRITICAL();
2209     {
2210         uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
2211     }
2212     portBASE_TYPE_EXIT_CRITICAL();
2213 
2214     traceRETURN_uxQueueMessagesWaiting( uxReturn );
2215 
2216     return uxReturn;
2217 }
2218 /*-----------------------------------------------------------*/
2219 
2220 UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
2221 {
2222     UBaseType_t uxReturn;
2223     Queue_t * const pxQueue = xQueue;
2224 
2225     traceENTER_uxQueueSpacesAvailable( xQueue );
2226 
2227     configASSERT( pxQueue );
2228 
2229     portBASE_TYPE_ENTER_CRITICAL();
2230     {
2231         uxReturn = ( UBaseType_t ) ( pxQueue->uxLength - pxQueue->uxMessagesWaiting );
2232     }
2233     portBASE_TYPE_EXIT_CRITICAL();
2234 
2235     traceRETURN_uxQueueSpacesAvailable( uxReturn );
2236 
2237     return uxReturn;
2238 }
2239 /*-----------------------------------------------------------*/
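/* Illustrative note and sketch, not part of the kernel implementation: the two
 * query functions above are complementary - for a queue of length N,
 * uxQueueMessagesWaiting() + uxQueueSpacesAvailable() equals N at the instant
 * each snapshot is taken.  The queue handle below is hypothetical.
 *
 *  UBaseType_t uxUsed = uxQueueMessagesWaiting( xRxQueue );
 *  UBaseType_t uxFree = uxQueueSpacesAvailable( xRxQueue );
 *
 *  // Either value can change immediately afterwards if other tasks or
 *  // interrupts access the queue, so treat them as hints rather than guarantees.
 */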
2240 
2241 UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
2242 {
2243     UBaseType_t uxReturn;
2244     Queue_t * const pxQueue = xQueue;
2245 
2246     traceENTER_uxQueueMessagesWaitingFromISR( xQueue );
2247 
2248     configASSERT( pxQueue );
2249     uxReturn = pxQueue->uxMessagesWaiting;
2250 
2251     traceRETURN_uxQueueMessagesWaitingFromISR( uxReturn );
2252 
2253     return uxReturn;
2254 }
2255 /*-----------------------------------------------------------*/
2256 
2257 void vQueueDelete( QueueHandle_t xQueue )
2258 {
2259     Queue_t * const pxQueue = xQueue;
2260 
2261     traceENTER_vQueueDelete( xQueue );
2262 
2263     configASSERT( pxQueue );
2264     traceQUEUE_DELETE( pxQueue );
2265 
2266     #if ( configQUEUE_REGISTRY_SIZE > 0 )
2267     {
2268         vQueueUnregisterQueue( pxQueue );
2269     }
2270     #endif
2271 
2272     #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
2273     {
2274         /* The queue can only have been allocated dynamically - free it
2275          * again. */
2276         vPortFree( pxQueue );
2277     }
2278     #elif ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
2279     {
2280         /* The queue could have been allocated statically or dynamically, so
2281          * check before attempting to free the memory. */
2282         if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
2283         {
2284             vPortFree( pxQueue );
2285         }
2286         else
2287         {
2288             mtCOVERAGE_TEST_MARKER();
2289         }
2290     }
2291     #else /* if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) */
2292     {
2293         /* The queue must have been statically allocated, so is not going to be
2294          * deleted.  Avoid compiler warnings about the unused parameter. */
2295         ( void ) pxQueue;
2296     }
2297     #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
2298 
2299     traceRETURN_vQueueDelete();
2300 }
2301 /*-----------------------------------------------------------*/
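/* Illustrative usage sketch, not part of the kernel implementation: deleting a
 * dynamically created queue once it is no longer needed.  As handled above,
 * only dynamically allocated queue structures are actually freed.  The handle
 * below is hypothetical.
 *
 *  QueueHandle_t xTempQueue = xQueueCreate( 4, sizeof( uint32_t ) );
 *
 *  if( xTempQueue != NULL )
 *  {
 *      // ... use the queue ...
 *
 *      // Ensure no task is blocked on the queue, then release it.
 *      vQueueDelete( xTempQueue );
 *  }
 */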
2302 
2303 #if ( configUSE_TRACE_FACILITY == 1 )
2304 
2305     UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
2306     {
2307         traceENTER_uxQueueGetQueueNumber( xQueue );
2308 
2309         traceRETURN_uxQueueGetQueueNumber( ( ( Queue_t * ) xQueue )->uxQueueNumber );
2310 
2311         return ( ( Queue_t * ) xQueue )->uxQueueNumber;
2312     }
2313 
2314 #endif /* configUSE_TRACE_FACILITY */
2315 /*-----------------------------------------------------------*/
2316 
2317 #if ( configUSE_TRACE_FACILITY == 1 )
2318 
2319     void vQueueSetQueueNumber( QueueHandle_t xQueue,
2320                                UBaseType_t uxQueueNumber )
2321     {
2322         traceENTER_vQueueSetQueueNumber( xQueue, uxQueueNumber );
2323 
2324         ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
2325 
2326         traceRETURN_vQueueSetQueueNumber();
2327     }
2328 
2329 #endif /* configUSE_TRACE_FACILITY */
2330 /*-----------------------------------------------------------*/
2331 
2332 #if ( configUSE_TRACE_FACILITY == 1 )
2333 
2334     uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
2335     {
2336         traceENTER_ucQueueGetQueueType( xQueue );
2337 
2338         traceRETURN_ucQueueGetQueueType( ( ( Queue_t * ) xQueue )->ucQueueType );
2339 
2340         return ( ( Queue_t * ) xQueue )->ucQueueType;
2341     }
2342 
2343 #endif /* configUSE_TRACE_FACILITY */
2344 /*-----------------------------------------------------------*/
2345 
2346 UBaseType_t uxQueueGetQueueItemSize( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
2347 {
2348     traceENTER_uxQueueGetQueueItemSize( xQueue );
2349 
2350     traceRETURN_uxQueueGetQueueItemSize( ( ( Queue_t * ) xQueue )->uxItemSize );
2351 
2352     return ( ( Queue_t * ) xQueue )->uxItemSize;
2353 }
2354 /*-----------------------------------------------------------*/
2355 
2356 UBaseType_t uxQueueGetQueueLength( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
2357 {
2358     traceENTER_uxQueueGetQueueLength( xQueue );
2359 
2360     traceRETURN_uxQueueGetQueueLength( ( ( Queue_t * ) xQueue )->uxLength );
2361 
2362     return ( ( Queue_t * ) xQueue )->uxLength;
2363 }
2364 /*-----------------------------------------------------------*/
2365 
2366 #if ( configUSE_MUTEXES == 1 )
2367 
2368     static UBaseType_t prvGetHighestPriorityOfWaitToReceiveList( const Queue_t * const pxQueue )
2369     {
2370         UBaseType_t uxHighestPriorityOfWaitingTasks;
2371 
2372         /* If a task waiting for a mutex causes the mutex holder to inherit a
2373          * priority, but the waiting task times out, then the holder should
2374          * disinherit the priority - but only down to the highest priority of any
2375          * other tasks that are waiting for the same mutex.  For this purpose,
2376          * return the priority of the highest priority task that is waiting for the
2377          * mutex. */
2378         if( listCURRENT_LIST_LENGTH( &( pxQueue->xTasksWaitingToReceive ) ) > 0U )
2379         {
2380             uxHighestPriorityOfWaitingTasks = ( UBaseType_t ) ( ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) listGET_ITEM_VALUE_OF_HEAD_ENTRY( &( pxQueue->xTasksWaitingToReceive ) ) );
2381         }
2382         else
2383         {
2384             uxHighestPriorityOfWaitingTasks = tskIDLE_PRIORITY;
2385         }
2386 
2387         return uxHighestPriorityOfWaitingTasks;
2388     }
2389 
2390 #endif /* configUSE_MUTEXES */
2391 /*-----------------------------------------------------------*/
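/* Worked example of the mapping used above (an illustration, not kernel code):
 * tasks are placed on event lists with an event list item value of
 * ( configMAX_PRIORITIES - task priority ), so the list head always holds the
 * highest priority waiter.  With configMAX_PRIORITIES set to 8 and a waiting
 * task of priority 5, the head item value is 8 - 5 = 3, and the function
 * returns 8 - 3 = 5, i.e. the priority of the highest priority waiter. */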
2392 
2393 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
2394                                       const void * pvItemToQueue,
2395                                       const BaseType_t xPosition )
2396 {
2397     BaseType_t xReturn = pdFALSE;
2398     UBaseType_t uxMessagesWaiting;
2399 
2400     /* This function is called from a critical section. */
2401 
2402     uxMessagesWaiting = pxQueue->uxMessagesWaiting;
2403 
2404     if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
2405     {
2406         #if ( configUSE_MUTEXES == 1 )
2407         {
2408             if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
2409             {
2410                 /* The mutex is no longer being held. */
2411                 xReturn = xTaskPriorityDisinherit( pxQueue->u.xSemaphore.xMutexHolder );
2412                 pxQueue->u.xSemaphore.xMutexHolder = NULL;
2413             }
2414             else
2415             {
2416                 mtCOVERAGE_TEST_MARKER();
2417             }
2418         }
2419         #endif /* configUSE_MUTEXES */
2420     }
2421     else if( xPosition == queueSEND_TO_BACK )
2422     {
2423         ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize );
2424         pxQueue->pcWriteTo += pxQueue->uxItemSize;
2425 
2426         if( pxQueue->pcWriteTo >= pxQueue->u.xQueue.pcTail )
2427         {
2428             pxQueue->pcWriteTo = pxQueue->pcHead;
2429         }
2430         else
2431         {
2432             mtCOVERAGE_TEST_MARKER();
2433         }
2434     }
2435     else
2436     {
2437         ( void ) memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize );
2438         pxQueue->u.xQueue.pcReadFrom -= pxQueue->uxItemSize;
2439 
2440         if( pxQueue->u.xQueue.pcReadFrom < pxQueue->pcHead )
2441         {
2442             pxQueue->u.xQueue.pcReadFrom = ( pxQueue->u.xQueue.pcTail - pxQueue->uxItemSize );
2443         }
2444         else
2445         {
2446             mtCOVERAGE_TEST_MARKER();
2447         }
2448 
2449         if( xPosition == queueOVERWRITE )
2450         {
2451             if( uxMessagesWaiting > ( UBaseType_t ) 0 )
2452             {
2453                 /* An item is not being added but overwritten, so subtract
2454                  * one from the recorded number of items in the queue so when
2455                  * one is added again below the number of recorded items remains
2456                  * correct. */
2457                 --uxMessagesWaiting;
2458             }
2459             else
2460             {
2461                 mtCOVERAGE_TEST_MARKER();
2462             }
2463         }
2464         else
2465         {
2466             mtCOVERAGE_TEST_MARKER();
2467         }
2468     }
2469 
2470     pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting + ( UBaseType_t ) 1 );
2471 
2472     return xReturn;
2473 }
2474 /*-----------------------------------------------------------*/
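/* Illustrative note and sketch, not part of the kernel implementation: the
 * xPosition argument above is supplied by the public send macros -
 * queueSEND_TO_BACK for xQueueSend()/xQueueSendToBack(), queueSEND_TO_FRONT
 * for xQueueSendToFront(), and queueOVERWRITE for xQueueOverwrite(), which is
 * only valid on queues of length one.  The queue handles are hypothetical.
 *
 *  uint32_t ulValue = 10;
 *
 *  // Each of these reaches prvCopyDataToQueue() via xQueueGenericSend().
 *  xQueueSendToBack( xValueQueue, &ulValue, 0 );    // copy behind existing items
 *  xQueueSendToFront( xValueQueue, &ulValue, 0 );   // copy ahead of existing items
 *  xQueueOverwrite( xSingleItemQueue, &ulValue );   // replace the single item
 */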
2475 
2476 static void prvCopyDataFromQueue( Queue_t * const pxQueue,
2477                                   void * const pvBuffer )
2478 {
2479     if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
2480     {
2481         pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
2482 
2483         if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail )
2484         {
2485             pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2486         }
2487         else
2488         {
2489             mtCOVERAGE_TEST_MARKER();
2490         }
2491 
2492         ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize );
2493     }
2494 }
2495 /*-----------------------------------------------------------*/
2496 
2497 static void prvUnlockQueue( Queue_t * const pxQueue )
2498 {
2499     /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
2500 
2501     /* The lock counts contain the number of extra data items placed in or
2502      * removed from the queue while the queue was locked.  When a queue is
2503      * locked, items can be added or removed, but the event lists cannot be
2504      * updated. */
2505     taskENTER_CRITICAL();
2506     {
2507         int8_t cTxLock = pxQueue->cTxLock;
2508 
2509         /* See if data was added to the queue while it was locked. */
2510         while( cTxLock > queueLOCKED_UNMODIFIED )
2511         {
2512             /* Data was posted while the queue was locked.  Are any tasks
2513              * blocked waiting for data to become available? */
2514             #if ( configUSE_QUEUE_SETS == 1 )
2515             {
2516                 if( pxQueue->pxQueueSetContainer != NULL )
2517                 {
2518                     if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
2519                     {
2520                         /* The queue is a member of a queue set, and posting to
2521                          * the queue set caused a higher priority task to unblock.
2522                          * A context switch is required. */
2523                         vTaskMissedYield();
2524                     }
2525                     else
2526                     {
2527                         mtCOVERAGE_TEST_MARKER();
2528                     }
2529                 }
2530                 else
2531                 {
2532                     /* Tasks that are removed from the event list will get
2533                      * added to the pending ready list as the scheduler is still
2534                      * suspended. */
2535                     if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2536                     {
2537                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2538                         {
2539                             /* The task waiting has a higher priority so record that a
2540                              * context switch is required. */
2541                             vTaskMissedYield();
2542                         }
2543                         else
2544                         {
2545                             mtCOVERAGE_TEST_MARKER();
2546                         }
2547                     }
2548                     else
2549                     {
2550                         break;
2551                     }
2552                 }
2553             }
2554             #else /* configUSE_QUEUE_SETS */
2555             {
2556                 /* Tasks that are removed from the event list will get added to
2557                  * the pending ready list as the scheduler is still suspended. */
2558                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2559                 {
2560                     if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2561                     {
2562                         /* The task waiting has a higher priority so record that
2563                          * a context switch is required. */
2564                         vTaskMissedYield();
2565                     }
2566                     else
2567                     {
2568                         mtCOVERAGE_TEST_MARKER();
2569                     }
2570                 }
2571                 else
2572                 {
2573                     break;
2574                 }
2575             }
2576             #endif /* configUSE_QUEUE_SETS */
2577 
2578             --cTxLock;
2579         }
2580 
2581         pxQueue->cTxLock = queueUNLOCKED;
2582     }
2583     taskEXIT_CRITICAL();
2584 
2585     /* Do the same for the Rx lock. */
2586     taskENTER_CRITICAL();
2587     {
2588         int8_t cRxLock = pxQueue->cRxLock;
2589 
2590         while( cRxLock > queueLOCKED_UNMODIFIED )
2591         {
2592             if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2593             {
2594                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2595                 {
2596                     vTaskMissedYield();
2597                 }
2598                 else
2599                 {
2600                     mtCOVERAGE_TEST_MARKER();
2601                 }
2602 
2603                 --cRxLock;
2604             }
2605             else
2606             {
2607                 break;
2608             }
2609         }
2610 
2611         pxQueue->cRxLock = queueUNLOCKED;
2612     }
2613     taskEXIT_CRITICAL();
2614 }
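/* Illustrative note, not part of the kernel source: the lock counts only rise
 * when an interrupt posts to, or reads from, the queue while a task level
 * operation holds the queue locked.  A sketch of the sequence this function
 * unwinds:
 *
 *     prvLockQueue( pxQueue );      // task level, scheduler suspended
 *     // an ISR calls xQueueSendFromISR(), which bumps cTxLock via
 *     // prvIncrementQueueTxLock() instead of touching the event lists
 *     prvUnlockQueue( pxQueue );    // drains cTxLock and wakes waiting receivers
 *
 * Each count drained can unblock at most one task, hence the per iteration
 * decrement of cTxLock and cRxLock in the loops above. */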
2615 /*-----------------------------------------------------------*/
2616 
2617 static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
2618 {
2619     BaseType_t xReturn;
2620 
2621     taskENTER_CRITICAL();
2622     {
2623         if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2624         {
2625             xReturn = pdTRUE;
2626         }
2627         else
2628         {
2629             xReturn = pdFALSE;
2630         }
2631     }
2632     taskEXIT_CRITICAL();
2633 
2634     return xReturn;
2635 }
2636 /*-----------------------------------------------------------*/
2637 
2638 BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
2639 {
2640     BaseType_t xReturn;
2641     Queue_t * const pxQueue = xQueue;
2642 
2643     traceENTER_xQueueIsQueueEmptyFromISR( xQueue );
2644 
2645     configASSERT( pxQueue );
2646 
2647     if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2648     {
2649         xReturn = pdTRUE;
2650     }
2651     else
2652     {
2653         xReturn = pdFALSE;
2654     }
2655 
2656     traceRETURN_xQueueIsQueueEmptyFromISR( xReturn );
2657 
2658     return xReturn;
2659 }
2660 /*-----------------------------------------------------------*/
2661 
2662 static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
2663 {
2664     BaseType_t xReturn;
2665 
2666     taskENTER_CRITICAL();
2667     {
2668         if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
2669         {
2670             xReturn = pdTRUE;
2671         }
2672         else
2673         {
2674             xReturn = pdFALSE;
2675         }
2676     }
2677     taskEXIT_CRITICAL();
2678 
2679     return xReturn;
2680 }
2681 /*-----------------------------------------------------------*/
2682 
2683 BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
2684 {
2685     BaseType_t xReturn;
2686     Queue_t * const pxQueue = xQueue;
2687 
2688     traceENTER_xQueueIsQueueFullFromISR( xQueue );
2689 
2690     configASSERT( pxQueue );
2691 
2692     if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
2693     {
2694         xReturn = pdTRUE;
2695     }
2696     else
2697     {
2698         xReturn = pdFALSE;
2699     }
2700 
2701     traceRETURN_xQueueIsQueueFullFromISR( xReturn );
2702 
2703     return xReturn;
2704 }
2705 /*-----------------------------------------------------------*/
2706 
2707 #if ( configUSE_CO_ROUTINES == 1 )
2708 
2709     BaseType_t xQueueCRSend( QueueHandle_t xQueue,
2710                              const void * pvItemToQueue,
2711                              TickType_t xTicksToWait )
2712     {
2713         BaseType_t xReturn;
2714         Queue_t * const pxQueue = xQueue;
2715 
2716         traceENTER_xQueueCRSend( xQueue, pvItemToQueue, xTicksToWait );
2717 
2718         /* If the queue is already full we may have to block.  A critical section
2719          * is required to prevent an interrupt removing something from the queue
2720          * between the check to see if the queue is full and blocking on the queue. */
2721         portDISABLE_INTERRUPTS();
2722         {
2723             if( prvIsQueueFull( pxQueue ) != pdFALSE )
2724             {
2725                 /* The queue is full - do we want to block or just leave without
2726                  * posting? */
2727                 if( xTicksToWait > ( TickType_t ) 0 )
2728                 {
2729                     /* As this is called from a coroutine we cannot block directly, but
2730                      * return indicating that we need to block. */
2731                     vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
2732                     portENABLE_INTERRUPTS();
2733                     return errQUEUE_BLOCKED;
2734                 }
2735                 else
2736                 {
2737                     portENABLE_INTERRUPTS();
2738                     return errQUEUE_FULL;
2739                 }
2740             }
2741         }
2742         portENABLE_INTERRUPTS();
2743 
2744         portDISABLE_INTERRUPTS();
2745         {
2746             if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
2747             {
2748                 /* There is room in the queue, copy the data into the queue. */
2749                 prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
2750                 xReturn = pdPASS;
2751 
2752                 /* Were any co-routines waiting for data to become available? */
2753                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2754                 {
2755                     /* In this instance the co-routine could be placed directly
2756                      * into the ready list as we are within a critical section.
2757                      * Instead the same pending ready list mechanism is used as if
2758                      * the event were caused from within an interrupt. */
2759                     if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2760                     {
2761                         /* The co-routine waiting has a higher priority so record
2762                          * that a yield might be appropriate. */
2763                         xReturn = errQUEUE_YIELD;
2764                     }
2765                     else
2766                     {
2767                         mtCOVERAGE_TEST_MARKER();
2768                     }
2769                 }
2770                 else
2771                 {
2772                     mtCOVERAGE_TEST_MARKER();
2773                 }
2774             }
2775             else
2776             {
2777                 xReturn = errQUEUE_FULL;
2778             }
2779         }
2780         portENABLE_INTERRUPTS();
2781 
2782         traceRETURN_xQueueCRSend( xReturn );
2783 
2784         return xReturn;
2785     }
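    /* Illustrative usage, not part of the kernel source: application code
     * normally reaches xQueueCRSend() through the crQUEUE_SEND() macro in
     * croutine.h, which yields the co-routine when errQUEUE_BLOCKED is
     * returned.  A minimal sketch, assuming xCoRoutineQueue was created by
     * the application:
     *
     *     void vPostingCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
     *     {
     *         // Co-routine stack variables must be static to survive yields.
     *         static uint32_t ulValueToPost = 0;
     *         static BaseType_t xResult;
     *
     *         crSTART( xHandle );
     *
     *         for( ;; )
     *         {
     *             crQUEUE_SEND( xHandle, xCoRoutineQueue, &ulValueToPost, portMAX_DELAY, &xResult );
     *             ulValueToPost++;
     *         }
     *
     *         crEND();
     *     }
     */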
2786 
2787 #endif /* configUSE_CO_ROUTINES */
2788 /*-----------------------------------------------------------*/
2789 
2790 #if ( configUSE_CO_ROUTINES == 1 )
2791 
2792     BaseType_t xQueueCRReceive( QueueHandle_t xQueue,
2793                                 void * pvBuffer,
2794                                 TickType_t xTicksToWait )
2795     {
2796         BaseType_t xReturn;
2797         Queue_t * const pxQueue = xQueue;
2798 
2799         traceENTER_xQueueCRReceive( xQueue, pvBuffer, xTicksToWait );
2800 
2801         /* If the queue is already empty we may have to block.  A critical section
2802          * is required to prevent an interrupt adding something to the queue
2803          * between the check to see if the queue is empty and blocking on the queue. */
2804         portDISABLE_INTERRUPTS();
2805         {
2806             if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2807             {
2808                 /* There are no messages in the queue, do we want to block or just
2809                  * leave with nothing? */
2810                 if( xTicksToWait > ( TickType_t ) 0 )
2811                 {
2812                     /* As this is a co-routine we cannot block directly, but return
2813                      * indicating that we need to block. */
2814                     vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
2815                     portENABLE_INTERRUPTS();
2816                     return errQUEUE_BLOCKED;
2817                 }
2818                 else
2819                 {
2820                     portENABLE_INTERRUPTS();
2821                     return errQUEUE_FULL;
2822                 }
2823             }
2824             else
2825             {
2826                 mtCOVERAGE_TEST_MARKER();
2827             }
2828         }
2829         portENABLE_INTERRUPTS();
2830 
2831         portDISABLE_INTERRUPTS();
2832         {
2833             if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2834             {
2835                 /* Data is available from the queue. */
2836                 pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
2837 
2838                 if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail )
2839                 {
2840                     pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2841                 }
2842                 else
2843                 {
2844                     mtCOVERAGE_TEST_MARKER();
2845                 }
2846 
2847                 --( pxQueue->uxMessagesWaiting );
2848                 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
2849 
2850                 xReturn = pdPASS;
2851 
2852                 /* Were any co-routines waiting for space to become available? */
2853                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2854                 {
2855                     /* In this instance the co-routine could be placed directly
2856                      * into the ready list as we are within a critical section.
2857                      * Instead the same pending ready list mechanism is used as if
2858                      * the event were caused from within an interrupt. */
2859                     if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2860                     {
2861                         xReturn = errQUEUE_YIELD;
2862                     }
2863                     else
2864                     {
2865                         mtCOVERAGE_TEST_MARKER();
2866                     }
2867                 }
2868                 else
2869                 {
2870                     mtCOVERAGE_TEST_MARKER();
2871                 }
2872             }
2873             else
2874             {
2875                 xReturn = pdFAIL;
2876             }
2877         }
2878         portENABLE_INTERRUPTS();
2879 
2880         traceRETURN_xQueueCRReceive( xReturn );
2881 
2882         return xReturn;
2883     }
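    /* Illustrative usage, not part of the kernel source: the receiving side
     * mirrors the send example above, using the crQUEUE_RECEIVE() macro from
     * croutine.h rather than calling xQueueCRReceive() directly.  Sketch only:
     *
     *     static uint32_t ulReceivedValue;
     *     static BaseType_t xResult;
     *
     *     crQUEUE_RECEIVE( xHandle, xCoRoutineQueue, &ulReceivedValue, portMAX_DELAY, &xResult );
     *
     *     if( xResult == pdPASS )
     *     {
     *         // ulReceivedValue now holds the item copied out of the queue.
     *     }
     */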
2884 
2885 #endif /* configUSE_CO_ROUTINES */
2886 /*-----------------------------------------------------------*/
2887 
2888 #if ( configUSE_CO_ROUTINES == 1 )
2889 
2890     BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue,
2891                                     const void * pvItemToQueue,
2892                                     BaseType_t xCoRoutinePreviouslyWoken )
2893     {
2894         Queue_t * const pxQueue = xQueue;
2895 
2896         traceENTER_xQueueCRSendFromISR( xQueue, pvItemToQueue, xCoRoutinePreviouslyWoken );
2897 
2898         /* Cannot block within an ISR so if there is no space on the queue then
2899          * exit without doing anything. */
2900         if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
2901         {
2902             prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
2903 
2904             /* We only want to wake one co-routine per ISR, so check that a
2905              * co-routine has not already been woken. */
2906             if( xCoRoutinePreviouslyWoken == pdFALSE )
2907             {
2908                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2909                 {
2910                     if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2911                     {
2912                         return pdTRUE;
2913                     }
2914                     else
2915                     {
2916                         mtCOVERAGE_TEST_MARKER();
2917                     }
2918                 }
2919                 else
2920                 {
2921                     mtCOVERAGE_TEST_MARKER();
2922                 }
2923             }
2924             else
2925             {
2926                 mtCOVERAGE_TEST_MARKER();
2927             }
2928         }
2929         else
2930         {
2931             mtCOVERAGE_TEST_MARKER();
2932         }
2933 
2934         traceRETURN_xQueueCRSendFromISR( xCoRoutinePreviouslyWoken );
2935 
2936         return xCoRoutinePreviouslyWoken;
2937     }
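    /* Illustrative usage, not part of the kernel source: the return value is
     * daisy chained through repeated calls so only one co-routine is woken per
     * interrupt, normally via the crQUEUE_SEND_FROM_ISR() wrapper.  Sketch
     * only - prvReadCharFromUART() and xCommsQueue are hypothetical:
     *
     *     void vAnExampleISR( void )
     *     {
     *         BaseType_t xCoRoutineWoken = pdFALSE;
     *         char cRxedChar;
     *
     *         while( prvReadCharFromUART( &cRxedChar ) == pdTRUE )
     *         {
     *             xCoRoutineWoken = crQUEUE_SEND_FROM_ISR( xCommsQueue, &cRxedChar, xCoRoutineWoken );
     *         }
     *     }
     */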
2938 
2939 #endif /* configUSE_CO_ROUTINES */
2940 /*-----------------------------------------------------------*/
2941 
2942 #if ( configUSE_CO_ROUTINES == 1 )
2943 
2944     BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue,
2945                                        void * pvBuffer,
2946                                        BaseType_t * pxCoRoutineWoken )
2947     {
2948         BaseType_t xReturn;
2949         Queue_t * const pxQueue = xQueue;
2950 
2951         traceENTER_xQueueCRReceiveFromISR( xQueue, pvBuffer, pxCoRoutineWoken );
2952 
2953         /* We cannot block from an ISR, so check there is data available. If
2954          * not then just leave without doing anything. */
2955         if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2956         {
2957             /* Copy the data from the queue. */
2958             pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
2959 
2960             if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail )
2961             {
2962                 pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2963             }
2964             else
2965             {
2966                 mtCOVERAGE_TEST_MARKER();
2967             }
2968 
2969             --( pxQueue->uxMessagesWaiting );
2970             ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
2971 
2972             if( ( *pxCoRoutineWoken ) == pdFALSE )
2973             {
2974                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2975                 {
2976                     if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2977                     {
2978                         *pxCoRoutineWoken = pdTRUE;
2979                     }
2980                     else
2981                     {
2982                         mtCOVERAGE_TEST_MARKER();
2983                     }
2984                 }
2985                 else
2986                 {
2987                     mtCOVERAGE_TEST_MARKER();
2988                 }
2989             }
2990             else
2991             {
2992                 mtCOVERAGE_TEST_MARKER();
2993             }
2994 
2995             xReturn = pdPASS;
2996         }
2997         else
2998         {
2999             xReturn = pdFAIL;
3000         }
3001 
3002         traceRETURN_xQueueCRReceiveFromISR( xReturn );
3003 
3004         return xReturn;
3005     }
3006 
3007 #endif /* configUSE_CO_ROUTINES */
3008 /*-----------------------------------------------------------*/
3009 
3010 #if ( configQUEUE_REGISTRY_SIZE > 0 )
3011 
3012     void vQueueAddToRegistry( QueueHandle_t xQueue,
3013                               const char * pcQueueName )
3014     {
3015         UBaseType_t ux;
3016         QueueRegistryItem_t * pxEntryToWrite = NULL;
3017 
3018         traceENTER_vQueueAddToRegistry( xQueue, pcQueueName );
3019 
3020         configASSERT( xQueue );
3021 
3022         if( pcQueueName != NULL )
3023         {
3024             /* See if there is an empty space in the registry.  A NULL name denotes
3025              * a free slot. */
3026             for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
3027             {
3028                 /* Replace an existing entry if the queue is already in the registry. */
3029                 if( xQueue == xQueueRegistry[ ux ].xHandle )
3030                 {
3031                     pxEntryToWrite = &( xQueueRegistry[ ux ] );
3032                     break;
3033                 }
3034                 /* Otherwise, store in the next empty location */
3035                 else if( ( pxEntryToWrite == NULL ) && ( xQueueRegistry[ ux ].pcQueueName == NULL ) )
3036                 {
3037                     pxEntryToWrite = &( xQueueRegistry[ ux ] );
3038                 }
3039                 else
3040                 {
3041                     mtCOVERAGE_TEST_MARKER();
3042                 }
3043             }
3044         }
3045 
3046         if( pxEntryToWrite != NULL )
3047         {
3048             /* Store the information on this queue. */
3049             pxEntryToWrite->pcQueueName = pcQueueName;
3050             pxEntryToWrite->xHandle = xQueue;
3051 
3052             traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
3053         }
3054 
3055         traceRETURN_vQueueAddToRegistry();
3056     }
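    /* Illustrative usage, not part of the kernel source: the registry stores
     * the name pointer rather than copying the string, so the name must stay
     * valid for as long as the queue remains registered.  Sketch only -
     * Command_t is a hypothetical application type:
     *
     *     QueueHandle_t xCommandQueue = xQueueCreate( 5, sizeof( Command_t ) );
     *     vQueueAddToRegistry( xCommandQueue, "CommandQueue" );   // literal, so always valid
     *
     * Registered names are what kernel aware debuggers display next to queue
     * handles. */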
3057 
3058 #endif /* configQUEUE_REGISTRY_SIZE */
3059 /*-----------------------------------------------------------*/
3060 
3061 #if ( configQUEUE_REGISTRY_SIZE > 0 )
3062 
3063     const char * pcQueueGetName( QueueHandle_t xQueue )
3064     {
3065         UBaseType_t ux;
3066         const char * pcReturn = NULL;
3067 
3068         traceENTER_pcQueueGetName( xQueue );
3069 
3070         configASSERT( xQueue );
3071 
3072         /* Note there is nothing here to protect against another task adding or
3073          * removing entries from the registry while it is being searched. */
3074 
3075         for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
3076         {
3077             if( xQueueRegistry[ ux ].xHandle == xQueue )
3078             {
3079                 pcReturn = xQueueRegistry[ ux ].pcQueueName;
3080                 break;
3081             }
3082             else
3083             {
3084                 mtCOVERAGE_TEST_MARKER();
3085             }
3086         }
3087 
3088         traceRETURN_pcQueueGetName( pcReturn );
3089 
3090         return pcReturn;
3091     }
3092 
3093 #endif /* configQUEUE_REGISTRY_SIZE */
3094 /*-----------------------------------------------------------*/
3095 
3096 #if ( configQUEUE_REGISTRY_SIZE > 0 )
3097 
3098     void vQueueUnregisterQueue( QueueHandle_t xQueue )
3099     {
3100         UBaseType_t ux;
3101 
3102         traceENTER_vQueueUnregisterQueue( xQueue );
3103 
3104         configASSERT( xQueue );
3105 
3106         /* See if the handle of the queue being unregistered is actually in the
3107          * registry. */
3108         for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
3109         {
3110             if( xQueueRegistry[ ux ].xHandle == xQueue )
3111             {
3112                 /* Set the name to NULL to show that this slot is free again. */
3113                 xQueueRegistry[ ux ].pcQueueName = NULL;
3114 
3115                 /* Set the handle to NULL to ensure the same queue handle cannot
3116                  * appear in the registry twice if it is added, removed, then
3117                  * added again. */
3118                 xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0;
3119                 break;
3120             }
3121             else
3122             {
3123                 mtCOVERAGE_TEST_MARKER();
3124             }
3125         }
3126 
3127         traceRETURN_vQueueUnregisterQueue();
3128     }
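    /* Illustrative usage, not part of the kernel source: a registered queue
     * should be removed from the registry before it is deleted so the registry
     * is not left holding a dangling handle.  Sketch only:
     *
     *     vQueueUnregisterQueue( xCommandQueue );
     *     vQueueDelete( xCommandQueue );
     */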
3129 
3130 #endif /* configQUEUE_REGISTRY_SIZE */
3131 /*-----------------------------------------------------------*/
3132 
3133 #if ( configUSE_TIMERS == 1 )
3134 
3135     void vQueueWaitForMessageRestricted( QueueHandle_t xQueue,
3136                                          TickType_t xTicksToWait,
3137                                          const BaseType_t xWaitIndefinitely )
3138     {
3139         Queue_t * const pxQueue = xQueue;
3140 
3141         traceENTER_vQueueWaitForMessageRestricted( xQueue, xTicksToWait, xWaitIndefinitely );
3142 
3143         /* This function should not be called by application code hence the
3144          * 'Restricted' in its name.  It is not part of the public API.  It is
3145          * designed for use by kernel code, and has special calling requirements.
3146          * It can result in vListInsert() being called on a list that can only
3147          * possibly ever have one item in it, so the list will be fast, but even
3148          * so it should be called with the scheduler locked and not from a critical
3149          * section. */
3150 
3151         /* Only do anything if there are no messages in the queue.  This function
3152          *  will not actually cause the task to block, just place it on a blocked
3153          *  list.  It will not block until the scheduler is unlocked - at which
3154          *  time a yield will be performed.  If an item is added to the queue while
3155          *  the queue is locked, and the calling task blocks on the queue, then the
3156          *  calling task will be immediately unblocked when the queue is unlocked. */
3157         prvLockQueue( pxQueue );
3158 
3159         if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
3160         {
3161             /* There is nothing in the queue, block for the specified period. */
3162             vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );
3163         }
3164         else
3165         {
3166             mtCOVERAGE_TEST_MARKER();
3167         }
3168 
3169         prvUnlockQueue( pxQueue );
3170 
3171         traceRETURN_vQueueWaitForMessageRestricted();
3172     }
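    /* Illustrative note, not part of the kernel source: within the kernel this
     * function is used by the timer task in timers.c, which calls it with the
     * scheduler suspended so it can be placed on the timer command queue's
     * event list and only actually block once the scheduler is resumed. */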
3173 
3174 #endif /* configUSE_TIMERS */
3175 /*-----------------------------------------------------------*/
3176 
3177 #if ( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
3178 
3179     QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
3180     {
3181         QueueSetHandle_t pxQueue;
3182 
3183         traceENTER_xQueueCreateSet( uxEventQueueLength );
3184 
3185         pxQueue = xQueueGenericCreate( uxEventQueueLength, ( UBaseType_t ) sizeof( Queue_t * ), queueQUEUE_TYPE_SET );
3186 
3187         traceRETURN_xQueueCreateSet( pxQueue );
3188 
3189         return pxQueue;
3190     }
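    /* Illustrative usage, not part of the kernel source: the set must be able
     * to hold one handle for every item that could be pending across all of
     * its members at the same time.  Sketch only, assuming one queue of length
     * 5 and one counting semaphore with a maximum count of 3 will be added:
     *
     *     QueueSetHandle_t xSet = xQueueCreateSet( 5 + 3 );
     */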
3191 
3192 #endif /* #if ( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
3193 /*-----------------------------------------------------------*/
3194 
3195 #if ( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
3196 
3197     QueueSetHandle_t xQueueCreateSetStatic( const UBaseType_t uxEventQueueLength,
3198                                             uint8_t * pucQueueStorage,
3199                                             StaticQueue_t * pxStaticQueue )
3200     {
3201         QueueSetHandle_t pxQueue;
3202 
3203         traceENTER_xQueueCreateSetStatic( uxEventQueueLength );
3204 
3205         pxQueue = xQueueGenericCreateStatic( uxEventQueueLength, ( UBaseType_t ) sizeof( Queue_t * ), pucQueueStorage, pxStaticQueue, queueQUEUE_TYPE_SET );
3206 
3207         traceRETURN_xQueueCreateSetStatic( pxQueue );
3208 
3209         return pxQueue;
3210     }
3211 
3212 #endif /* #if ( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
3213 /*-----------------------------------------------------------*/
3214 
3215 #if ( configUSE_QUEUE_SETS == 1 )
3216 
3217     BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
3218                                QueueSetHandle_t xQueueSet )
3219     {
3220         BaseType_t xReturn;
3221 
3222         traceENTER_xQueueAddToSet( xQueueOrSemaphore, xQueueSet );
3223 
3224         taskENTER_CRITICAL();
3225         {
3226             if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
3227             {
3228                 /* Cannot add a queue/semaphore to more than one queue set. */
3229                 xReturn = pdFAIL;
3230             }
3231             else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
3232             {
3233                 /* Cannot add a queue/semaphore to a queue set if there are already
3234                  * items in the queue/semaphore. */
3235                 xReturn = pdFAIL;
3236             }
3237             else
3238             {
3239                 ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
3240                 xReturn = pdPASS;
3241             }
3242         }
3243         taskEXIT_CRITICAL();
3244 
3245         traceRETURN_xQueueAddToSet( xReturn );
3246 
3247         return xReturn;
3248     }
3249 
3250 #endif /* configUSE_QUEUE_SETS */
3251 /*-----------------------------------------------------------*/
3252 
3253 #if ( configUSE_QUEUE_SETS == 1 )
3254 
3255     BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore,
3256                                     QueueSetHandle_t xQueueSet )
3257     {
3258         BaseType_t xReturn;
3259         Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;
3260 
3261         traceENTER_xQueueRemoveFromSet( xQueueOrSemaphore, xQueueSet );
3262 
3263         if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
3264         {
3265             /* The queue was not a member of the set. */
3266             xReturn = pdFAIL;
3267         }
3268         else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
3269         {
3270             /* It is dangerous to remove a queue from a set when the queue is
3271              * not empty because the queue set will still hold pending events for
3272              * the queue. */
3273             xReturn = pdFAIL;
3274         }
3275         else
3276         {
3277             taskENTER_CRITICAL();
3278             {
3279                 /* The queue is no longer contained in the set. */
3280                 pxQueueOrSemaphore->pxQueueSetContainer = NULL;
3281             }
3282             taskEXIT_CRITICAL();
3283             xReturn = pdPASS;
3284         }
3285 
3286         traceRETURN_xQueueRemoveFromSet( xReturn );
3287 
3288         return xReturn;
3289     }
3290 
3291 #endif /* configUSE_QUEUE_SETS */
3292 /*-----------------------------------------------------------*/
3293 
3294 #if ( configUSE_QUEUE_SETS == 1 )
3295 
3296     QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
3297                                                 TickType_t const xTicksToWait )
3298     {
3299         QueueSetMemberHandle_t xReturn = NULL;
3300 
3301         traceENTER_xQueueSelectFromSet( xQueueSet, xTicksToWait );
3302 
3303         ( void ) xQueueReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait );
3304 
3305         traceRETURN_xQueueSelectFromSet( xReturn );
3306 
3307         return xReturn;
3308     }
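    /* Illustrative usage, not part of the kernel source: a task typically
     * blocks on the set, then reads the returned member with a zero block time
     * because that member is known to contain data.  Sketch only, assuming
     * xQueue and xSemaphore were added to xSet with xQueueAddToSet():
     *
     *     uint32_t ulReceivedValue;
     *     QueueSetMemberHandle_t xActivated;
     *
     *     xActivated = xQueueSelectFromSet( xSet, portMAX_DELAY );
     *
     *     if( xActivated == ( QueueSetMemberHandle_t ) xQueue )
     *     {
     *         ( void ) xQueueReceive( xQueue, &ulReceivedValue, 0 );
     *     }
     *     else if( xActivated == ( QueueSetMemberHandle_t ) xSemaphore )
     *     {
     *         ( void ) xSemaphoreTake( xSemaphore, 0 );
     *     }
     */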
3309 
3310 #endif /* configUSE_QUEUE_SETS */
3311 /*-----------------------------------------------------------*/
3312 
3313 #if ( configUSE_QUEUE_SETS == 1 )
3314 
3315     QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
3316     {
3317         QueueSetMemberHandle_t xReturn = NULL;
3318 
3319         traceENTER_xQueueSelectFromSetFromISR( xQueueSet );
3320 
3321         ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL );
3322 
3323         traceRETURN_xQueueSelectFromSetFromISR( xReturn );
3324 
3325         return xReturn;
3326     }
3327 
3328 #endif /* configUSE_QUEUE_SETS */
3329 /*-----------------------------------------------------------*/
3330 
3331 #if ( configUSE_QUEUE_SETS == 1 )
3332 
3333     static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue )
3334     {
3335         Queue_t * pxQueueSetContainer = pxQueue->pxQueueSetContainer;
3336         BaseType_t xReturn = pdFALSE;
3337 
3338         /* This function must be called from a critical section. */
3339 
3340         /* The following line is not reachable in unit tests because every call
3341          * to prvNotifyQueueSetContainer is preceded by a check that
3342          * pxQueueSetContainer != NULL */
3343         configASSERT( pxQueueSetContainer ); /* LCOV_EXCL_BR_LINE */
3344         configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );
3345 
3346         if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
3347         {
3348             const int8_t cTxLock = pxQueueSetContainer->cTxLock;
3349 
3350             traceQUEUE_SET_SEND( pxQueueSetContainer );
3351 
3352             /* The data copied is the handle of the queue that contains data. */
3353             xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, queueSEND_TO_BACK );
3354 
3355             if( cTxLock == queueUNLOCKED )
3356             {
3357                 if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
3358                 {
3359                     if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
3360                     {
3361                         /* The task waiting has a higher priority. */
3362                         xReturn = pdTRUE;
3363                     }
3364                     else
3365                     {
3366                         mtCOVERAGE_TEST_MARKER();
3367                     }
3368                 }
3369                 else
3370                 {
3371                     mtCOVERAGE_TEST_MARKER();
3372                 }
3373             }
3374             else
3375             {
3376                 prvIncrementQueueTxLock( pxQueueSetContainer, cTxLock );
3377             }
3378         }
3379         else
3380         {
3381             mtCOVERAGE_TEST_MARKER();
3382         }
3383 
3384         return xReturn;
3385     }
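    /* Illustrative note, not part of the kernel source: the item copied into
     * the containing set above is the member queue's own handle (&pxQueue),
     * which is why xQueueSelectFromSet() hands the application back a
     * QueueSetMemberHandle_t rather than the queued data itself. */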
3386 
3387 #endif /* configUSE_QUEUE_SETS */
3388