• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * FreeRTOS Kernel V10.2.1
3  * Copyright (C) 2019 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a copy of
6  * this software and associated documentation files (the "Software"), to deal in
7  * the Software without restriction, including without limitation the rights to
8  * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
9  * the Software, and to permit persons to whom the Software is furnished to do so,
10  * subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in all
13  * copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
17  * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
18  * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
19  * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * http://www.FreeRTOS.org
23  * http://aws.amazon.com/freertos
24  *
25  * 1 tab == 4 spaces!
26  */
27 
28 #include <stdlib.h>
29 #include <string.h>
30 
31 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
32 all the API functions to use the MPU wrappers.  That should only be done when
33 task.h is included from an application file. */
34 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
35 
36 #include "esp_osal.h"
37 #include "task.h"
38 #include "queue.h"
39 #include "los_list.h"
40 #include "los_sem.h"
41 #include "los_sched.h"
42 #if ( configUSE_CO_ROUTINES == 1 )
43 	#include "croutine.h"
44 #endif
45 
46 /* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
47 because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
48 for the header files above, but not in this file, in order to generate the
49 correct privileged Vs unprivileged linkage and placement. */
50 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
51 
52 
/* Constants used with the cRxLock and cTxLock structure members. */
#define queueUNLOCKED					( ( int8_t ) -1 )	/* Queue not locked; ISRs may manipulate the event lists directly. */
#define queueLOCKED_UNMODIFIED			( ( int8_t ) 0 )	/* Queue locked; counts items added/removed by ISRs while locked. */

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
not necessary, and the pcHead pointer is set to NULL to indicate that the
structure instead holds a pointer to the mutex holder (if any).  Map alternative
names to the pcHead and structure member to ensure the readability of the code
is maintained.  The QueuePointers_t and SemaphoreData_t types are used to form
a union as their usage is mutually exclusive dependent on what the queue is
being used for. */
#define uxQueueType						pcHead
#define queueQUEUE_IS_MUTEX				NULL	/* pcHead == NULL marks the structure as a mutex. */
68 
typedef struct QueuePointers
{
	int8_t *pcTail;					/*< Points to the byte at the end of the queue storage area.  One more byte is allocated than necessary to store the queue items, this is used as a marker. */
	int8_t *pcReadFrom;				/*< Points to the last place that a queued item was read from when the structure is used as a queue. */
} QueuePointers_t;
74 
/* Bookkeeping used only when the Queue_t structure acts as a (recursive)
mutex; overlays QueuePointers_t in the union inside Queue_t. */
typedef struct SemaphoreData
{
	TaskHandle_t xMutexHolder;		 /*< The handle of the task that holds the mutex. */
	UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
} SemaphoreData_t;
80 
/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME		 ( ( TickType_t ) 0U )

/* Intentionally expands to nothing in this port.  NOTE(review): presumably
the underlying LiteOS scheduler performs the context switch when a task is
woken (see OsSchedTaskWake usage below) - confirm no explicit yield is
required here. */
#define queueYIELD_IF_USING_PREEMPTION()
87 
/* Snapshot of the tick count taken when a task starts to block; used by
xTaskCheckForTimeOut() to work out how much of a wait period remains. */
typedef struct xTIME_OUT
{
	UINT64 xTimeOnEntering;		/*< OsTickCount value when the timeout was (re)started. */
} TimeOut_t;
92 /*
93  * Definition of the queue used by the scheduler.
94  * Items are queued by copy, not reference.  See the following link for the
95  * rationale: https://www.freertos.org/Embedded-RTOS-Queues.html
96  */
typedef struct QueueDefinition 		/* The old naming convention is used to prevent breaking kernel aware debuggers. */
{
	int8_t *pcHead;					/*< Points to the beginning of the queue storage area.  NULL when the structure is used as a mutex (see queueQUEUE_IS_MUTEX). */
	int8_t *pcWriteTo;				/*< Points to the free next place in the storage area. */

	union
	{
		QueuePointers_t xQueue;		/*< Data required exclusively when this structure is used as a queue. */
		SemaphoreData_t xSemaphore; /*< Data required exclusively when this structure is used as a semaphore. */
	} u;

	LOS_DL_LIST xTasksWaitingToSend;	/*< LiteOS doubly-linked list of tasks blocked waiting to send to this queue. */
	LOS_DL_LIST xTasksWaitingToReceive;	/*< LiteOS doubly-linked list of tasks blocked waiting to receive from this queue. */
	volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
	UBaseType_t uxLength;			/*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
	UBaseType_t uxItemSize;			/*< The size of each items that the queue will hold. */

	volatile int8_t cRxLock;		/*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
	volatile int8_t cTxLock;		/*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

	#if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
		uint8_t ucStaticallyAllocated;	/*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
	#endif
	portMUX_TYPE mux;		/*< Per-queue spinlock/mutex required for SMP-safe critical sections. */

} xQUEUE;
123 
124 /* The old xQUEUE name is maintained above then typedefed to the new Queue_t
125 name below to enable the use of older kernel aware debuggers. */
126 typedef xQUEUE Queue_t;
127 
128 /*
129  * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
130  * prevent an ISR from adding or removing items to the queue, but does prevent
131  * an ISR from removing tasks from the queue event lists.  If an ISR finds a
132  * queue is locked it will instead increment the appropriate queue lock count
133  * to indicate that a task may require unblocking.  When the queue in unlocked
134  * these lock counts are inspected, and the appropriate action taken.
135  */
136 static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
137 
138 /*
139  * Uses a critical section to determine if there is any data in a queue.
140  *
141  * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
142  */
143 static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;
144 
145 /*
146  * Uses a critical section to determine if there is any space in a queue.
147  *
148  * @return pdTRUE if there is no space, otherwise pdFALSE;
149  */
150 static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;
151 
152 /*
153  * Copies an item into the queue, either at the front of the queue or the
154  * back of the queue.
155  */
156 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;
157 
158 /*
159  * Copies an item out of a queue.
160  */
161 static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;
162 
163 /*
164  * Called after a Queue_t structure has been allocated either statically or
165  * dynamically to fill in the structure's members.
166  */
167 static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;
168 
169 /*
170  * Mutexes are a special type of queue.  When a mutex is created, first the
171  * queue is created, then prvInitialiseMutex() is called to configure the queue
172  * as a mutex.
173  */
174 #if( configUSE_MUTEXES == 1 )
175 	static void prvInitialiseMutex( Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;
176 #endif
177 
178 #if( configUSE_MUTEXES == 1 )
179 	/*
180 	 * If a task waiting for a mutex causes the mutex holder to inherit a
181 	 * priority, but the waiting task times out, then the holder should
182 	 * disinherit the priority - but only down to the highest priority of any
183 	 * other tasks that are waiting for the same mutex.  This function returns
184 	 * that priority.
185 	 */
186 	static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
187 #endif
188 /*-----------------------------------------------------------*/
189 
/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 *
 * Wrapped in do { } while( 0 ) so the multi-statement expansion behaves as a
 * single statement and is safe inside unbraced if/else bodies (CERT PRE10-C).
 * The pxQueue argument is parenthesised at every use (CERT PRE01-C).
 */
#define prvLockQueue( pxQueue )										\
	do																\
	{																\
		taskENTER_CRITICAL( &( pxQueue )->mux );					\
		if( ( pxQueue )->cRxLock == queueUNLOCKED )					\
		{															\
			( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED;			\
		}															\
		if( ( pxQueue )->cTxLock == queueUNLOCKED )					\
		{															\
			( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED;			\
		}															\
		taskEXIT_CRITICAL( &( pxQueue )->mux );						\
	} while( 0 )
207 /*-----------------------------------------------------------*/
208 extern UINT64 OsTickCount;
209 // ��¼�и������ȼ���������Ҫ�л�
/* Hook kept for API compatibility with the FreeRTOS queue code.  This port
leaves it empty - NOTE(review): presumably the LiteOS scheduler handles the
deferred yield when the queue is unlocked; confirm against prvUnlockQueue. */
static void vTaskMissedYield( void )
{
}
/* Capture the current LiteOS tick count so a later call to
xTaskCheckForTimeOut() can measure how long the caller has been waiting. */
static void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
{
	pxTimeOut->xTimeOnEntering = OsTickCount;
}
xTaskCheckForTimeOut(TimeOut_t * const pxTimeOut,TickType_t * const pxTicksToWait)217 static BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait )
218 {
219 	BaseType_t xReturn;
220 
221 	if(!pxTimeOut || !pxTicksToWait) return pdFALSE;
222 	{
223 		const UINT64 xElapsedTime = OsTickCount - pxTimeOut->xTimeOnEntering;
224 
225 		if( *pxTicksToWait == portMAX_DELAY ) xReturn = pdFALSE;
226 		else if( xElapsedTime < *pxTicksToWait )
227 		{
228 			*pxTicksToWait -= xElapsedTime;
229 			vTaskInternalSetTimeOutState( pxTimeOut );
230 			xReturn = pdFALSE;
231 		}
232 		else
233 		{
234 			*pxTicksToWait = 0;
235 			xReturn = pdTRUE;
236 		}
237 	}
238 	return xReturn;
239 }
// Block the calling task on a queue event list.
/* Block the currently running task: place it on pxEventList (one of the
queue's wait lists) and hand it to the LiteOS scheduler for at most
xTicksToWait ticks.  A NULL event list is silently ignored. */
static void vTaskPlaceOnEventList( LOS_DL_LIST *pxEventList, const TickType_t xTicksToWait )
{
	if (!pxEventList) return;
	OsSchedTaskWait(pxEventList, xTicksToWait);
}
// Wake the first task blocked on a queue event list.
xTaskRemoveFromEventList(LOS_DL_LIST * pxEventList)247 static BaseType_t xTaskRemoveFromEventList(LOS_DL_LIST *pxEventList )
248 {
249 	if (!pxEventList)return pdFALSE;
250 	if (LOS_ListEmpty(pxEventList)) return pdFALSE;
251 	LosTaskCB *resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(pxEventList));
252 	// if(!(resumedTask->taskStatus & OS_TASK_STATUS_READY))
253 	OsSchedTaskWake(resumedTask);
254 	return pdTRUE;
255 }
// Return a handle for the task that is taking the mutex (the running task).
/* In upstream FreeRTOS this also increments the running task's mutexes-held
counter; this port only returns a handle for the running task (its LiteOS
task ID cast to a TaskHandle_t).  NOTE(review): the held count is not
tracked here - confirm no caller relies on it. */
static void *pvTaskIncrementMutexHeldCount( void )
{
	return (TaskHandle_t)g_losTask.runTask->taskID;
}
261 //
xTaskPriorityInherit(TaskHandle_t const pxMutexHolder)262 static BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder )
263 {
264 	BaseType_t xReturn = pdFALSE;
265 	if( pxMutexHolder == NULL ) return pdFALSE;
266 	{
267 		LosTaskCB * const runningTask = g_losTask.runTask;
268 		LosTaskCB * const muxPended = OS_TCB_FROM_TID((UINT32)pxMutexHolder);
269 		// ���������������ȼ����ڵ�ǰ���ȼ� ��̳е�ǰ���ȼ�? ��ʹ��ǰ�������?
270 		if( muxPended->priority > runningTask->priority ) {
271 			OsSchedModifyTaskSchedParam(muxPended, runningTask->priority);
272 			// OsSchedTaskWake(muxPended);
273 			xReturn = pdTRUE;
274 		} else {
275 			if( muxPended->basePriority > runningTask->priority ) {
276 				xReturn = pdTRUE;
277 			}
278 		}
279 	}
280 
281 	return xReturn;
282 }
// Undo priority inheritance when the holder releases the mutex.
xTaskPriorityDisinherit(TaskHandle_t const pxMutexHolder)284 static BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder )
285 {
286 	LosTaskCB *pxTCB;
287 	if( pxMutexHolder == NULL ) return pdFALSE;
288 	pxTCB = OS_TCB_FROM_TID((UINT32)pxMutexHolder);
289 	if( pxTCB->priority != pxTCB->basePriority ) {
290 		OsSchedModifyTaskSchedParam(pxTCB, pxTCB->basePriority); // �ָ����ȼ�
291 		// if(pxTCB != g_losTask.runTask)
292 		OsSchedTaskWake(pxTCB);
293 		return pdTRUE;
294 	}
295 	return pdFALSE;
296 }
// Partial priority disinheritance after a waiting task times out.
/* A task waiting for the mutex timed out: lower the holder's inherited
priority, but only as far as the highest priority still waiting for the
same mutex (LiteOS: a smaller number is a higher priority). */
static void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder, UBaseType_t uxHighestPriorityWaitingTask )
{
	if( pxMutexHolder == NULL )
	{
		return;
	}

	LosTaskCB * const pxHolder = OS_TCB_FROM_TID( (UINT32)pxMutexHolder );

	/* Disinherit down to whichever is the higher (numerically smaller)
	priority: the holder's own base priority or the best remaining waiter. */
	UINT16 usNewPriority;
	if( pxHolder->basePriority > uxHighestPriorityWaitingTask )
	{
		usNewPriority = uxHighestPriorityWaitingTask;
	}
	else
	{
		usNewPriority = pxHolder->basePriority;
	}

	if( pxHolder->priority != usNewPriority )
	{
		OsSchedModifyTaskSchedParam( pxHolder, usNewPriority );
		/* The waiter has given up, so clear the holder's timeout flag
		before making it ready again. */
		pxHolder->taskStatus &= ~OS_TASK_STATUS_TIMEOUT;
		OsSchedTaskWake( pxHolder );
	}
}
319 /*-----------------------------------------------------------*/
/* Reset a queue to its empty state.  xNewQueue is pdTRUE when called during
creation (event lists and the spinlock are initialised) and pdFALSE for a
reset of a live queue (blocked senders may be unblocked, blocked readers
stay blocked).  Always returns pdPASS. */
BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = xQueue;

	configASSERT( pxQueue );

	/* Only a brand-new queue needs its spinlock initialised; resetting a
	live queue must keep the existing lock usable by concurrent callers. */
	if( xNewQueue == pdTRUE )
	{
		vPortCPUInitializeMutex(&pxQueue->mux);
	}

	taskENTER_CRITICAL( &pxQueue->mux);
	{
		/* Rewind the storage pointers so the queue reads as empty.  pcReadFrom
		points at the last item slot because it is advanced before use. */
		pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
		pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
		pxQueue->pcWriteTo = pxQueue->pcHead;
		pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
		pxQueue->cRxLock = queueUNLOCKED;
		pxQueue->cTxLock = queueUNLOCKED;

		if( xNewQueue == pdFALSE )
		{
			/* If there are tasks blocked waiting to read from the queue, then
			the tasks will remain blocked as after this function exits the queue
			will still be empty.  If there are tasks blocked waiting to write to
			the queue, then one should be unblocked as after this function exits
			it will be possible to write to it. */
			if( !LOS_ListEmpty( &( pxQueue->xTasksWaitingToSend ) ))
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
				{
					queueYIELD_IF_USING_PREEMPTION();
				}
			}
		}
		else
		{
			/* Ensure the event queues start in the correct state. */
			LOS_ListInit(&( pxQueue->xTasksWaitingToSend ));
			LOS_ListInit(&( pxQueue->xTasksWaitingToReceive ));
		}
	}
	taskEXIT_CRITICAL( &pxQueue->mux);

	/* A value is returned for calling semantic consistency with previous
	versions. */
	return pdPASS;
}
368 /*-----------------------------------------------------------*/
369 
370 #if( configSUPPORT_STATIC_ALLOCATION == 1 )
371 
	/*
	 * Create a queue using caller-supplied memory: pxStaticQueue holds the
	 * queue control structure and pucQueueStorage (required exactly when
	 * uxItemSize != 0) holds the item storage.  Returns the queue handle,
	 * or NULL if pxStaticQueue was NULL.
	 */
	QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType )
	{
	Queue_t *pxNewQueue;

		configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

		/* The StaticQueue_t structure and the queue storage area must be
		supplied. */
		configASSERT( pxStaticQueue != NULL );

		/* A queue storage area should be provided if the item size is not 0, and
		should not be provided if the item size is 0. */
		configASSERT( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0 ) ) );
		configASSERT( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0 ) ) );

		#if( configASSERT_DEFINED == 1 )
		{
			/* Sanity check that the size of the structure used to declare a
			variable of type StaticQueue_t or StaticSemaphore_t equals the size of
			the real queue and semaphore structures. */
			volatile size_t xSize = sizeof( StaticQueue_t );
			configASSERT( xSize == sizeof( Queue_t ) );
			( void ) xSize; /* Keeps lint quiet when configASSERT() is not defined. */
		}
		#endif /* configASSERT_DEFINED */

		/* The address of a statically allocated queue was passed in, use it.
		The address of a statically allocated storage area was also passed in
		but is already set. */
		pxNewQueue = ( Queue_t * ) pxStaticQueue; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */

		if( pxNewQueue != NULL )
		{
			#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
			{
				/* Queues can be allocated either statically or dynamically, so
				note this queue was allocated statically in case the queue is
				later deleted. */
				pxNewQueue->ucStaticallyAllocated = pdTRUE;
			}
			#endif /* configSUPPORT_DYNAMIC_ALLOCATION */

			prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
		}
		else
		{
			traceQUEUE_CREATE_FAILED( ucQueueType );
			mtCOVERAGE_TEST_MARKER();
		}

		return pxNewQueue;
	}
424 
425 #endif /* configSUPPORT_STATIC_ALLOCATION */
426 /*-----------------------------------------------------------*/
427 
428 #if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
429 
	/*
	 * Create a queue on the heap.  A single pvPortMalloc() allocates the
	 * Queue_t control structure immediately followed by the item storage
	 * area.  Returns the queue handle, or NULL if the allocation failed.
	 */
	QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
	{
	Queue_t *pxNewQueue;
	size_t xQueueSizeInBytes;
	uint8_t *pucQueueStorage;

		configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

		if( uxItemSize == ( UBaseType_t ) 0 )
		{
			/* There is not going to be a queue storage area. */
			xQueueSizeInBytes = ( size_t ) 0;
		}
		else
		{
			/* Allocate enough space to hold the maximum number of items that
			can be in the queue at any time. */
			xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
		}

		/* Check for multiplication overflow. */
		configASSERT( ( uxItemSize == 0 ) || ( uxQueueLength == ( xQueueSizeInBytes / uxItemSize ) ) );

		/* Check for addition overflow. */
		configASSERT( ( sizeof( Queue_t ) + xQueueSizeInBytes ) >  xQueueSizeInBytes );

		/* Allocate the queue and storage area.  Justification for MISRA
		deviation as follows:  pvPortMalloc() always ensures returned memory
		blocks are aligned per the requirements of the MCU stack.  In this case
		pvPortMalloc() must return a pointer that is guaranteed to meet the
		alignment requirements of the Queue_t structure - which in this case
		is an int8_t *.  Therefore, whenever the stack alignment requirements
		are greater than or equal to the pointer to char requirements the cast
		is safe.  In other cases alignment requirements are not strict (one or
		two bytes). */
		pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes ); /*lint !e9087 !e9079 see comment above. */

		if( pxNewQueue != NULL )
		{
			/* Jump past the queue structure to find the location of the queue
			storage area. */
			pucQueueStorage = ( uint8_t * ) pxNewQueue;
			pucQueueStorage += sizeof( Queue_t ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */

			#if( configSUPPORT_STATIC_ALLOCATION == 1 )
			{
				/* Queues can be created either statically or dynamically, so
				note this queue was created dynamically in case it is later
				deleted. */
				pxNewQueue->ucStaticallyAllocated = pdFALSE;
			}
			#endif /* configSUPPORT_STATIC_ALLOCATION */

			prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
		}
		else
		{
			traceQUEUE_CREATE_FAILED( ucQueueType );
			mtCOVERAGE_TEST_MARKER();
		}

		return pxNewQueue;
	}
493 
#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
495 /*-----------------------------------------------------------*/
496 
/* Fill in the members of an already-allocated Queue_t.  pucQueueStorage may
be NULL when uxItemSize is zero (semaphore-style queues store no data).
Ends by resetting the queue, which also initialises the event lists and the
per-queue spinlock. */
static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue )
{
	/* Remove compiler warnings about unused parameters should
	configUSE_TRACE_FACILITY not be set to 1. */
	( void ) ucQueueType;

	if( uxItemSize == ( UBaseType_t ) 0 )
	{
		/* No RAM was allocated for the queue storage area, but PC head cannot
		be set to NULL because NULL is used as a key to say the queue is used as
		a mutex.  Therefore just set pcHead to point to the queue as a benign
		value that is known to be within the memory map. */
		pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
	}
	else
	{
		/* Set the head to the start of the queue storage area. */
		pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
	}

	/* Initialise the queue members as described where the queue type is
	defined. */
	pxNewQueue->uxLength = uxQueueLength;
	pxNewQueue->uxItemSize = uxItemSize;
	( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

	#if ( configUSE_TRACE_FACILITY == 1 )
	{
		pxNewQueue->ucQueueType = ucQueueType;
	}
	#endif /* configUSE_TRACE_FACILITY */

	#if( configUSE_QUEUE_SETS == 1 )
	{
		pxNewQueue->pxQueueSetContainer = NULL;
	}
	#endif /* configUSE_QUEUE_SETS */

	traceQUEUE_CREATE( pxNewQueue );
}
537 /*-----------------------------------------------------------*/
538 
539 #if( configUSE_MUTEXES == 1 )
540 
	/*
	 * Convert a freshly created generic queue into a mutex: clear the holder,
	 * mark the structure as a mutex (uxQueueType/pcHead set to NULL), zero the
	 * recursion count, then give the mutex once so it starts out available.
	 */
	static void prvInitialiseMutex( Queue_t *pxNewQueue )
	{
		if( pxNewQueue != NULL )
		{
			/* The queue create function will set all the queue structure members
			correctly for a generic queue, but this function is creating a
			mutex.  Overwrite those members that need to be set differently -
			in particular the information required for priority inheritance. */
			pxNewQueue->u.xSemaphore.xMutexHolder = NULL;
			pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

			/* In case this is a recursive mutex. */
			pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0;
			vPortCPUInitializeMutex(&pxNewQueue->mux);

			traceCREATE_MUTEX( pxNewQueue );

			/* Start with the semaphore in the expected state. */
			( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
		}
		else
		{
			traceCREATE_MUTEX_FAILED();
		}
	}
566 
567 #endif /* configUSE_MUTEXES */
568 /*-----------------------------------------------------------*/
569 
570 #if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
/* A StaticSemaphore_t must be able to hold a full queue control structure,
because statically created mutexes are stored inside the caller's
StaticSemaphore_t. */
_Static_assert(sizeof(StaticSemaphore_t) >= sizeof(struct QueueDefinition),
               "Incorrect size of StaticSemaphore_t");

	/*
	 * Create a mutex on the heap: a queue of length 1 with zero-sized items,
	 * then converted by prvInitialiseMutex() (which tolerates a NULL handle
	 * if the allocation failed).  Returns the mutex handle or NULL.
	 */
	QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
	{
	QueueHandle_t xNewQueue;
	const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

		xNewQueue = xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType );
		prvInitialiseMutex( ( Queue_t * ) xNewQueue );

		return xNewQueue;
	}
584 
585 #endif /* configUSE_MUTEXES */
586 /*-----------------------------------------------------------*/
587 
588 #if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
589 
	/*
	 * Create a mutex inside caller-supplied memory (pxStaticQueue): a
	 * statically allocated queue of length 1 with zero-sized items, converted
	 * into a mutex.  Returns the mutex handle or NULL.
	 */
	QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue )
	{
	QueueHandle_t xNewQueue;
	const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

		/* Prevent compiler warnings about unused parameters if
		configUSE_TRACE_FACILITY does not equal 1. */
		( void ) ucQueueType;

		xNewQueue = xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType );
		prvInitialiseMutex( ( Queue_t * ) xNewQueue );

		return xNewQueue;
	}
604 
605 #endif /* configUSE_MUTEXES */
606 #if ( configUSE_RECURSIVE_MUTEXES == 1 )
607 
	/*
	 * Release one level of a recursive mutex.  Only the holding task may
	 * give it; the mutex itself is only returned (via xQueueGenericSend())
	 * when the recursive call count unwinds to zero.  Returns pdPASS on
	 * success, pdFAIL when the caller is not the holder.
	 */
	BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* If this is the task that holds the mutex then xMutexHolder will not
		change outside of this task.  If this task does not hold the mutex then
		pxMutexHolder can never coincidentally equal the tasks handle, and as
		this is the only condition we are interested in it does not matter if
		pxMutexHolder is accessed simultaneously by another task.  Therefore no
		mutual exclusion is required to test the pxMutexHolder variable. */
		if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() )
		{
			traceGIVE_MUTEX_RECURSIVE( pxMutex );

			/* uxRecursiveCallCount cannot be zero if xMutexHolder is equal to
			the task handle, therefore no underflow check is required.  Also,
			uxRecursiveCallCount is only modified by the mutex holder, and as
			there can only be one, no mutual exclusion is required to modify the
			uxRecursiveCallCount member. */
			( pxMutex->u.xSemaphore.uxRecursiveCallCount )--;

			/* Has the recursive call count unwound to 0? */
			if( pxMutex->u.xSemaphore.uxRecursiveCallCount == ( UBaseType_t ) 0 )
			{
				/* Return the mutex.  This will automatically unblock any other
				task that might be waiting to access the mutex. */
				( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
			}

			xReturn = pdPASS;
		}
		else
		{
			/* The mutex cannot be given because the calling task is not the
			holder. */
			xReturn = pdFAIL;

			traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
		}

		return xReturn;
	}
653 
654 #endif /* configUSE_RECURSIVE_MUTEXES */
655 /*-----------------------------------------------------------*/
656 
657 #if ( configUSE_RECURSIVE_MUTEXES == 1 )
658 
	/*
	 * Take a recursive mutex, blocking for at most xTicksToWait ticks.  If
	 * the calling task already holds the mutex only the recursion count is
	 * incremented; otherwise the underlying semaphore is taken (possibly
	 * blocking).  Returns pdPASS on success, pdFAIL on timeout/failure.
	 */
	BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* Comments regarding mutual exclusion as per those within
		xQueueGiveMutexRecursive(). */

		traceTAKE_MUTEX_RECURSIVE( pxMutex );

		if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() )
		{
			( pxMutex->u.xSemaphore.uxRecursiveCallCount )++;
			xReturn = pdPASS;
		}
		else
		{
			xReturn = xQueueSemaphoreTake( pxMutex, xTicksToWait );

			/* pdPASS will only be returned if the mutex was successfully
			obtained.  The calling task may have entered the Blocked state
			before reaching here. */
			if( xReturn != pdFAIL )
			{
				( pxMutex->u.xSemaphore.uxRecursiveCallCount )++;
			}
			else
			{
				traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
			}
		}

		return xReturn;
	}
695 
696 #endif /* configUSE_RECURSIVE_MUTEXES */
697 /*-----------------------------------------------------------*/
698 
699 #if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
700 
	/*
	 * Create a counting semaphore in caller-supplied memory.  The semaphore
	 * is a queue of uxMaxCount zero-sized items; the current count is held in
	 * uxMessagesWaiting, which is seeded with uxInitialCount.  Returns the
	 * handle, or NULL on failure.
	 */
	QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue )
	{
	QueueHandle_t xHandle;

		configASSERT( uxMaxCount != 0 );
		configASSERT( uxInitialCount <= uxMaxCount );

		xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

		if( xHandle != NULL )
		{
			/* Seed the count directly - no data is copied for semaphores. */
			( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

			traceCREATE_COUNTING_SEMAPHORE();
		}
		else
		{
			traceCREATE_COUNTING_SEMAPHORE_FAILED();
		}

		return xHandle;
	}
723 
#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
725 /*-----------------------------------------------------------*/
726 
727 #if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
728 
	/*
	 * Create a counting semaphore on the heap.  The semaphore is a queue of
	 * uxMaxCount zero-sized items; the current count is held in
	 * uxMessagesWaiting, which is seeded with uxInitialCount.  Returns the
	 * handle, or NULL if the allocation failed.
	 */
	QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
	{
	QueueHandle_t xHandle;

		configASSERT( uxMaxCount != 0 );
		configASSERT( uxInitialCount <= uxMaxCount );

		xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

		if( xHandle != NULL )
		{
			/* Seed the count directly - no data is copied for semaphores. */
			( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

			traceCREATE_COUNTING_SEMAPHORE();
		}
		else
		{
			traceCREATE_COUNTING_SEMAPHORE_FAILED();
		}

		return xHandle;
	}
751 
752 #endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
753 /*-----------------------------------------------------------*/
754 
/*
 * Send an item to the back, front, or overwrite position of a queue from
 * task context.
 *
 * pvItemToQueue  Pointer to the item to copy in; may be NULL only when the
 *                queue's item size is 0 (semaphores/mutexes).
 * xTicksToWait   Maximum ticks to block while the queue is full; 0 means
 *                fail immediately with errQUEUE_FULL.
 * xCopyPosition  queueSEND_TO_BACK, queueSEND_TO_FRONT, or queueOVERWRITE
 *                (overwrite is only legal on length-1 queues).
 *
 * Returns pdPASS when the item was queued, errQUEUE_FULL on timeout or when
 * the queue is full and no block time was specified.
 */
BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		/* Cannot block while the scheduler is suspended. */
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif

#if ( configUSE_MUTEXES == 1 && configCHECK_MUTEX_GIVEN_BY_OWNER == 1)
	/* A held mutex may only be given back by the task that took it. */
	configASSERT(pxQueue->uxQueueType != queueQUEUE_IS_MUTEX
				 || pxQueue->u.xSemaphore.xMutexHolder == NULL
				 || pxQueue->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle());
#endif

	/*lint -save -e904 This function relaxes the coding standard somewhat to
	allow return statements within the function itself.  This is done in the
	interest of execution time efficiency. */
	for( ;; )
	{
		/* This port protects each queue with its own mux rather than a
		global critical section. */
		taskENTER_CRITICAL( &pxQueue->mux);
		{
			/* Is there room on the queue now?  The running task must be the
			highest priority task wanting to access the queue.  If the head item
			in the queue is to be overwritten then it does not matter if the
			queue is full. */
			if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
			{
				traceQUEUE_SEND( pxQueue );

				/* Queue-set support is compiled out in this port, hence the
				unconditional '#if 1' block. */
				#if 1
				{
					/* NOTE(review): the 'yield required' result of
					prvCopyDataToQueue() is stored but never acted on here -
					confirm this port performs any required context switch
					via xTaskRemoveFromEventList() instead. */
					xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
					xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) );
				}
				#endif /* configUSE_QUEUE_SETS */

				taskEXIT_CRITICAL( &pxQueue->mux);
				return pdPASS;
			}
			else
			{
				if( xTicksToWait == ( TickType_t ) 0 )
				{
					/* The queue was full and no block time is specified (or
					the block time has expired) so leave now. */
					taskEXIT_CRITICAL( &pxQueue->mux);

					/* Return to the original privilege level before exiting
					the function. */
					traceQUEUE_SEND_FAILED( pxQueue );
					return errQUEUE_FULL;
				}
				else if( xEntryTimeSet == pdFALSE )
				{
					/* The queue was full and a block time was specified so
					configure the timeout structure. */
					vTaskInternalSetTimeOutState( &xTimeOut );
					xEntryTimeSet = pdTRUE;
				}
			}
		}
		taskEXIT_CRITICAL( &pxQueue->mux);

		/* Interrupts and other tasks can send to and receive from the queue
		now the critical section has been exited. */

		taskENTER_CRITICAL( &pxQueue->mux);
		prvLockQueue( pxQueue );

		/* Update the timeout state to see if it has expired yet. */
		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				traceBLOCKING_ON_QUEUE_SEND( pxQueue );
				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

				/* Unlocking the queue means queue events can effect the
				event list.  It is possible that interrupts occurring now
				remove this task from the event list again - but as the
				scheduler is suspended the task will go onto the pending
				ready last instead of the actual ready list. */
				prvUnlockQueue( pxQueue );

				/* Resuming the scheduler will move tasks from the pending
				ready list into the ready list - so it is feasible that this
				task is already in a ready list before it yields - in which
				case the yield will not cause a context switch unless there
				is also a higher priority task in the pending ready list. */
				taskEXIT_CRITICAL( &pxQueue->mux);
				portYIELD_WITHIN_API();

			}
			else
			{
				/* Try again. */
				prvUnlockQueue( pxQueue );
				taskEXIT_CRITICAL( &pxQueue->mux);
			}
		}
		else
		{
			/* The timeout has expired. */
			prvUnlockQueue( pxQueue );
			taskEXIT_CRITICAL( &pxQueue->mux);

			traceQUEUE_SEND_FAILED( pxQueue );
			return errQUEUE_FULL;
		}
	} /*lint -restore */
}
873 /*-----------------------------------------------------------*/
874 
/*
 * ISR-safe variant of xQueueGenericSend().  Never blocks; returns
 * errQUEUE_FULL when there is no room.  When a higher priority task is
 * unblocked by the post, *pxHigherPriorityTaskWoken (may be NULL) is set to
 * pdTRUE so the ISR can request a context switch on exit.
 */
BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call	interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	/* Similar to xQueueGenericSend, except without blocking if there is no room
	in the queue.  Also don't directly wake a task that was blocked on a queue
	read, instead return a flag to say whether a context switch is required or
	not (i.e. has a task with a higher priority than us been woken by this
	post). */
	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		taskENTER_CRITICAL_ISR(&pxQueue->mux);

		if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
		{
			/* Snapshot the lock state before copying - it decides whether
			the event list may be touched below. */
			const int8_t cTxLock = pxQueue->cTxLock;

			traceQUEUE_SEND_FROM_ISR( pxQueue );

			/* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
			semaphore or mutex.  That means prvCopyDataToQueue() cannot result
			in a task disinheriting a priority and prvCopyDataToQueue() can be
			called here even though the disinherit function does not check if
			the scheduler is suspended before accessing the ready lists. */
			( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

			/* The event list is not altered if the queue is locked.  This will
			be done when the queue is unlocked later. */
			if( cTxLock == queueUNLOCKED )
			{
				/* Queue-set support is compiled out in this port. */
				#if 1
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
				{
					/* The task waiting has a higher priority so record that a
					context	switch is required. */
					if( pxHigherPriorityTaskWoken != NULL )
					{
						*pxHigherPriorityTaskWoken = pdTRUE;
					}
				}
				#endif /* configUSE_QUEUE_SETS */
			}
			else
			{
				/* Increment the lock count so the task that unlocks the queue
				knows that data was posted while it was locked. */
				pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
			}

			xReturn = pdPASS;
		}
		else
		{
			traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
			xReturn = errQUEUE_FULL;
		}

		taskEXIT_CRITICAL_ISR(&pxQueue->mux);
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
960 /*-----------------------------------------------------------*/
961 
/*
 * ISR-safe 'give' for semaphores (queues whose item size is 0).  Simply
 * increments the count - no data is copied.  Never blocks; returns
 * errQUEUE_FULL when the count is already at its maximum, otherwise pdPASS.
 * *pxHigherPriorityTaskWoken (may be NULL) is set to pdTRUE when the give
 * unblocked a higher priority task.
 */
BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = xQueue;

	/* Similar to xQueueGenericSendFromISR() but used with semaphores where the
	item size is 0.  Don't directly wake a task that was blocked on a queue
	read, instead return a flag to say whether a context switch is required or
	not (i.e. has a task with a higher priority than us been woken by this
	post). */

	configASSERT( pxQueue );

	/* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
	if the item size is not 0. */
	configASSERT( pxQueue->uxItemSize == 0 );

	/* Normally a mutex would not be given from an interrupt, especially if
	there is a mutex holder, as priority inheritance makes no sense for an
	interrupts, only tasks. */
	configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->u.xSemaphore.xMutexHolder != NULL ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call	interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		taskENTER_CRITICAL_ISR(&pxQueue->mux);

		const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

		/* When the queue is used to implement a semaphore no data is ever
		moved through the queue but it is still valid to see if the queue 'has
		space'. */
		if( uxMessagesWaiting < pxQueue->uxLength )
		{
			/* Snapshot the lock state - it decides whether the event list
			may be touched below. */
			const int8_t cTxLock = pxQueue->cTxLock;

			traceQUEUE_GIVE_FROM_ISR( pxQueue );

			/* A task can only have an inherited priority if it is a mutex
			holder - and if there is a mutex holder then the mutex cannot be
			given from an ISR.  As this is the ISR version of the function it
			can be assumed there is no mutex holder and no need to determine if
			priority disinheritance is needed.  Simply increase the count of
			messages (semaphores) available. */
			pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1;

			/* The event list is not altered if the queue is locked.  This will
			be done when the queue is unlocked later. */
			if( cTxLock == queueUNLOCKED )
			{
				/* Queue-set support is compiled out in this port. */
				#if 1
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
				{
					/* The task waiting has a higher priority so record that a
					context	switch is required. */
					if( pxHigherPriorityTaskWoken != NULL )
					{
						*pxHigherPriorityTaskWoken = pdTRUE;
					}
				}
				#endif /* configUSE_QUEUE_SETS */
			}
			else
			{
				/* Increment the lock count so the task that unlocks the queue
				knows that data was posted while it was locked. */
				pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
			}

			xReturn = pdPASS;
		}
		else
		{
			traceQUEUE_GIVE_FROM_ISR_FAILED( pxQueue );
			xReturn = errQUEUE_FULL;
		}
		taskEXIT_CRITICAL_ISR(&pxQueue->mux);
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
1060 /*-----------------------------------------------------------*/
1061 
/*
 * Receive (copy out and remove) one item from a queue in task context,
 * blocking for up to xTicksToWait ticks while the queue is empty.
 *
 * pvBuffer  Destination for the received item; may be NULL only when the
 *           queue's item size is 0.
 *
 * Returns pdPASS when an item was received, errQUEUE_EMPTY on timeout or
 * when the queue is empty and no block time was specified.
 */
BaseType_t xQueueReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = xQueue;

	/* Check the pointer is not NULL. */
	configASSERT( ( pxQueue ) );

	/* The buffer into which data is received can only be NULL if the data size
	is zero (so no data is copied into the buffer. */
	configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );

	/* Cannot block if the scheduler is suspended. */
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif


	/*lint -save -e904  This function relaxes the coding standard somewhat to
	allow return statements within the function itself.  This is done in the
	interest of execution time efficiency. */
	for( ;; )
	{
		taskENTER_CRITICAL( &pxQueue->mux);
		{
			const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

			/* Is there data in the queue now?  To be running the calling task
			must be the highest priority task wanting to access the queue. */
			if( uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* Data available, remove one item. */
				prvCopyDataFromQueue( pxQueue, pvBuffer );
				traceQUEUE_RECEIVE( pxQueue );
				pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;

				/* There is now space in the queue, were any tasks waiting to
				post to the queue?  If so, unblock the highest priority waiting
				task. */
				xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) );

				taskEXIT_CRITICAL( &pxQueue->mux);
				return pdPASS;
			}
			else
			{
				if( xTicksToWait == ( TickType_t ) 0 )
				{
					/* The queue was empty and no block time is specified (or
					the block time has expired) so leave now. */
					taskEXIT_CRITICAL( &pxQueue->mux);
					traceQUEUE_RECEIVE_FAILED( pxQueue );
					return errQUEUE_EMPTY;
				}
				else if( xEntryTimeSet == pdFALSE )
				{
					/* The queue was empty and a block time was specified so
					configure the timeout structure. */
					vTaskInternalSetTimeOutState( &xTimeOut );
					xEntryTimeSet = pdTRUE;
				}
			}
		}
		taskEXIT_CRITICAL( &pxQueue->mux);

		/* Interrupts and other tasks can send to and receive from the queue
		now the critical section has been exited. */

		taskENTER_CRITICAL( &pxQueue->mux);
		prvLockQueue( pxQueue );

		/* Update the timeout state to see if it has expired yet. */
		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
		{
			/* The timeout has not expired.  If the queue is still empty place
			the task on the list of tasks waiting to receive from the queue. */
			if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
			{
				traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
				prvUnlockQueue( pxQueue );
				taskEXIT_CRITICAL( &pxQueue->mux);
				portYIELD_WITHIN_API();
			}
			else
			{
				/* The queue contains data again.  Loop back to try and read the
				data. */
				prvUnlockQueue( pxQueue );
				taskEXIT_CRITICAL( &pxQueue->mux);
			}
		}
		else
		{
			/* Timed out.  If there is no data in the queue exit, otherwise loop
			back and attempt to read the data. */
			prvUnlockQueue( pxQueue );
			taskEXIT_CRITICAL( &pxQueue->mux);

			if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
			{
				traceQUEUE_RECEIVE_FAILED( pxQueue );
				return errQUEUE_EMPTY;
			}
		}
	} /*lint -restore */
}
1172 /*-----------------------------------------------------------*/
1173 
/*
 * Take a semaphore or mutex (a queue with item size 0) in task context,
 * blocking for up to xTicksToWait ticks while the count is zero.  For a
 * mutex this also records the new holder and applies priority inheritance
 * while blocked, undoing it if the wait times out.
 *
 * Returns pdPASS on success, errQUEUE_EMPTY on timeout or when the count is
 * zero and no block time was specified.
 */
BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, TickType_t xTicksToWait )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = xQueue;

#if( configUSE_MUTEXES == 1 )
	BaseType_t xInheritanceOccurred = pdFALSE;
#endif

	/* Check the queue pointer is not NULL. */
	configASSERT( ( pxQueue ) );

	/* Check this really is a semaphore, in which case the item size will be
	0. */
	configASSERT( pxQueue->uxItemSize == 0 );

	/* Cannot block if the scheduler is suspended. */
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif


	/*lint -save -e904 This function relaxes the coding standard somewhat to allow return
	statements within the function itself.  This is done in the interest
	of execution time efficiency. */
	for( ;; )
	{
		taskENTER_CRITICAL( &pxQueue->mux);
		{
			/* Semaphores are queues with an item size of 0, and where the
			number of messages in the queue is the semaphore's count value. */
			const UBaseType_t uxSemaphoreCount = pxQueue->uxMessagesWaiting;

			/* Is there data in the queue now?  To be running the calling task
			must be the highest priority task wanting to access the queue. */
			if( uxSemaphoreCount > ( UBaseType_t ) 0 )
			{
				traceQUEUE_SEMAPHORE_RECEIVE( pxQueue );

				/* Semaphores are queues with a data size of zero and where the
				messages waiting is the semaphore's count.  Reduce the count. */
				pxQueue->uxMessagesWaiting = uxSemaphoreCount - ( UBaseType_t ) 1;

				#if ( configUSE_MUTEXES == 1 )
				{
					if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
					{
						/* Record the information required to implement
						priority inheritance should it become necessary. */
						pxQueue->u.xSemaphore.xMutexHolder = pvTaskIncrementMutexHeldCount();
					}
				}
				#endif /* configUSE_MUTEXES */

				/* Check to see if other tasks are blocked waiting to give the
				semaphore, and if so, unblock the highest priority such task. */
				xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) );

				taskEXIT_CRITICAL( &pxQueue->mux);
				return pdPASS;
			}
			else
			{
				if( xTicksToWait == ( TickType_t ) 0 )
				{
					/* For inheritance to have occurred there must have been an
					initial timeout, and an adjusted timeout cannot become 0, as
					if it were 0 the function would have exited. */
					#if( configUSE_MUTEXES == 1 )
					{
						configASSERT( xInheritanceOccurred == pdFALSE );
					}
					#endif /* configUSE_MUTEXES */

					/* The semaphore count was 0 and no block time is specified
					(or the block time has expired) so exit now. */
					taskEXIT_CRITICAL( &pxQueue->mux);
					traceQUEUE_RECEIVE_FAILED( pxQueue );
					return errQUEUE_EMPTY;
				}
				else if( xEntryTimeSet == pdFALSE )
				{
					/* The semaphore count was 0 and a block time was specified
					so configure the timeout structure ready to block. */
					vTaskInternalSetTimeOutState( &xTimeOut );
					xEntryTimeSet = pdTRUE;
				}
			}
		}
		taskEXIT_CRITICAL( &pxQueue->mux);

		/* Interrupts and other tasks can give to and take from the semaphore
		now the critical section has been exited. */

		taskENTER_CRITICAL( &pxQueue->mux);
		prvLockQueue( pxQueue );

		/* Update the timeout state to see if it has expired yet. */
		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
		{
			/* A block time is specified and not expired.  If the semaphore
			count is 0 then enter the Blocked state to wait for a semaphore to
			become available.  As semaphores are implemented with queues the
			queue being empty is equivalent to the semaphore count being 0. */
			if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
			{
				traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

				#if ( configUSE_MUTEXES == 1 )
				{
					if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) { /* Priority inheritance: raise the holder to at least this task's priority. */
						xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder );
					}
				}
				#endif

				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
				prvUnlockQueue( pxQueue );
				taskEXIT_CRITICAL( &pxQueue->mux);
				portYIELD_WITHIN_API();
			}
			else
			{
				/* There was no timeout and the semaphore count was not 0, so
				attempt to take the semaphore again. */
				prvUnlockQueue( pxQueue );
				taskEXIT_CRITICAL( &pxQueue->mux);
			}
		}
		else
		{
			/* Timed out. */
			prvUnlockQueue( pxQueue );
			taskEXIT_CRITICAL( &pxQueue->mux);

			/* If the semaphore count is 0 exit now as the timeout has
			expired.  Otherwise return to attempt to take the semaphore that is
			known to be available.  As semaphores are implemented by queues the
			queue being empty is equivalent to the semaphore count being 0. */
			if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
			{
				#if ( configUSE_MUTEXES == 1 )
				{
					/* xInheritanceOccurred could only have be set if
					pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to
					test the mutex type again to check it is actually a mutex. */
					if( xInheritanceOccurred != pdFALSE )
					{
						taskENTER_CRITICAL( &pxQueue->mux);
						{
							UBaseType_t uxHighestWaitingPriority;

							/* Restore (disinherit) the priority that was
							inherited when this task blocked, but only down to
							the highest priority of any task still waiting. */
							uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue );
							vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority );
						}
						taskEXIT_CRITICAL( &pxQueue->mux);
					}
				}
				#endif /* configUSE_MUTEXES */

				traceQUEUE_RECEIVE_FAILED( pxQueue );
				return errQUEUE_EMPTY;
			}
		}
	} /*lint -restore */
}
1344 /*-----------------------------------------------------------*/
1345 
/*
 * ISR-safe receive.  Never blocks; returns pdFAIL when the queue is empty,
 * pdPASS when an item was copied into pvBuffer.  When removing the item
 * unblocked a higher priority task waiting to send,
 * *pxHigherPriorityTaskWoken (may be NULL) is set to pdTRUE.
 */
BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call	interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		taskENTER_CRITICAL_ISR(&pxQueue->mux);

		const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

		/* Cannot block in an ISR, so check there is data available. */
		if( uxMessagesWaiting > ( UBaseType_t ) 0 )
		{
			/* Snapshot the receive-side lock state - it decides whether the
			event list may be touched below. */
			const int8_t cRxLock = pxQueue->cRxLock;

			traceQUEUE_RECEIVE_FROM_ISR( pxQueue );

			prvCopyDataFromQueue( pxQueue, pvBuffer );
			pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;

			/* If the queue is locked the event list will not be modified.
			Instead update the lock count so the task that unlocks the queue
			will know that an ISR has removed data while the queue was
			locked. */
			if( cRxLock == queueUNLOCKED )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
				{
					/* The unblocked task has a higher priority - request a
					context switch on interrupt exit. */
					if( pxHigherPriorityTaskWoken != NULL )
					{
						*pxHigherPriorityTaskWoken = pdTRUE;
					}
				}
			}
			else
			{
				/* Increment the lock count so the task that unlocks the queue
				knows that data was removed while it was locked. */
				pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 );
			}

			xReturn = pdPASS;
		}
		else
		{
			xReturn = pdFAIL;
			traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
		}
		taskEXIT_CRITICAL_ISR(&pxQueue->mux);
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
1421 /*-----------------------------------------------------------*/
uxQueueMessagesWaiting(const QueueHandle_t xQueue)1422 UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
1423 {
1424 UBaseType_t uxReturn;
1425 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1426 
1427 	configASSERT( xQueue );
1428 
1429 	taskENTER_CRITICAL( &pxQueue->mux);
1430 	{
1431 		uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
1432 	}
1433 	taskEXIT_CRITICAL( &pxQueue->mux);
1434 
1435 	return uxReturn;
1436 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
1437 /*-----------------------------------------------------------*/
/*
 * Delete a queue (or the semaphore/mutex built on it): unregister it from
 * the queue registry, then free its storage if - and only if - it was
 * allocated dynamically.  The caller must ensure no task is still using or
 * blocked on the queue.
 */
void vQueueDelete( QueueHandle_t xQueue )
{
Queue_t * const pxQueue = xQueue;

	configASSERT( pxQueue );
	traceQUEUE_DELETE( pxQueue );

	#if ( configQUEUE_REGISTRY_SIZE > 0 )
	{
		/* Remove any registry entry so debug tools do not show a stale
		name for freed memory. */
		vQueueUnregisterQueue( pxQueue );
	}
	#endif

	#if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
	{
		/* The queue can only have been allocated dynamically - free it
		again. */
		vPortFree( pxQueue );
	}
	#elif( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
	{
		/* The queue could have been allocated statically or dynamically, so
		check before attempting to free the memory. */
		if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
		{
			vPortFree( pxQueue );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	#else
	{
		/* The queue must have been statically allocated, so is not going to be
		deleted.  Avoid compiler warnings about the unused parameter. */
		( void ) pxQueue;
	}
	#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
}
1478 /*-----------------------------------------------------------*/
1479 
#if( configUSE_MUTEXES == 1 )

	/*
	 * Return the priority to which a mutex holder should be disinherited
	 * after a waiting task times out: the priority of the highest priority
	 * task still waiting on the mutex, or OS_TASK_PRIORITY_LOWEST when no
	 * task is waiting.  Called with the queue's critical section held.
	 *
	 * (Cleanup: removed the dead '#if 0' branch that referenced the upstream
	 * listCURRENT_LIST_LENGTH macro and the commented-out upstream priority
	 * calculation - this port uses the LiteOS pend list directly.)
	 */
	static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue )
	{
	UBaseType_t uxHighestPriorityOfWaitingTasks;

		/* If a task waiting for a mutex causes the mutex holder to inherit a
		priority, but the waiting task times out, then the holder should
		disinherit the priority - but only down to the highest priority of any
		other tasks that are waiting for the same mutex.  For this purpose,
		return the priority of the highest priority task that is waiting for the
		mutex. */
		if( !LOS_ListEmpty( &( pxQueue->xTasksWaitingToReceive ) ) )
		{
			/* NOTE(review): assumes the first entry of the pend list is the
			highest priority waiter (priority-ordered pend list) - confirm
			against the LiteOS scheduler's list ordering. */
			LosTaskCB *pxTCB = OS_TCB_FROM_PENDLIST( LOS_DL_LIST_FIRST( &( pxQueue->xTasksWaitingToReceive ) ) );
			uxHighestPriorityOfWaitingTasks = ( pxTCB != NULL ) ? pxTCB->priority : OS_TASK_PRIORITY_LOWEST;
		}
		else
		{
			/* Nobody is waiting - the holder can drop all the way back. */
			uxHighestPriorityOfWaitingTasks = OS_TASK_PRIORITY_LOWEST;
		}

		return uxHighestPriorityOfWaitingTasks;
	}

#endif /* configUSE_MUTEXES */
1510 /*-----------------------------------------------------------*/
1511 
/*
 * Copy one item into the queue's storage area, or - for zero-item-size
 * queues (semaphores/mutexes) - just account for the 'give'.  Must be
 * called with the queue's critical section held.
 *
 * xPosition  queueSEND_TO_BACK, queueSEND_TO_FRONT, or queueOVERWRITE.
 *
 * Returns pdTRUE if giving a mutex caused the holder to disinherit a
 * priority, otherwise pdFALSE.
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
{
BaseType_t xReturn = pdFALSE;
UBaseType_t uxMessagesWaiting;

	/* This function is called from a critical section. */

	uxMessagesWaiting = pxQueue->uxMessagesWaiting;

	if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
	{
		#if ( configUSE_MUTEXES == 1 )
		{
			if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
			{
				/* The mutex is no longer being held. */
				/* NOTE(review): upstream FreeRTOS disinherits unconditionally
				here; this port gates it on the count being 1 - confirm this
				matches the LiteOS mutex accounting. */
				if (1 == uxMessagesWaiting) {
					xReturn = xTaskPriorityDisinherit( pxQueue->u.xSemaphore.xMutexHolder );
				}
				pxQueue->u.xSemaphore.xMutexHolder = NULL;
			}
		}
		#endif /* configUSE_MUTEXES */
	}
	else if( xPosition == queueSEND_TO_BACK )
	{
		( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0.  Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */
		pxQueue->pcWriteTo += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */
		if( pxQueue->pcWriteTo >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			/* Wrap the write pointer back to the start of the storage. */
			pxQueue->pcWriteTo = pxQueue->pcHead;
		}
	}
	else
	{
		/* Sending to the front (or overwriting): write at the read pointer
		and move it backwards so the new item is read next. */
		( void ) memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e9087 !e418 MISRA exception as the casts are only redundant for some ports.  Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes.  Assert checks null pointer only used when length is 0. */
		pxQueue->u.xQueue.pcReadFrom -= pxQueue->uxItemSize;
		if( pxQueue->u.xQueue.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
		{
			pxQueue->u.xQueue.pcReadFrom = ( pxQueue->u.xQueue.pcTail - pxQueue->uxItemSize );
		}

		if( xPosition == queueOVERWRITE )
		{
			if( uxMessagesWaiting > ( UBaseType_t ) 0 )
			{
				/* An item is not being added but overwritten, so subtract
				one from the recorded number of items in the queue so when
				one is added again below the number of recorded items remains
				correct. */
				--uxMessagesWaiting;
			}
		}
	}

	pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1;

	return xReturn;
}
1571 /*-----------------------------------------------------------*/
1572 
/*
 * Copy the oldest item out of the queue's storage area into pvBuffer.
 * Queues with a zero item size (semaphores/mutexes) carry no payload, so
 * nothing is copied for them.  Called from within a critical section.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
{
	if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
	{
		return;
	}

	/* Advance the read pointer first, wrapping back to the start of the
	storage area when it runs past the end. */
	pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */
	if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solutions. */
	{
		pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
	}

	( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0.  Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */
}
1585 /*-----------------------------------------------------------*/
1586 
/*
 * Re-enable event-list updates after the queue was locked.
 *
 * While a queue is locked, sends/receives may still move data but must not
 * touch the event lists; instead they bump cTxLock/cRxLock.  This function
 * replays those deferred wake-ups: for each deferred operation it unblocks
 * one waiting task (if any), recording a missed yield when the unblocked
 * task has a higher priority.  THIS FUNCTION MUST BE CALLED WITH THE
 * SCHEDULER SUSPENDED - tasks removed from an event list are parked on the
 * pending ready list until the scheduler resumes.
 *
 * Fix: removed the dead `#if 0`/`#if 1` preprocessor branches (the live
 * branch always used LOS_ListEmpty()) and the stale
 * `#endif / * configUSE_QUEUE_SETS * /` trailer that no longer matched any
 * configUSE_QUEUE_SETS conditional.  Behavior is unchanged.
 */
static void prvUnlockQueue( Queue_t * const pxQueue )
{
	/* The lock counts contain the number of extra data items placed on or
	removed from the queue while the queue was locked. */
	taskENTER_CRITICAL( &pxQueue->mux );
	{
		int8_t cTxLock = pxQueue->cTxLock;

		/* See if data was added to the queue while it was locked. */
		while( cTxLock > queueLOCKED_UNMODIFIED )
		{
			/* Data was posted while the queue was locked.  Are any tasks
			blocked waiting for data to become available? */
			if( !LOS_ListEmpty( &( pxQueue->xTasksWaitingToReceive ) ) )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
				{
					/* The task waiting has a higher priority so record that
					a context switch is required. */
					vTaskMissedYield();
				}
			}
			else
			{
				break;
			}

			--cTxLock;
		}

		pxQueue->cTxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL( &pxQueue->mux );

	/* Do the same for the Rx lock. */
	taskENTER_CRITICAL( &pxQueue->mux );
	{
		int8_t cRxLock = pxQueue->cRxLock;

		while( cRxLock > queueLOCKED_UNMODIFIED )
		{
			if( !LOS_ListEmpty( &( pxQueue->xTasksWaitingToSend ) ) )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
				{
					vTaskMissedYield();
				}

				--cRxLock;
			}
			else
			{
				break;
			}
		}

		pxQueue->cRxLock = queueUNLOCKED;
	}
	taskEXIT_CRITICAL( &pxQueue->mux );
}
1665 /*-----------------------------------------------------------*/
1666 
prvIsQueueEmpty(const Queue_t * pxQueue)1667 static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )
1668 {
1669 BaseType_t xReturn;
1670 Queue_t *pxQ = (Queue_t *)pxQueue;
1671 	taskENTER_CRITICAL( &pxQ->mux );
1672 	{
1673 		if( pxQueue->uxMessagesWaiting == ( UBaseType_t )  0 )
1674 		{
1675 			xReturn = pdTRUE;
1676 		}
1677 		else
1678 		{
1679 			xReturn = pdFALSE;
1680 		}
1681 	}
1682 	taskEXIT_CRITICAL( &pxQ->mux );
1683 
1684 	return xReturn;
1685 }
1686 /*-----------------------------------------------------------*/
1687 
xQueueIsQueueEmptyFromISR(const QueueHandle_t xQueue)1688 BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
1689 {
1690 BaseType_t xReturn;
1691 Queue_t * const pxQueue = xQueue;
1692 
1693 	configASSERT( pxQueue );
1694 	if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
1695 	{
1696 		xReturn = pdTRUE;
1697 	}
1698 	else
1699 	{
1700 		xReturn = pdFALSE;
1701 	}
1702 
1703 	return xReturn;
1704 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
1705 /*-----------------------------------------------------------*/
1706 
prvIsQueueFull(const Queue_t * pxQueue)1707 static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )
1708 {
1709 BaseType_t xReturn;
1710 
1711 	{
1712 		if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
1713 		{
1714 			xReturn = pdTRUE;
1715 		}
1716 		else
1717 		{
1718 			xReturn = pdFALSE;
1719 		}
1720 	}
1721 
1722 	return xReturn;
1723 }
1724 /*-----------------------------------------------------------*/
1725 
xQueueIsQueueFullFromISR(const QueueHandle_t xQueue)1726 BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
1727 {
1728 BaseType_t xReturn;
1729 Queue_t * const pxQueue = xQueue;
1730 
1731 	configASSERT( pxQueue );
1732 	if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
1733 	{
1734 		xReturn = pdTRUE;
1735 	}
1736 	else
1737 	{
1738 		xReturn = pdFALSE;
1739 	}
1740 
1741 	return xReturn;
1742 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
1743 /*-----------------------------------------------------------*/
1744 
1745