1 // Copyright 2015-2019 Espressif Systems (Shanghai) PTE LTD
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 #include <stdlib.h>
16 #include <string.h>
17 #include "esp_osal/esp_osal.h"
18 #include "esp_osal/task.h"
19 #include "esp_osal/semphr.h"
20 #include "ringbuf.h"
21
22 //32-bit alignment macros
23 #define rbALIGN_MASK (0x03)
24 #define rbALIGN_SIZE( xSize ) ( ( xSize + rbALIGN_MASK ) & ~rbALIGN_MASK )
25 #define rbCHECK_ALIGNED( pvPtr ) ( ( ( UBaseType_t ) ( pvPtr ) & rbALIGN_MASK ) == 0 )
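/*
 * Worked example of the alignment macros above (illustrative only): with a
 * 32-bit UBaseType_t, rbALIGN_SIZE(13) == (13 + 3) & ~3 == 16 and
 * rbALIGN_SIZE(16) == 16, i.e. sizes are rounded up to the next 32-bit
 * boundary, while rbCHECK_ALIGNED(pvPtr) is non-zero only when the two least
 * significant address bits are zero.
 */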
26
27 //Ring buffer flags
28 #define rbALLOW_SPLIT_FLAG ( ( UBaseType_t ) 1 ) //The ring buffer allows items to be split
29 #define rbBYTE_BUFFER_FLAG ( ( UBaseType_t ) 2 ) //The ring buffer is a byte buffer
30 #define rbBUFFER_FULL_FLAG ( ( UBaseType_t ) 4 ) //The ring buffer is currently full (acquire pointer == free pointer)
31 #define rbBUFFER_STATIC_FLAG ( ( UBaseType_t ) 8 ) //The ring buffer is statically allocated
32
33 //Item flags
34 #define rbITEM_FREE_FLAG ( ( UBaseType_t ) 1 ) //Item has been retrieved and returned by application, free to overwrite
35 #define rbITEM_DUMMY_DATA_FLAG ( ( UBaseType_t ) 2 ) //Data from here to the end of the ring buffer is dummy data. Restart reading at the head of the buffer
36 #define rbITEM_SPLIT_FLAG ( ( UBaseType_t ) 4 ) //Valid for RINGBUF_TYPE_ALLOWSPLIT, indicating that the rest of the data is wrapped around
37 #define rbITEM_WRITTEN_FLAG ( ( UBaseType_t ) 8 ) //Item has been written to by the application, thus it is free to be read
38
39 //Static allocation related
40 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
41 #define rbGET_TX_SEM_HANDLE( pxRingbuffer ) ( (SemaphoreHandle_t) &(pxRingbuffer->xTransSemStatic) )
42 #define rbGET_RX_SEM_HANDLE( pxRingbuffer ) ( (SemaphoreHandle_t) &(pxRingbuffer->xRecvSemStatic) )
43 #else
44 #define rbGET_TX_SEM_HANDLE( pxRingbuffer ) ( pxRingbuffer->xTransSemHandle )
45 #define rbGET_RX_SEM_HANDLE( pxRingbuffer ) ( pxRingbuffer->xRecvSemHandle )
46 #endif
47
48 typedef struct {
49 //The size of this structure must be 32-bit aligned
50 size_t xItemLen;
51 UBaseType_t uxItemFlags;
52 } ItemHeader_t;
53
54 #define rbHEADER_SIZE sizeof(ItemHeader_t)
55 typedef struct RingbufferDefinition Ringbuffer_t;
56 typedef BaseType_t (*CheckItemFitsFunction_t)(Ringbuffer_t *pxRingbuffer, size_t xItemSize);
57 typedef void (*CopyItemFunction_t)(Ringbuffer_t *pxRingbuffer, const uint8_t *pcItem, size_t xItemSize);
58 typedef BaseType_t (*CheckItemAvailFunction_t) (Ringbuffer_t *pxRingbuffer);
59 typedef void *(*GetItemFunction_t)(Ringbuffer_t *pxRingbuffer, BaseType_t *pxIsSplit, size_t xMaxSize, size_t *pxItemSize);
60 typedef void (*ReturnItemFunction_t)(Ringbuffer_t *pxRingbuffer, uint8_t *pvItem);
61 typedef size_t (*GetCurMaxSizeFunction_t)(Ringbuffer_t *pxRingbuffer);
62
63 typedef struct RingbufferDefinition {
64 size_t xSize; //Size of the data storage
65 size_t xMaxItemSize; //Maximum item size
66 UBaseType_t uxRingbufferFlags; //Flags to indicate the type and status of ring buffer
67
68 CheckItemFitsFunction_t xCheckItemFits; //Function to check if item can currently fit in ring buffer
69 CopyItemFunction_t vCopyItem; //Function to copy item to ring buffer
70 GetItemFunction_t pvGetItem; //Function to get item from ring buffer
71 ReturnItemFunction_t vReturnItem; //Function to return item to ring buffer
72 GetCurMaxSizeFunction_t xGetCurMaxSize; //Function to get current free size
73
74 uint8_t *pucAcquire; //Acquire Pointer. Points to where the next item should be acquired.
75 uint8_t *pucWrite; //Write Pointer. Points to where the next item should be written
76 uint8_t *pucRead; //Read Pointer. Points to where the next item should be read from
77 uint8_t *pucFree; //Free Pointer. Points to the last item that has yet to be returned to the ring buffer
78 uint8_t *pucHead; //Pointer to the start of the ring buffer storage area
79 uint8_t *pucTail; //Pointer to the end of the ring buffer storage area
80
81 BaseType_t xItemsWaiting; //Number of items/bytes(for byte buffers) currently in ring buffer that have not yet been read
82 /*
83 * TransSem: Binary semaphore used to indicate to a blocked transmitting task
84 * that more free space has become available or that the block has
85 * timed out.
86 *
87 * RecvSem: Binary semaphore used to indicate to a blocked receiving task that
88 * new data/item has been written to the ring buffer.
89 *
90 * Note - When static allocation is enabled, the two semaphores are always
91 * statically stored in the ring buffer's control structure
92 * regardless of whether the ring buffer is allocated dynamically or
93 * statically. When static allocation is disabled, the two semaphores
94 * are allocated dynamically and their handles stored instead, thus
95 * making the ring buffer's control structure slightly smaller when
96 * static allocation is disabled.
97 */
98 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
99 StaticSemaphore_t xTransSemStatic;
100 StaticSemaphore_t xRecvSemStatic;
101 #else
102 SemaphoreHandle_t xTransSemHandle;
103 SemaphoreHandle_t xRecvSemHandle;
104 #endif
105 portMUX_TYPE mux; //Spinlock required for SMP
106 } Ringbuffer_t;
107
108 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
109 #if __GNUC_PREREQ(4, 6)
110 _Static_assert(sizeof(StaticRingbuffer_t) == sizeof(Ringbuffer_t), "StaticRingbuffer_t != Ringbuffer_t");
111 #endif
112 #endif
113 /*
114 Remark: A counting semaphore for items_buffered_sem would be more logical, but counting semaphores in
115 FreeRTOS need a maximum count, and allocate more memory the larger the maximum count is. Here, we
116 would need to set the maximum to the maximum number of times a null-byte unit fits in the buffer,
117 which is quite high and so would waste a fair amount of memory.
118 */
119
120 /* --------------------------- Static Declarations -------------------------- */
121 /*
122 * WARNING: All of the following static functions (except generic functions)
123 * ARE NOT THREAD SAFE. Therefore they should only be called within a critical
124 * section (using spin locks)
125 */
126
127
128 //Initialize a ring buffer after space has been allocated for it
129 static void prvInitializeNewRingbuffer(size_t xBufferSize,
130 RingbufferType_t xBufferType,
131 Ringbuffer_t *pxNewRingbuffer,
132 uint8_t *pucRingbufferStorage);
133
134 //Calculate current amount of free space (in bytes) in the ring buffer
135 static size_t prvGetFreeSize(Ringbuffer_t *pxRingbuffer);
136
137 //Checks if an item/data is currently available for retrieval
138 static BaseType_t prvCheckItemAvail(Ringbuffer_t *pxRingbuffer);
139
140 //Checks if an item will currently fit in a no-split/allow-split ring buffer
141 static BaseType_t prvCheckItemFitsDefault( Ringbuffer_t *pxRingbuffer, size_t xItemSize);
142
143 //Checks if an item will currently fit in a byte buffer
144 static BaseType_t prvCheckItemFitsByteBuffer( Ringbuffer_t *pxRingbuffer, size_t xItemSize);
145
146 //Copies an item to a no-split ring buffer. Only call this function after calling prvCheckItemFitsDefault()
147 static void prvCopyItemNoSplit(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize);
148
149 //Copies an item to an allow-split ring buffer. Only call this function after calling prvCheckItemFitsDefault()
150 static void prvCopyItemAllowSplit(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize);
151
152 //Copies an item to a byte buffer. Only call this function after calling prvCheckItemFitsByteBuffer()
153 static void prvCopyItemByteBuf(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize);
154
155 //Retrieve item from no-split/allow-split ring buffer. *pxIsSplit is set to pdTRUE if the retrieved item is split
156 static void *prvGetItemDefault(Ringbuffer_t *pxRingbuffer,
157 BaseType_t *pxIsSplit,
158 size_t xUnusedParam,
159 size_t *pxItemSize);
160
161 //Retrieve data from byte buffer. If xMaxSize is 0, all contiguous data is retrieved
162 static void *prvGetItemByteBuf(Ringbuffer_t *pxRingbuffer,
163 BaseType_t *pxUnusedParam,
164 size_t xMaxSize,
165 size_t *pxItemSize);
166
167 //Return an item to a split/no-split ring buffer
168 static void prvReturnItemDefault(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem);
169
170 //Return data to a byte buffer
171 static void prvReturnItemByteBuf(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem);
172
173 //Get the maximum size an item can currently have if sent to a no-split ring buffer
174 static size_t prvGetCurMaxSizeNoSplit(Ringbuffer_t *pxRingbuffer);
175
176 //Get the maximum size an item can currently have if sent to an allow-split ring buffer
177 static size_t prvGetCurMaxSizeAllowSplit(Ringbuffer_t *pxRingbuffer);
178
179 //Get the maximum size an item can currently have if sent to a byte buffer
180 static size_t prvGetCurMaxSizeByteBuf(Ringbuffer_t *pxRingbuffer);
181
182 /**
183 * Generic function used to retrieve an item/data from ring buffers. If called on
184 * an allow-split buffer, and pvItem2 and xItemSize2 are not NULL, both parts of
185 * a split item will be retrieved. xMaxSize will only take effect if called on
186 * byte buffers.
187 */
188 static BaseType_t prvReceiveGeneric(Ringbuffer_t *pxRingbuffer,
189 void **pvItem1,
190 void **pvItem2,
191 size_t *xItemSize1,
192 size_t *xItemSize2,
193 size_t xMaxSize,
194 TickType_t xTicksToWait);
195
196 //Generic function used to retrieve an item/data from ring buffers in an ISR
197 static BaseType_t prvReceiveGenericFromISR(Ringbuffer_t *pxRingbuffer,
198 void **pvItem1,
199 void **pvItem2,
200 size_t *xItemSize1,
201 size_t *xItemSize2,
202 size_t xMaxSize);
203
204 /* --------------------------- Static Definitions --------------------------- */
205
206 static void prvInitializeNewRingbuffer(size_t xBufferSize,
207 RingbufferType_t xBufferType,
208 Ringbuffer_t *pxNewRingbuffer,
209 uint8_t *pucRingbufferStorage)
210 {
211 //Initialize values
212 pxNewRingbuffer->xSize = xBufferSize;
213 pxNewRingbuffer->pucHead = pucRingbufferStorage;
214 pxNewRingbuffer->pucTail = pucRingbufferStorage + xBufferSize;
215 pxNewRingbuffer->pucFree = pucRingbufferStorage;
216 pxNewRingbuffer->pucRead = pucRingbufferStorage;
217 pxNewRingbuffer->pucWrite = pucRingbufferStorage;
218 pxNewRingbuffer->pucAcquire = pucRingbufferStorage;
219 pxNewRingbuffer->xItemsWaiting = 0;
220 pxNewRingbuffer->uxRingbufferFlags = 0;
221
222 //Initialize type dependent values and function pointers
223 if (xBufferType == RINGBUF_TYPE_NOSPLIT) {
224 pxNewRingbuffer->xCheckItemFits = prvCheckItemFitsDefault;
225 pxNewRingbuffer->vCopyItem = prvCopyItemNoSplit;
226 pxNewRingbuffer->pvGetItem = prvGetItemDefault;
227 pxNewRingbuffer->vReturnItem = prvReturnItemDefault;
228 /*
229 * Worst case scenario is when the read/write/acquire/free pointers are all
230 * pointing to the halfway point of the buffer.
231 */
232 pxNewRingbuffer->xMaxItemSize = rbALIGN_SIZE(pxNewRingbuffer->xSize / 2) - rbHEADER_SIZE;
233 pxNewRingbuffer->xGetCurMaxSize = prvGetCurMaxSizeNoSplit;
234 } else if (xBufferType == RINGBUF_TYPE_ALLOWSPLIT) {
235 pxNewRingbuffer->uxRingbufferFlags |= rbALLOW_SPLIT_FLAG;
236 pxNewRingbuffer->xCheckItemFits = prvCheckItemFitsDefault;
237 pxNewRingbuffer->vCopyItem = prvCopyItemAllowSplit;
238 pxNewRingbuffer->pvGetItem = prvGetItemDefault;
239 pxNewRingbuffer->vReturnItem = prvReturnItemDefault;
240 //Worst case an item is split into two, incurring two headers of overhead
241 pxNewRingbuffer->xMaxItemSize = pxNewRingbuffer->xSize - (sizeof(ItemHeader_t) * 2);
242 pxNewRingbuffer->xGetCurMaxSize = prvGetCurMaxSizeAllowSplit;
243 } else { //Byte Buffer
244 pxNewRingbuffer->uxRingbufferFlags |= rbBYTE_BUFFER_FLAG;
245 pxNewRingbuffer->xCheckItemFits = prvCheckItemFitsByteBuffer;
246 pxNewRingbuffer->vCopyItem = prvCopyItemByteBuf;
247 pxNewRingbuffer->pvGetItem = prvGetItemByteBuf;
248 pxNewRingbuffer->vReturnItem = prvReturnItemByteBuf;
249 //Byte buffers do not incur any overhead
250 pxNewRingbuffer->xMaxItemSize = pxNewRingbuffer->xSize;
251 pxNewRingbuffer->xGetCurMaxSize = prvGetCurMaxSizeByteBuf;
252 }
253 xSemaphoreGive(rbGET_TX_SEM_HANDLE(pxNewRingbuffer));
254 vPortCPUInitializeMutex(&pxNewRingbuffer->mux);
255 }
256
257 static size_t prvGetFreeSize(Ringbuffer_t *pxRingbuffer)
258 {
259 size_t xReturn;
260 if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
261 xReturn = 0;
262 } else {
263 BaseType_t xFreeSize = pxRingbuffer->pucFree - pxRingbuffer->pucAcquire;
264 //Check if xFreeSize has underflowed
265 if (xFreeSize <= 0) {
266 xFreeSize += pxRingbuffer->xSize;
267 }
268 xReturn = xFreeSize;
269 }
270 configASSERT(xReturn <= pxRingbuffer->xSize);
271 return xReturn;
272 }
273
274 static BaseType_t prvCheckItemFitsDefault( Ringbuffer_t *pxRingbuffer, size_t xItemSize)
275 {
276 //Check arguments and buffer state
277 configASSERT(rbCHECK_ALIGNED(pxRingbuffer->pucAcquire)); //pucAcquire is always aligned in no-split/allow-split ring buffers
278 configASSERT(pxRingbuffer->pucAcquire >= pxRingbuffer->pucHead && pxRingbuffer->pucAcquire < pxRingbuffer->pucTail); //Check acquire pointer is within bounds
279
280 size_t xTotalItemSize = rbALIGN_SIZE(xItemSize) + rbHEADER_SIZE; //Rounded up aligned item size with header
281 if (pxRingbuffer->pucAcquire == pxRingbuffer->pucFree) {
282 //Buffer is either completely empty or completely full
283 return (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) ? pdFALSE : pdTRUE;
284 }
285 if (pxRingbuffer->pucFree > pxRingbuffer->pucAcquire) {
286 //Free space does not wrap around
287 return (xTotalItemSize <= pxRingbuffer->pucFree - pxRingbuffer->pucAcquire) ? pdTRUE : pdFALSE;
288 }
289 //Free space wraps around
290 if (xTotalItemSize <= pxRingbuffer->pucTail - pxRingbuffer->pucAcquire) {
291 return pdTRUE; //Item fits without wrapping around
292 }
293 //Check if item fits by wrapping
294 if (pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG) {
295 //Allow split wrapping incurs an extra header
296 return (xTotalItemSize + rbHEADER_SIZE <= pxRingbuffer->xSize - (pxRingbuffer->pucAcquire - pxRingbuffer->pucFree)) ? pdTRUE : pdFALSE;
297 } else {
298 return (xTotalItemSize <= pxRingbuffer->pucFree - pxRingbuffer->pucHead) ? pdTRUE : pdFALSE;
299 }
300 }
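/*
 * Worked example of the fit check above (sizes are assumptions for a 32-bit
 * target where rbHEADER_SIZE == 8): a 10-byte item needs
 * rbALIGN_SIZE(10) + rbHEADER_SIZE == 12 + 8 == 20 contiguous free bytes in a
 * no-split buffer; if the free space wraps around in an allow-split buffer,
 * an extra header is charged and 20 + 8 == 28 total free bytes are required.
 */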
301
302 static BaseType_t prvCheckItemFitsByteBuffer( Ringbuffer_t *pxRingbuffer, size_t xItemSize)
303 {
304 //Check arguments and buffer state
305 configASSERT(pxRingbuffer->pucAcquire >= pxRingbuffer->pucHead && pxRingbuffer->pucAcquire < pxRingbuffer->pucTail); //Check acquire pointer is within bounds
306
307 if (pxRingbuffer->pucAcquire == pxRingbuffer->pucFree) {
308 //Buffer is either completely empty or completely full
309 return (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) ? pdFALSE : pdTRUE;
310 }
311 if (pxRingbuffer->pucFree > pxRingbuffer->pucAcquire) {
312 //Free space does not wrap around
313 return (xItemSize <= pxRingbuffer->pucFree - pxRingbuffer->pucAcquire) ? pdTRUE : pdFALSE;
314 }
315 //Free space wraps around
316 return (xItemSize <= pxRingbuffer->xSize - (pxRingbuffer->pucAcquire - pxRingbuffer->pucFree)) ? pdTRUE : pdFALSE;
317 }
318
319 static uint8_t* prvAcquireItemNoSplit(Ringbuffer_t *pxRingbuffer, size_t xItemSize)
320 {
321 //Check arguments and buffer state
322 size_t xAlignedItemSize = rbALIGN_SIZE(xItemSize); //Rounded up aligned item size
323 size_t xRemLen = pxRingbuffer->pucTail - pxRingbuffer->pucAcquire; //Length from pucAcquire until end of buffer
324 configASSERT(rbCHECK_ALIGNED(pxRingbuffer->pucAcquire)); //pucAcquire is always aligned in no-split ring buffers
325 configASSERT(pxRingbuffer->pucAcquire >= pxRingbuffer->pucHead && pxRingbuffer->pucAcquire < pxRingbuffer->pucTail); //Check acquire pointer is within bounds
326 configASSERT(xRemLen >= rbHEADER_SIZE); //Remaining length must be able to at least fit an item header
327
328 //If remaining length can't fit item, set as dummy data and wrap around
329 if (xRemLen < xAlignedItemSize + rbHEADER_SIZE) {
330 ItemHeader_t *pxDummy = (ItemHeader_t *)pxRingbuffer->pucAcquire;
331 pxDummy->uxItemFlags = rbITEM_DUMMY_DATA_FLAG; //Set remaining length as dummy data
332 pxDummy->xItemLen = 0; //Dummy data should have no length
333 pxRingbuffer->pucAcquire = pxRingbuffer->pucHead; //Reset acquire pointer to wrap around
334 }
335
336 //Item should be guaranteed to fit at this point. Set item header and copy data
337 ItemHeader_t *pxHeader = (ItemHeader_t *)pxRingbuffer->pucAcquire;
338 pxHeader->xItemLen = xItemSize;
339 pxHeader->uxItemFlags = 0;
340
341 //hold the buffer address without touching pucWrite
342 uint8_t* item_address = pxRingbuffer->pucAcquire + rbHEADER_SIZE;
343 pxRingbuffer->pucAcquire += rbHEADER_SIZE + xAlignedItemSize; //Advance pucAcquire past header and the item to next aligned address
344
345 //After the allocation, account for any unusable tail space and correct the flags
346 //If current remaining length can't fit a header, wrap around the acquire pointer
347 if (pxRingbuffer->pucTail - pxRingbuffer->pucAcquire < rbHEADER_SIZE) {
348 pxRingbuffer->pucAcquire = pxRingbuffer->pucHead; //Wrap around pucAcquire
349 }
350 //Check if buffer is full
351 if (pxRingbuffer->pucAcquire == pxRingbuffer->pucFree) {
352 //Mark the buffer as full to distinguish it from an empty buffer
353 pxRingbuffer->uxRingbufferFlags |= rbBUFFER_FULL_FLAG;
354 }
355 return item_address;
356 }
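/*
 * Layout sketch for the wrap-around branch above (sizes assumed for a 32-bit
 * target where rbHEADER_SIZE == 8): if only 12 bytes remain before pucTail
 * and an 8 + 16 byte item is being acquired, the remaining 12 bytes get a
 * zero-length dummy header, pucAcquire wraps to pucHead, and the real header
 * plus item are placed at the start of the storage area instead.
 */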
357
358 static void prvSendItemDoneNoSplit(Ringbuffer_t *pxRingbuffer, uint8_t* pucItem)
359 {
360 //Check arguments and buffer state
361 configASSERT(rbCHECK_ALIGNED(pucItem));
362 configASSERT(pucItem >= pxRingbuffer->pucHead);
363 configASSERT(pucItem <= pxRingbuffer->pucTail); //Inclusive of pucTail in the case of zero length item at the very end
364
365 //Get and check header of the item
366 ItemHeader_t *pxCurHeader = (ItemHeader_t *)(pucItem - rbHEADER_SIZE);
367 configASSERT(pxCurHeader->xItemLen <= pxRingbuffer->xMaxItemSize);
368 configASSERT((pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) == 0); //Dummy items should never have been written
369 configASSERT((pxCurHeader->uxItemFlags & rbITEM_WRITTEN_FLAG) == 0); //Indicates item has already been written before
370 pxCurHeader->uxItemFlags &= ~rbITEM_SPLIT_FLAG; //Clear wrap flag if set (not strictly necessary)
371 pxCurHeader->uxItemFlags |= rbITEM_WRITTEN_FLAG; //Mark as written
372
373 pxRingbuffer->xItemsWaiting++;
374
375 /*
376 * Items might not be written in the order they were acquired. Move the
377 * write pointer up to the next item that has not been marked as written (by
378 * written flag) or up till the acquire pointer. When advancing the write
379 * pointer, items that have already been written or items with dummy data
380 * should be skipped over
381 */
382 pxCurHeader = (ItemHeader_t *)pxRingbuffer->pucWrite;
383 //Skip over Items that have already been written or are dummy items
384 while (((pxCurHeader->uxItemFlags & rbITEM_WRITTEN_FLAG) || (pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG)) && pxRingbuffer->pucWrite != pxRingbuffer->pucAcquire) {
385 if (pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) {
386 pxCurHeader->uxItemFlags |= rbITEM_WRITTEN_FLAG; //Mark as freed (not strictly necessary but adds redundancy)
387 pxRingbuffer->pucWrite = pxRingbuffer->pucHead; //Wrap around due to dummy data
388 } else {
389 //Item with data that has already been written, advance write pointer past this item
390 size_t xAlignedItemSize = rbALIGN_SIZE(pxCurHeader->xItemLen);
391 pxRingbuffer->pucWrite += xAlignedItemSize + rbHEADER_SIZE;
392 //Redundancy check to ensure write pointer has not overshot buffer bounds
393 configASSERT(pxRingbuffer->pucWrite <= pxRingbuffer->pucHead + pxRingbuffer->xSize);
394 }
395 //Check if pucWrite requires wrap around
396 if ((pxRingbuffer->pucTail - pxRingbuffer->pucWrite) < rbHEADER_SIZE) {
397 pxRingbuffer->pucWrite = pxRingbuffer->pucHead;
398 }
399 pxCurHeader = (ItemHeader_t *)pxRingbuffer->pucWrite; //Update header to point to item
400 }
401 }
402
403 static void prvCopyItemNoSplit(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize)
404 {
405 uint8_t* item_addr = prvAcquireItemNoSplit(pxRingbuffer, xItemSize);
406 memcpy(item_addr, pucItem, xItemSize);
407 prvSendItemDoneNoSplit(pxRingbuffer, item_addr);
408 }
409
410 static void prvCopyItemAllowSplit(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize)
411 {
412 //Check arguments and buffer state
413 size_t xAlignedItemSize = rbALIGN_SIZE(xItemSize); //Rounded up aligned item size
414 size_t xRemLen = pxRingbuffer->pucTail - pxRingbuffer->pucAcquire; //Length from pucAcquire until end of buffer
415 configASSERT(rbCHECK_ALIGNED(pxRingbuffer->pucAcquire)); //pucAcquire is always aligned in split ring buffers
416 configASSERT(pxRingbuffer->pucAcquire >= pxRingbuffer->pucHead && pxRingbuffer->pucAcquire < pxRingbuffer->pucTail); //Check acquire pointer is within bounds
417 configASSERT(xRemLen >= rbHEADER_SIZE); //Remaining length must be able to at least fit an item header
418
419 //Split item if necessary
420 if (xRemLen < xAlignedItemSize + rbHEADER_SIZE) {
421 //Write first part of the item
422 ItemHeader_t *pxFirstHeader = (ItemHeader_t *)pxRingbuffer->pucAcquire;
423 pxFirstHeader->uxItemFlags = 0;
424 pxFirstHeader->xItemLen = xRemLen - rbHEADER_SIZE; //Fill remaining length with first part
425 pxRingbuffer->pucAcquire += rbHEADER_SIZE; //Advance pucAcquire past header
426 xRemLen -= rbHEADER_SIZE;
427 if (xRemLen > 0) {
428 memcpy(pxRingbuffer->pucAcquire, pucItem, xRemLen);
429 pxRingbuffer->xItemsWaiting++;
430 //Update item arguments to account for data already copied
431 pucItem += xRemLen;
432 xItemSize -= xRemLen;
433 xAlignedItemSize -= xRemLen;
434 pxFirstHeader->uxItemFlags |= rbITEM_SPLIT_FLAG; //There must be more data
435 } else {
436 //Remaining length was only large enough to fit header
437 pxFirstHeader->uxItemFlags |= rbITEM_DUMMY_DATA_FLAG; //Item will completely be stored in 2nd part
438 }
439 pxRingbuffer->pucAcquire = pxRingbuffer->pucHead; //Reset acquire pointer to start of buffer
440 }
441
442 //Item (whole or second part) should be guaranteed to fit at this point
443 ItemHeader_t *pxSecondHeader = (ItemHeader_t *)pxRingbuffer->pucAcquire;
444 pxSecondHeader->xItemLen = xItemSize;
445 pxSecondHeader->uxItemFlags = 0;
446 pxRingbuffer->pucAcquire += rbHEADER_SIZE; //Advance acquire pointer past header
447 memcpy(pxRingbuffer->pucAcquire, pucItem, xItemSize);
448 pxRingbuffer->xItemsWaiting++;
449 pxRingbuffer->pucAcquire += xAlignedItemSize; //Advance pucAcquire past item to next aligned address
450
451 //If current remaining length can't fit a header, wrap around the acquire pointer
452 if (pxRingbuffer->pucTail - pxRingbuffer->pucAcquire < rbHEADER_SIZE) {
453 pxRingbuffer->pucAcquire = pxRingbuffer->pucHead; //Wrap around pucAcquire
454 }
455 //Check if buffer is full
456 if (pxRingbuffer->pucAcquire == pxRingbuffer->pucFree) {
457 //Mark the buffer as full to distinguish it from an empty buffer
458 pxRingbuffer->uxRingbufferFlags |= rbBUFFER_FULL_FLAG;
459 }
460
461 //Acquiring memory is currently not supported in allow-split mode, so pucWrite tracks pucAcquire
462 pxRingbuffer->pucWrite = pxRingbuffer->pucAcquire;
463 }
464
465 static void prvCopyItemByteBuf(Ringbuffer_t *pxRingbuffer, const uint8_t *pucItem, size_t xItemSize)
466 {
467 //Check arguments and buffer state
468 configASSERT(pxRingbuffer->pucAcquire >= pxRingbuffer->pucHead && pxRingbuffer->pucAcquire < pxRingbuffer->pucTail); //Check acquire pointer is within bounds
469
470 size_t xRemLen = pxRingbuffer->pucTail - pxRingbuffer->pucAcquire; //Length from pucAcquire until end of buffer
471 if (xRemLen < xItemSize) {
472 //Copy as much as possible into remaining length
473 memcpy(pxRingbuffer->pucAcquire, pucItem, xRemLen);
474 pxRingbuffer->xItemsWaiting += xRemLen;
475 //Update item arguments to account for data already written
476 pucItem += xRemLen;
477 xItemSize -= xRemLen;
478 pxRingbuffer->pucAcquire = pxRingbuffer->pucHead; //Reset acquire pointer to start of buffer
479 }
480 //Copy all or remaining portion of the item
481 memcpy(pxRingbuffer->pucAcquire, pucItem, xItemSize);
482 pxRingbuffer->xItemsWaiting += xItemSize;
483 pxRingbuffer->pucAcquire += xItemSize;
484
485 //Wrap around pucAcquire if it reaches the end
486 if (pxRingbuffer->pucAcquire == pxRingbuffer->pucTail) {
487 pxRingbuffer->pucAcquire = pxRingbuffer->pucHead;
488 }
489 //Check if buffer is full
490 if (pxRingbuffer->pucAcquire == pxRingbuffer->pucFree) {
491 pxRingbuffer->uxRingbufferFlags |= rbBUFFER_FULL_FLAG; //Mark the buffer as full to avoid confusion with an empty buffer
492 }
493
494 //Currently, acquiring memory is not supported in byte-buffer mode, so pucWrite tracks pucAcquire
495 pxRingbuffer->pucWrite = pxRingbuffer->pucAcquire;
496 }
497
498 static BaseType_t prvCheckItemAvail(Ringbuffer_t *pxRingbuffer)
499 {
500 if ((pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) && pxRingbuffer->pucRead != pxRingbuffer->pucFree) {
501 return pdFALSE; //Byte buffers do not allow multiple retrievals before return
502 }
503 if ((pxRingbuffer->xItemsWaiting > 0) && ((pxRingbuffer->pucRead != pxRingbuffer->pucWrite) || (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG))) {
504 return pdTRUE; //Items/data available for retrieval
505 } else {
506 return pdFALSE; //No items/data available for retrieval
507 }
508 }
509
510 static void *prvGetItemDefault(Ringbuffer_t *pxRingbuffer,
511 BaseType_t *pxIsSplit,
512 size_t xUnusedParam,
513 size_t *pxItemSize)
514 {
515 //Check arguments and buffer state
516 ItemHeader_t *pxHeader = (ItemHeader_t *)pxRingbuffer->pucRead;
517 configASSERT(pxIsSplit != NULL);
518 configASSERT((pxRingbuffer->xItemsWaiting > 0) && ((pxRingbuffer->pucRead != pxRingbuffer->pucWrite) || (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG))); //Check there are items to be read
519 configASSERT(rbCHECK_ALIGNED(pxRingbuffer->pucRead)); //pucRead is always aligned in split ring buffers
520 configASSERT(pxRingbuffer->pucRead >= pxRingbuffer->pucHead && pxRingbuffer->pucRead < pxRingbuffer->pucTail); //Check read pointer is within bounds
521 configASSERT((pxHeader->xItemLen <= pxRingbuffer->xMaxItemSize) || (pxHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG));
522
523 uint8_t *pcReturn;
524 //Wrap around if dummy data (dummy data indicates wrap around in no-split buffers)
525 if (pxHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) {
526 pxRingbuffer->pucRead = pxRingbuffer->pucHead;
527 //Check for errors with the next item
528 pxHeader = (ItemHeader_t *)pxRingbuffer->pucRead;
529 configASSERT(pxHeader->xItemLen <= pxRingbuffer->xMaxItemSize);
530 }
531 pcReturn = pxRingbuffer->pucRead + rbHEADER_SIZE; //Get pointer to part of item containing data (point past the header)
532 if (pxHeader->xItemLen == 0) {
533 //Inclusive of pucTail for special case where item of zero length just fits at the end of the buffer
534 configASSERT(pcReturn >= pxRingbuffer->pucHead && pcReturn <= pxRingbuffer->pucTail);
535 } else {
536 //Exclusive of pucTail: if length is larger than zero, pcReturn should never point to pucTail
537 configASSERT(pcReturn >= pxRingbuffer->pucHead && pcReturn < pxRingbuffer->pucTail);
538 }
539 *pxItemSize = pxHeader->xItemLen; //Get length of item
540 pxRingbuffer->xItemsWaiting --; //Update item count
541 *pxIsSplit = (pxHeader->uxItemFlags & rbITEM_SPLIT_FLAG) ? pdTRUE : pdFALSE;
542
543 pxRingbuffer->pucRead += rbHEADER_SIZE + rbALIGN_SIZE(pxHeader->xItemLen); //Update pucRead
544 //Check if pucRead requires wrap around
545 if ((pxRingbuffer->pucTail - pxRingbuffer->pucRead) < rbHEADER_SIZE) {
546 pxRingbuffer->pucRead = pxRingbuffer->pucHead;
547 }
548 return (void *)pcReturn;
549 }
550
551 static void *prvGetItemByteBuf(Ringbuffer_t *pxRingbuffer,
552 BaseType_t *pxUnusedParam,
553 size_t xMaxSize,
554 size_t *pxItemSize)
555 {
556 //Check arguments and buffer state
557 configASSERT((pxRingbuffer->xItemsWaiting > 0) && ((pxRingbuffer->pucRead != pxRingbuffer->pucWrite) || (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG))); //Check there are items to be read
558 configASSERT(pxRingbuffer->pucRead >= pxRingbuffer->pucHead && pxRingbuffer->pucRead < pxRingbuffer->pucTail); //Check read pointer is within bounds
559 configASSERT(pxRingbuffer->pucRead == pxRingbuffer->pucFree);
560
561 uint8_t *ret = pxRingbuffer->pucRead;
562 if ((pxRingbuffer->pucRead > pxRingbuffer->pucWrite) || (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG)) { //Available data wraps around
563 //Return contiguous piece from read pointer until buffer tail, or xMaxSize
564 if (xMaxSize == 0 || pxRingbuffer->pucTail - pxRingbuffer->pucRead <= xMaxSize) {
565 //All contiguous data from read pointer to tail
566 *pxItemSize = pxRingbuffer->pucTail - pxRingbuffer->pucRead;
567 pxRingbuffer->xItemsWaiting -= pxRingbuffer->pucTail - pxRingbuffer->pucRead;
568 pxRingbuffer->pucRead = pxRingbuffer->pucHead; //Wrap around read pointer
569 } else {
570 //Return xMaxSize amount of data
571 *pxItemSize = xMaxSize;
572 pxRingbuffer->xItemsWaiting -= xMaxSize;
573 pxRingbuffer->pucRead += xMaxSize; //Advance read pointer past retrieved data
574 }
575 } else { //Available data is contiguous between read and write pointer
576 if (xMaxSize == 0 || pxRingbuffer->pucWrite - pxRingbuffer->pucRead <= xMaxSize) {
577 //Return all contiguous data from read to write pointer
578 *pxItemSize = pxRingbuffer->pucWrite - pxRingbuffer->pucRead;
579 pxRingbuffer->xItemsWaiting -= pxRingbuffer->pucWrite - pxRingbuffer->pucRead;
580 pxRingbuffer->pucRead = pxRingbuffer->pucWrite;
581 } else {
582 //Return xMaxSize data from read pointer
583 *pxItemSize = xMaxSize;
584 pxRingbuffer->xItemsWaiting -= xMaxSize;
585 pxRingbuffer->pucRead += xMaxSize; //Advance read pointer past retrieved data
586
587 }
588 }
589 return (void *)ret;
590 }
591
592 static void prvReturnItemDefault(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem)
593 {
594 //Check arguments and buffer state
595 configASSERT(rbCHECK_ALIGNED(pucItem));
596 configASSERT(pucItem >= pxRingbuffer->pucHead);
597 configASSERT(pucItem <= pxRingbuffer->pucTail); //Inclusive of pucTail in the case of zero length item at the very end
598
599 //Get and check header of the item
600 ItemHeader_t *pxCurHeader = (ItemHeader_t *)(pucItem - rbHEADER_SIZE);
601 configASSERT(pxCurHeader->xItemLen <= pxRingbuffer->xMaxItemSize);
602 configASSERT((pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) == 0); //Dummy items should never have been read
603 configASSERT((pxCurHeader->uxItemFlags & rbITEM_FREE_FLAG) == 0); //Indicates item has already been returned before
604 pxCurHeader->uxItemFlags &= ~rbITEM_SPLIT_FLAG; //Clear wrap flag if set (not strictly necessary)
605 pxCurHeader->uxItemFlags |= rbITEM_FREE_FLAG; //Mark as free
606
607 /*
608 * Items might not be returned in the order they were retrieved. Move the free pointer
609 * up to the next item that has not been marked as free (by free flag) or up
610 * till the read pointer. When advancing the free pointer, items that have already been
611 * freed or items with dummy data should be skipped over
612 */
613 pxCurHeader = (ItemHeader_t *)pxRingbuffer->pucFree;
614 //Skip over Items that have already been freed or are dummy items
615 while (((pxCurHeader->uxItemFlags & rbITEM_FREE_FLAG) || (pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG)) && pxRingbuffer->pucFree != pxRingbuffer->pucRead) {
616 if (pxCurHeader->uxItemFlags & rbITEM_DUMMY_DATA_FLAG) {
617 pxCurHeader->uxItemFlags |= rbITEM_FREE_FLAG; //Mark as freed (not strictly necessary but adds redundancy)
618 pxRingbuffer->pucFree = pxRingbuffer->pucHead; //Wrap around due to dummy data
619 } else {
620 //Item with data that has already been freed, advance free pointer past this item
621 size_t xAlignedItemSize = rbALIGN_SIZE(pxCurHeader->xItemLen);
622 pxRingbuffer->pucFree += xAlignedItemSize + rbHEADER_SIZE;
623 //Redundancy check to ensure free pointer has not overshot buffer bounds
624 configASSERT(pxRingbuffer->pucFree <= pxRingbuffer->pucHead + pxRingbuffer->xSize);
625 }
626 //Check if pucFree requires wrap around
627 if ((pxRingbuffer->pucTail - pxRingbuffer->pucFree) < rbHEADER_SIZE) {
628 pxRingbuffer->pucFree = pxRingbuffer->pucHead;
629 }
630 pxCurHeader = (ItemHeader_t *)pxRingbuffer->pucFree; //Update header to point to item
631 }
632
633 //Check if the buffer full flag should be reset
634 if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
635 if (pxRingbuffer->pucFree != pxRingbuffer->pucAcquire) {
636 pxRingbuffer->uxRingbufferFlags &= ~rbBUFFER_FULL_FLAG;
637 } else if (pxRingbuffer->pucFree == pxRingbuffer->pucAcquire && pxRingbuffer->pucFree == pxRingbuffer->pucRead) {
638 //Special case where a full buffer is completely freed in one go
639 pxRingbuffer->uxRingbufferFlags &= ~rbBUFFER_FULL_FLAG;
640 }
641 }
642 }
643
644 static void prvReturnItemByteBuf(Ringbuffer_t *pxRingbuffer, uint8_t *pucItem)
645 {
646 //Check pointer points to address inside buffer
647 configASSERT((uint8_t *)pucItem >= pxRingbuffer->pucHead);
648 configASSERT((uint8_t *)pucItem < pxRingbuffer->pucTail);
649 //Free the read memory. Simply moves free pointer to read pointer as byte buffers do not allow multiple outstanding reads
650 pxRingbuffer->pucFree = pxRingbuffer->pucRead;
651 //If buffer was full before, reset full flag as free pointer has moved
652 if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
653 pxRingbuffer->uxRingbufferFlags &= ~rbBUFFER_FULL_FLAG;
654 }
655 }
656
657 static size_t prvGetCurMaxSizeNoSplit(Ringbuffer_t *pxRingbuffer)
658 {
659 BaseType_t xFreeSize;
660 //Check if buffer is full
661 if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
662 return 0;
663 }
664 if (pxRingbuffer->pucAcquire < pxRingbuffer->pucFree) {
665 //Free space is contiguous between pucAcquire and pucFree
666 xFreeSize = pxRingbuffer->pucFree - pxRingbuffer->pucAcquire;
667 } else {
668 //Free space wraps around (or overlapped at pucHead), select largest
669 //contiguous free space as no-split items require contiguous space
670 size_t xSize1 = pxRingbuffer->pucTail - pxRingbuffer->pucAcquire;
671 size_t xSize2 = pxRingbuffer->pucFree - pxRingbuffer->pucHead;
672 xFreeSize = (xSize1 > xSize2) ? xSize1 : xSize2;
673 }
674
675 //No-split ring buffer items need space for a header
676 xFreeSize -= rbHEADER_SIZE;
677 //Limit free size to be within bounds
678 if (xFreeSize > pxRingbuffer->xMaxItemSize) {
679 xFreeSize = pxRingbuffer->xMaxItemSize;
680 } else if (xFreeSize < 0) {
681 //Occurs when free space is less than header size
682 xFreeSize = 0;
683 }
684 return xFreeSize;
685 }
686
687 static size_t prvGetCurMaxSizeAllowSplit(Ringbuffer_t *pxRingbuffer)
688 {
689 BaseType_t xFreeSize;
690 //Check if buffer is full
691 if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
692 return 0;
693 }
694 if (pxRingbuffer->pucAcquire == pxRingbuffer->pucHead && pxRingbuffer->pucFree == pxRingbuffer->pucHead) {
695 //Check for special case where pucAcquire and pucFree are both at pucHead
696 xFreeSize = pxRingbuffer->xSize - rbHEADER_SIZE;
697 } else if (pxRingbuffer->pucAcquire < pxRingbuffer->pucFree) {
698 //Free space is contiguous between pucAcquire and pucFree, requires single header
699 xFreeSize = (pxRingbuffer->pucFree - pxRingbuffer->pucAcquire) - rbHEADER_SIZE;
700 } else {
701 //Free space wraps around, requires two headers
702 xFreeSize = (pxRingbuffer->pucFree - pxRingbuffer->pucHead) +
703 (pxRingbuffer->pucTail - pxRingbuffer->pucAcquire) -
704 (rbHEADER_SIZE * 2);
705 }
706
707 //Limit free size to be within bounds
708 if (xFreeSize > pxRingbuffer->xMaxItemSize) {
709 xFreeSize = pxRingbuffer->xMaxItemSize;
710 } else if (xFreeSize < 0) {
711 xFreeSize = 0;
712 }
713 return xFreeSize;
714 }
715
716 static size_t prvGetCurMaxSizeByteBuf(Ringbuffer_t *pxRingbuffer)
717 {
718 BaseType_t xFreeSize;
719 //Check if buffer is full
720 if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_FULL_FLAG) {
721 return 0;
722 }
723
724 /*
725 * Return whatever space is available depending on relative positions of the free
726 * pointer and Acquire pointer. There is no overhead of headers in this mode
727 */
728 xFreeSize = pxRingbuffer->pucFree - pxRingbuffer->pucAcquire;
729 if (xFreeSize <= 0) {
730 xFreeSize += pxRingbuffer->xSize;
731 }
732 return xFreeSize;
733 }
734
735 static BaseType_t prvReceiveGeneric(Ringbuffer_t *pxRingbuffer,
736 void **pvItem1,
737 void **pvItem2,
738 size_t *xItemSize1,
739 size_t *xItemSize2,
740 size_t xMaxSize,
741 TickType_t xTicksToWait)
742 {
743 BaseType_t xReturn = pdFALSE;
744 BaseType_t xReturnSemaphore = pdFALSE;
745 TickType_t xTicksEnd = xTaskGetTickCount() + xTicksToWait;
746 TickType_t xTicksRemaining = xTicksToWait;
747 while (xTicksRemaining <= xTicksToWait) { //xTicksRemaining will underflow once xTaskGetTickCount() > xTicksEnd
748 //Block until more free space becomes available or timeout
749 if (xSemaphoreTake(rbGET_RX_SEM_HANDLE(pxRingbuffer), xTicksRemaining) != pdTRUE) {
750 xReturn = pdFALSE; //Timed out attempting to get semaphore
751 break;
752 }
753
754 //Semaphore obtained, check if item can be retrieved
755 portENTER_CRITICAL(&pxRingbuffer->mux);
756 if (prvCheckItemAvail(pxRingbuffer) == pdTRUE) {
757 //Item is available for retrieval
758 BaseType_t xIsSplit;
759 if (pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) {
760 //Second argument (pxIsSplit) is unused for byte buffers
761 *pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, NULL, xMaxSize, xItemSize1);
762 } else {
763 //Third argument (xMaxSize) is unused for no-split/allow-split buffers
764 *pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize1);
765 }
766 //Check for item split if configured to do so
767 if ((pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG) && (pvItem2 != NULL) && (xItemSize2 != NULL)) {
768 if (xIsSplit == pdTRUE) {
769 *pvItem2 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize2);
770 configASSERT(*pvItem2 < *pvItem1); //Check wrap around has occurred
771 configASSERT(xIsSplit == pdFALSE); //Second part should not have wrapped flag
772 } else {
773 *pvItem2 = NULL;
774 }
775 }
776 xReturn = pdTRUE;
777 if (pxRingbuffer->xItemsWaiting > 0) {
778 xReturnSemaphore = pdTRUE;
779 }
780 portEXIT_CRITICAL(&pxRingbuffer->mux);
781 break;
782 }
783 //No item available for retrieval, adjust ticks and take the semaphore again
784 if (xTicksToWait != portMAX_DELAY) {
785 xTicksRemaining = xTicksEnd - xTaskGetTickCount();
786 }
787 portEXIT_CRITICAL(&pxRingbuffer->mux);
788 /*
789 * Gap between critical section and re-acquiring of the semaphore. If
790 * semaphore is given now, priority inversion might occur (see docs)
791 */
792 }
793
794 if (xReturnSemaphore == pdTRUE) {
795 xSemaphoreGive(rbGET_RX_SEM_HANDLE(pxRingbuffer)); //Give semaphore back so other tasks can retrieve
796 }
797 return xReturn;
798 }
799
800 static BaseType_t prvReceiveGenericFromISR(Ringbuffer_t *pxRingbuffer,
801 void **pvItem1,
802 void **pvItem2,
803 size_t *xItemSize1,
804 size_t *xItemSize2,
805 size_t xMaxSize)
806 {
807 BaseType_t xReturn = pdFALSE;
808 BaseType_t xReturnSemaphore = pdFALSE;
809
810 portENTER_CRITICAL_ISR(&pxRingbuffer->mux);
811 if(prvCheckItemAvail(pxRingbuffer) == pdTRUE) {
812 BaseType_t xIsSplit;
813 if (pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) {
814 //Second argument (pxIsSplit) is unused for byte buffers
815 *pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, NULL, xMaxSize, xItemSize1);
816 } else {
817 //Third argument (xMaxSize) is unused for no-split/allow-split buffers
818 *pvItem1 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize1);
819 }
820 //Check for item split if configured to do so
821 if ((pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG) && pvItem2 != NULL && xItemSize2 != NULL) {
822 if (xIsSplit == pdTRUE) {
823 *pvItem2 = pxRingbuffer->pvGetItem(pxRingbuffer, &xIsSplit, 0, xItemSize2);
824 configASSERT(*pvItem2 < *pvItem1); //Check wrap around has occurred
825 configASSERT(xIsSplit == pdFALSE); //Second part should not have wrapped flag
826 } else {
827 *pvItem2 = NULL;
828 }
829 }
830 xReturn = pdTRUE;
831 if (pxRingbuffer->xItemsWaiting > 0) {
832 xReturnSemaphore = pdTRUE;
833 }
834 }
835 portEXIT_CRITICAL_ISR(&pxRingbuffer->mux);
836
837 if (xReturnSemaphore == pdTRUE) {
838 xSemaphoreGiveFromISR(rbGET_RX_SEM_HANDLE(pxRingbuffer), NULL); //Give semaphore back so other tasks can retrieve
839 }
840 return xReturn;
841 }
842
843 /* --------------------------- Public Definitions --------------------------- */
844
845 RingbufHandle_t xRingbufferCreate(size_t xBufferSize, RingbufferType_t xBufferType)
846 {
847 configASSERT(xBufferSize > 0);
848 configASSERT(xBufferType < RINGBUF_TYPE_MAX);
849
850 //Allocate memory
851 if (xBufferType != RINGBUF_TYPE_BYTEBUF) {
852 xBufferSize = rbALIGN_SIZE(xBufferSize); //xBufferSize is rounded up for no-split/allow-split buffers
853 }
854 Ringbuffer_t *pxNewRingbuffer = calloc(1, sizeof(Ringbuffer_t));
855 uint8_t *pucRingbufferStorage = malloc(xBufferSize);
856 if (pxNewRingbuffer == NULL || pucRingbufferStorage == NULL) {
857 goto err;
858 }
859
860 //Initialize Semaphores
861 #if ( configSUPPORT_STATIC_ALLOCATION == 1)
862 //We don't use the handles for static semaphores, and xSemaphoreCreateBinaryStatic will never fail, so there is no need to check for failure in the static case
863 xSemaphoreCreateBinaryStatic(&(pxNewRingbuffer->xTransSemStatic));
864 xSemaphoreCreateBinaryStatic(&(pxNewRingbuffer->xRecvSemStatic));
865 #else
866 pxNewRingbuffer->xTransSemHandle = xSemaphoreCreateBinary();
867 pxNewRingbuffer->xRecvSemHandle = xSemaphoreCreateBinary();
868 if (pxNewRingbuffer->xTransSemHandle == NULL || pxNewRingbuffer->xRecvSemHandle == NULL) {
869 if (pxNewRingbuffer->xTransSemHandle != NULL) {
870 vSemaphoreDelete(pxNewRingbuffer->xTransSemHandle);
871 }
872 if (pxNewRingbuffer->xRecvSemHandle != NULL) {
873 vSemaphoreDelete(pxNewRingbuffer->xRecvSemHandle);
874 }
875 goto err;
876 }
877 #endif
878
879 prvInitializeNewRingbuffer(xBufferSize, xBufferType, pxNewRingbuffer, pucRingbufferStorage);
880 return (RingbufHandle_t)pxNewRingbuffer;
881
882 err:
883 //An error has occurred. Free memory and return NULL
884 free(pxNewRingbuffer);
885 free(pucRingbufferStorage);
886 return NULL;
887 }
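/*
 * Illustrative usage sketch (not part of the driver): creating a ring buffer
 * and checking for allocation failure. The size and type chosen here are
 * assumptions.
 *
 * void example_create(void)
 * {
 *     RingbufHandle_t xBuffer = xRingbufferCreate(1024, RINGBUF_TYPE_NOSPLIT);
 *     if (xBuffer == NULL) {
 *         //Allocation of the control structure or storage area failed
 *     }
 * }
 */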
888
889 RingbufHandle_t xRingbufferCreateNoSplit(size_t xItemSize, size_t xItemNum)
890 {
891 return xRingbufferCreate((rbALIGN_SIZE(xItemSize) + rbHEADER_SIZE) * xItemNum, RINGBUF_TYPE_NOSPLIT);
892 }
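/*
 * Worked example of the sizing above (assuming an 8-byte ItemHeader_t on a
 * 32-bit target): xRingbufferCreateNoSplit(18, 4) requests
 * (rbALIGN_SIZE(18) + rbHEADER_SIZE) * 4 == (20 + 8) * 4 == 112 bytes of
 * storage, enough for four 18-byte items including headers and alignment
 * padding.
 */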
893
894 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
895 RingbufHandle_t xRingbufferCreateStatic(size_t xBufferSize,
896 RingbufferType_t xBufferType,
897 uint8_t *pucRingbufferStorage,
898 StaticRingbuffer_t *pxStaticRingbuffer)
899 {
900 //Check arguments
901 configASSERT(xBufferSize > 0);
902 configASSERT(xBufferType < RINGBUF_TYPE_MAX);
903 configASSERT(pucRingbufferStorage != NULL && pxStaticRingbuffer != NULL);
904 if (xBufferType != RINGBUF_TYPE_BYTEBUF) {
905 //No-split/allow-split buffer sizes must be 32-bit aligned
906 configASSERT(rbCHECK_ALIGNED(xBufferSize));
907 }
908
909 Ringbuffer_t *pxNewRingbuffer = (Ringbuffer_t *)pxStaticRingbuffer;
910 xSemaphoreCreateBinaryStatic(&(pxNewRingbuffer->xTransSemStatic));
911 xSemaphoreCreateBinaryStatic(&(pxNewRingbuffer->xRecvSemStatic));
912 prvInitializeNewRingbuffer(xBufferSize, xBufferType, pxNewRingbuffer, pucRingbufferStorage);
913 pxNewRingbuffer->uxRingbufferFlags |= rbBUFFER_STATIC_FLAG;
914 return (RingbufHandle_t)pxNewRingbuffer;
915 }
916 #endif
917
918 BaseType_t xRingbufferSendAcquire(RingbufHandle_t xRingbuffer, void **ppvItem, size_t xItemSize, TickType_t xTicksToWait)
919 {
920 //Check arguments
921 Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
922 configASSERT(pxRingbuffer);
923 configASSERT(ppvItem != NULL || xItemSize == 0);
924 //currently only supported in NoSplit buffers
925 configASSERT((pxRingbuffer->uxRingbufferFlags & (rbBYTE_BUFFER_FLAG | rbALLOW_SPLIT_FLAG)) == 0);
926
927 *ppvItem = NULL;
928 if (xItemSize > pxRingbuffer->xMaxItemSize) {
929 return pdFALSE; //Data will never ever fit in the queue.
930 }
931 if ((pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) && xItemSize == 0) {
932 return pdTRUE; //Sending 0 bytes to byte buffer has no effect
933 }
934
935 //Attempt to send an item
936 BaseType_t xReturn = pdFALSE;
937 BaseType_t xReturnSemaphore = pdFALSE;
938 TickType_t xTicksEnd = xTaskGetTickCount() + xTicksToWait;
939 TickType_t xTicksRemaining = xTicksToWait;
940 while (xTicksRemaining <= xTicksToWait) { //xTicksRemaining will underflow once xTaskGetTickCount() > xTicksEnd
941 //Block until more free space becomes available or timeout
942 if (xSemaphoreTake(rbGET_TX_SEM_HANDLE(pxRingbuffer), xTicksRemaining) != pdTRUE) {
943 xReturn = pdFALSE;
944 break;
945 }
946
947 //Semaphore obtained, check if item can fit
948 portENTER_CRITICAL(&pxRingbuffer->mux);
949 if(pxRingbuffer->xCheckItemFits(pxRingbuffer, xItemSize) == pdTRUE) {
950 //Item will fit, copy item
951 *ppvItem = prvAcquireItemNoSplit(pxRingbuffer, xItemSize);
952 xReturn = pdTRUE;
953 //Check if the free semaphore should be returned to allow other tasks to send
954 if (prvGetFreeSize(pxRingbuffer) > 0) {
955 xReturnSemaphore = pdTRUE;
956 }
957 portEXIT_CRITICAL(&pxRingbuffer->mux);
958 break;
959 }
960 //Item doesn't fit, adjust ticks and take the semaphore again
961 if (xTicksToWait != portMAX_DELAY) {
962 xTicksRemaining = xTicksEnd - xTaskGetTickCount();
963 }
964 portEXIT_CRITICAL(&pxRingbuffer->mux);
965 /*
966 * Gap between critical section and re-acquiring of the semaphore. If
967 * semaphore is given now, priority inversion might occur (see docs)
968 */
969 }
970
971 if (xReturnSemaphore == pdTRUE) {
972 xSemaphoreGive(rbGET_TX_SEM_HANDLE(pxRingbuffer)); //Give back semaphore so other tasks can acquire
973 }
974 return xReturn;
975 }
976
977 BaseType_t xRingbufferSendComplete(RingbufHandle_t xRingbuffer, void *pvItem)
978 {
979 //Check arguments
980 Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
981 configASSERT(pxRingbuffer);
982 configASSERT(pvItem != NULL);
983 configASSERT((pxRingbuffer->uxRingbufferFlags & (rbBYTE_BUFFER_FLAG | rbALLOW_SPLIT_FLAG)) == 0);
984
985 portENTER_CRITICAL(&pxRingbuffer->mux);
986 prvSendItemDoneNoSplit(pxRingbuffer, pvItem);
987 portEXIT_CRITICAL(&pxRingbuffer->mux);
988
989 xSemaphoreGive(rbGET_RX_SEM_HANDLE(pxRingbuffer));
990 return pdTRUE;
991 }
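/*
 * Illustrative sketch of the acquire/complete flow implemented above for a
 * no-split buffer; the handle, size and timeout are assumptions.
 *
 * void example_acquire_send(RingbufHandle_t xBuffer)
 * {
 *     void *pvItem;
 *     //Reserve 32 bytes, waiting up to 100 ticks for space to become free
 *     if (xRingbufferSendAcquire(xBuffer, &pvItem, 32, 100) == pdTRUE) {
 *         memset(pvItem, 0xAA, 32);                 //Fill the reserved space in place
 *         xRingbufferSendComplete(xBuffer, pvItem); //Mark the item as written and readable
 *     }
 * }
 */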
992
993 BaseType_t xRingbufferSend(RingbufHandle_t xRingbuffer,
994 const void *pvItem,
995 size_t xItemSize,
996 TickType_t xTicksToWait)
997 {
998 //Check arguments
999 Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
1000 configASSERT(pxRingbuffer);
1001 configASSERT(pvItem != NULL || xItemSize == 0);
1002 if (xItemSize > pxRingbuffer->xMaxItemSize) {
1003 return pdFALSE; //Data will never ever fit in the queue.
1004 }
1005 if ((pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) && xItemSize == 0) {
1006 return pdTRUE; //Sending 0 bytes to byte buffer has no effect
1007 }
1008
1009 //Attempt to send an item
1010 BaseType_t xReturn = pdFALSE;
1011 BaseType_t xReturnSemaphore = pdFALSE;
1012 TickType_t xTicksEnd = xTaskGetTickCount() + xTicksToWait;
1013 TickType_t xTicksRemaining = xTicksToWait;
1014 while (xTicksRemaining <= xTicksToWait) { //xTicksRemaining will underflow once xTaskGetTickCount() > xTicksEnd
1015 //Block until more free space becomes available or timeout
1016 if (xSemaphoreTake(rbGET_TX_SEM_HANDLE(pxRingbuffer), xTicksRemaining) != pdTRUE) {
1017 xReturn = pdFALSE;
1018 break;
1019 }
1020 //Semaphore obtained, check if item can fit
1021 portENTER_CRITICAL(&pxRingbuffer->mux);
1022 if(pxRingbuffer->xCheckItemFits(pxRingbuffer, xItemSize) == pdTRUE) {
1023 //Item will fit, copy item
1024 pxRingbuffer->vCopyItem(pxRingbuffer, pvItem, xItemSize);
1025 xReturn = pdTRUE;
1026 //Check if the free semaphore should be returned to allow other tasks to send
1027 if (prvGetFreeSize(pxRingbuffer) > 0) {
1028 xReturnSemaphore = pdTRUE;
1029 }
1030 portEXIT_CRITICAL(&pxRingbuffer->mux);
1031 break;
1032 }
1033 //Item doesn't fit, adjust ticks and take the semaphore again
1034 if (xTicksToWait != portMAX_DELAY) {
1035 xTicksRemaining = xTicksEnd - xTaskGetTickCount();
1036 }
1037 portEXIT_CRITICAL(&pxRingbuffer->mux);
1038 /*
1039 * Gap between critical section and re-acquiring of the semaphore. If
1040 * semaphore is given now, priority inversion might occur (see docs)
1041 */
1042 }
1043
1044 if (xReturn == pdTRUE) {
1045 //Indicate item was successfully sent
1046 xSemaphoreGive(rbGET_RX_SEM_HANDLE(pxRingbuffer));
1047 }
1048 if (xReturnSemaphore == pdTRUE) {
1049 xSemaphoreGive(rbGET_TX_SEM_HANDLE(pxRingbuffer)); //Give back semaphore so other tasks can send
1050 }
1051 return xReturn;
1052 }
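/*
 * Illustrative usage sketch for the blocking send above; the handle, payload
 * and 100-tick timeout are assumptions.
 *
 * void example_send(RingbufHandle_t xBuffer)
 * {
 *     const char pcData[] = "hello";
 *     //Blocks until enough free space is available or the timeout expires
 *     if (xRingbufferSend(xBuffer, pcData, sizeof(pcData), 100) != pdTRUE) {
 *         //The item did not fit within the timeout
 *     }
 * }
 */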
1053
1054 BaseType_t xRingbufferSendFromISR(RingbufHandle_t xRingbuffer,
1055 const void *pvItem,
1056 size_t xItemSize,
1057 BaseType_t *pxHigherPriorityTaskWoken)
1058 {
1059 //Check arguments
1060 Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
1061 configASSERT(pxRingbuffer);
1062 configASSERT(pvItem != NULL || xItemSize == 0);
1063 if (xItemSize > pxRingbuffer->xMaxItemSize) {
1064 return pdFALSE; //Data will never ever fit in the queue.
1065 }
1066 if ((pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG) && xItemSize == 0) {
1067 return pdTRUE; //Sending 0 bytes to byte buffer has no effect
1068 }
1069
1070 //Attempt to send an item
1071 BaseType_t xReturn;
1072 BaseType_t xReturnSemaphore = pdFALSE;
1073 portENTER_CRITICAL_ISR(&pxRingbuffer->mux);
1074 if (pxRingbuffer->xCheckItemFits(xRingbuffer, xItemSize) == pdTRUE) {
1075 pxRingbuffer->vCopyItem(xRingbuffer, pvItem, xItemSize);
1076 xReturn = pdTRUE;
1077 //Check if the free semaphore should be returned to allow other tasks to send
1078 if (prvGetFreeSize(pxRingbuffer) > 0) {
1079 xReturnSemaphore = pdTRUE;
1080 }
1081 } else {
1082 xReturn = pdFALSE;
1083 }
1084 portEXIT_CRITICAL_ISR(&pxRingbuffer->mux);
1085
1086 if (xReturn == pdTRUE) {
1087 //Indicate item was successfully sent
1088 xSemaphoreGiveFromISR(rbGET_RX_SEM_HANDLE(pxRingbuffer), pxHigherPriorityTaskWoken);
1089 }
1090 if (xReturnSemaphore == pdTRUE) {
1091 xSemaphoreGiveFromISR(rbGET_TX_SEM_HANDLE(pxRingbuffer), pxHigherPriorityTaskWoken); //Give back semaphore so other tasks can send
1092 }
1093 return xReturn;
1094 }
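/*
 * Illustrative ISR usage sketch for the function above; the payload is an
 * assumption and the context switch request is left to the port's
 * yield-from-ISR mechanism.
 *
 * void example_send_from_isr(RingbufHandle_t xBuffer)
 * {
 *     BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *     uint8_t ucByte = 0x55;
 *     xRingbufferSendFromISR(xBuffer, &ucByte, sizeof(ucByte), &xHigherPriorityTaskWoken);
 *     if (xHigherPriorityTaskWoken == pdTRUE) {
 *         //Request a context switch here using the port's yield-from-ISR macro
 *     }
 * }
 */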
1095
1096 void *xRingbufferReceive(RingbufHandle_t xRingbuffer, size_t *pxItemSize, TickType_t xTicksToWait)
1097 {
1098 //Check arguments
1099 Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
1100 configASSERT(pxRingbuffer);
1101
1102 //Attempt to retrieve an item
1103 void *pvTempItem;
1104 size_t xTempSize;
1105 if (prvReceiveGeneric(pxRingbuffer, &pvTempItem, NULL, &xTempSize, NULL, 0, xTicksToWait) == pdTRUE) {
1106 if (pxItemSize != NULL) {
1107 *pxItemSize = xTempSize;
1108 }
1109 return pvTempItem;
1110 } else {
1111 return NULL;
1112 }
1113 }
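/*
 * Illustrative usage sketch for receiving and returning an item; the handle
 * and 100-tick timeout are assumptions.
 *
 * void example_receive(RingbufHandle_t xBuffer)
 * {
 *     size_t xItemSize;
 *     void *pvItem = xRingbufferReceive(xBuffer, &xItemSize, 100);
 *     if (pvItem != NULL) {
 *         //Use the xItemSize bytes at pvItem, then hand the space back
 *         vRingbufferReturnItem(xBuffer, pvItem);
 *     }
 * }
 */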
1114
1115 void *xRingbufferReceiveFromISR(RingbufHandle_t xRingbuffer, size_t *pxItemSize)
1116 {
1117 //Check arguments
1118 Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
1119 configASSERT(pxRingbuffer);
1120
1121 //Attempt to retrieve an item
1122 void *pvTempItem;
1123 size_t xTempSize;
1124 if (prvReceiveGenericFromISR(pxRingbuffer, &pvTempItem, NULL, &xTempSize, NULL, 0) == pdTRUE) {
1125 if (pxItemSize != NULL) {
1126 *pxItemSize = xTempSize;
1127 }
1128 return pvTempItem;
1129 } else {
1130 return NULL;
1131 }
1132 }
1133
1134 BaseType_t xRingbufferReceiveSplit(RingbufHandle_t xRingbuffer,
1135 void **ppvHeadItem,
1136 void **ppvTailItem,
1137 size_t *pxHeadItemSize,
1138 size_t *pxTailItemSize,
1139 TickType_t xTicksToWait)
1140 {
1141 //Check arguments
1142 Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
1143 configASSERT(pxRingbuffer);
1144 configASSERT(pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG);
1145 configASSERT(ppvHeadItem != NULL && ppvTailItem != NULL);
1146
1147 //Attempt to retrieve multiple items
1148 void *pvTempHeadItem, *pvTempTailItem;
1149 size_t xTempHeadSize, xTempTailSize;
1150 if (prvReceiveGeneric(pxRingbuffer, &pvTempHeadItem, &pvTempTailItem, &xTempHeadSize, &xTempTailSize, 0, xTicksToWait) == pdTRUE) {
1151 //At least one item was retrieved
1152 *ppvHeadItem = pvTempHeadItem;
1153 if(pxHeadItemSize != NULL){
1154 *pxHeadItemSize = xTempHeadSize;
1155 }
1156 //Check to see if a second item was also retrieved
1157 if (pvTempTailItem != NULL) {
1158 *ppvTailItem = pvTempTailItem;
1159 if (pxTailItemSize != NULL) {
1160 *pxTailItemSize = xTempTailSize;
1161 }
1162 } else {
1163 *ppvTailItem = NULL;
1164 }
1165 return pdTRUE;
1166 } else {
1167 //No items retrieved
1168 *ppvHeadItem = NULL;
1169 *ppvTailItem = NULL;
1170 return pdFALSE;
1171 }
1172 }
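/*
 * Illustrative usage sketch for split retrieval from an allow-split buffer;
 * the handle and timeout are assumptions. Both parts must be returned
 * separately.
 *
 * void example_receive_split(RingbufHandle_t xBuffer)
 * {
 *     void *pvHead, *pvTail;
 *     size_t xHeadSize, xTailSize;
 *     if (xRingbufferReceiveSplit(xBuffer, &pvHead, &pvTail, &xHeadSize, &xTailSize, 100) == pdTRUE) {
 *         vRingbufferReturnItem(xBuffer, pvHead);     //Return the first part
 *         if (pvTail != NULL) {
 *             vRingbufferReturnItem(xBuffer, pvTail); //Return the wrapped second part, if any
 *         }
 *     }
 * }
 */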
1173
1174 BaseType_t xRingbufferReceiveSplitFromISR(RingbufHandle_t xRingbuffer,
1175 void **ppvHeadItem,
1176 void **ppvTailItem,
1177 size_t *pxHeadItemSize,
1178 size_t *pxTailItemSize)
1179 {
1180 //Check arguments
1181 Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
1182 configASSERT(pxRingbuffer);
1183 configASSERT(pxRingbuffer->uxRingbufferFlags & rbALLOW_SPLIT_FLAG);
1184 configASSERT(ppvHeadItem != NULL && ppvTailItem != NULL);
1185
1186 //Attempt to retrieve multiple items
1187 void *pvTempHeadItem = NULL, *pvTempTailItem = NULL;
1188 size_t xTempHeadSize, xTempTailSize;
1189 if (prvReceiveGenericFromISR(pxRingbuffer, &pvTempHeadItem, &pvTempTailItem, &xTempHeadSize, &xTempTailSize, 0) == pdTRUE) {
1190 //At least one item was received
1191 *ppvHeadItem = pvTempHeadItem;
1192 if (pxHeadItemSize != NULL) {
1193 *pxHeadItemSize = xTempHeadSize;
1194 }
1195 //Check to see if a second item was also retrieved
1196 if (pvTempTailItem != NULL) {
1197 *ppvTailItem = pvTempTailItem;
1198 if (pxTailItemSize != NULL) {
1199 *pxTailItemSize = xTempTailSize;
1200 }
1201 } else {
1202 *ppvTailItem = NULL;
1203 }
1204 return pdTRUE;
1205 } else {
1206 *ppvHeadItem = NULL;
1207 *ppvTailItem = NULL;
1208 return pdFALSE;
1209 }
1210 }
1211
xRingbufferReceiveUpTo(RingbufHandle_t xRingbuffer,size_t * pxItemSize,TickType_t xTicksToWait,size_t xMaxSize)1212 void *xRingbufferReceiveUpTo(RingbufHandle_t xRingbuffer,
1213 size_t *pxItemSize,
1214 TickType_t xTicksToWait,
1215 size_t xMaxSize)
1216 {
1217 //Check arguments
1218 Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
1219 configASSERT(pxRingbuffer);
1220 configASSERT(pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG); //This function should only be called for byte buffers
1221 if (xMaxSize == 0) {
1222 return NULL;
1223 }
1224
1225 //Attempt to retrieve up to xMaxSize bytes
1226 void *pvTempItem;
1227 size_t xTempSize;
1228 if (prvReceiveGeneric(pxRingbuffer, &pvTempItem, NULL, &xTempSize, NULL, xMaxSize, xTicksToWait) == pdTRUE) {
1229 if (pxItemSize != NULL) {
1230 *pxItemSize = xTempSize;
1231 }
1232 return pvTempItem;
1233 } else {
1234 return NULL;
1235 }
1236 }
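
/*
 * Usage sketch (illustrative only): draining a byte buffer. Because the
 * returned region must be contiguous, a single call may yield fewer than
 * xMaxSize bytes when the data wraps around; a follow-up call picks up the
 * remainder. The handle xExampleByteBuf is an assumption made for the example.
 *
 *     size_t xReceived;
 *     uint8_t *pucData = (uint8_t *)xRingbufferReceiveUpTo(xExampleByteBuf, &xReceived,
 *                                                          pdMS_TO_TICKS(10), 64);
 *     if (pucData != NULL) {
 *         //...consume xReceived bytes (possibly fewer than 64)...
 *         vRingbufferReturnItem(xExampleByteBuf, pucData);
 *     }
 */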

void *xRingbufferReceiveUpToFromISR(RingbufHandle_t xRingbuffer, size_t *pxItemSize, size_t xMaxSize)
{
    //Check arguments
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pxRingbuffer->uxRingbufferFlags & rbBYTE_BUFFER_FLAG);     //This function should only be called for byte buffers
    if (xMaxSize == 0) {
        return NULL;
    }

    //Attempt to retrieve up to xMaxSize bytes
    void *pvTempItem;
    size_t xTempSize;
    if (prvReceiveGenericFromISR(pxRingbuffer, &pvTempItem, NULL, &xTempSize, NULL, xMaxSize) == pdTRUE) {
        if (pxItemSize != NULL) {
            *pxItemSize = xTempSize;
        }
        return pvTempItem;
    } else {
        return NULL;
    }
}

void vRingbufferReturnItem(RingbufHandle_t xRingbuffer, void *pvItem)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pvItem != NULL);

    portENTER_CRITICAL(&pxRingbuffer->mux);
    pxRingbuffer->vReturnItem(pxRingbuffer, (uint8_t *)pvItem);
    portEXIT_CRITICAL(&pxRingbuffer->mux);
    xSemaphoreGive(rbGET_TX_SEM_HANDLE(pxRingbuffer));
}

void vRingbufferReturnItemFromISR(RingbufHandle_t xRingbuffer, void *pvItem, BaseType_t *pxHigherPriorityTaskWoken)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    configASSERT(pvItem != NULL);

    portENTER_CRITICAL_ISR(&pxRingbuffer->mux);
    pxRingbuffer->vReturnItem(pxRingbuffer, (uint8_t *)pvItem);
    portEXIT_CRITICAL_ISR(&pxRingbuffer->mux);
    xSemaphoreGiveFromISR(rbGET_TX_SEM_HANDLE(pxRingbuffer), pxHigherPriorityTaskWoken);
}
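
/*
 * Usage sketch (illustrative only): the FromISR variants follow the usual
 * FreeRTOS "higher priority task woken" pattern. The handler name, the handle
 * passed via pvArg, and the argument-less form of portYIELD_FROM_ISR() are
 * assumptions; the exact yield macro depends on the port in use.
 *
 *     static void example_isr_handler(void *pvArg)
 *     {
 *         RingbufHandle_t xExampleBuf = (RingbufHandle_t)pvArg;
 *         BaseType_t xWoken = pdFALSE;
 *         size_t xItemSize;
 *         void *pvItem = xRingbufferReceiveFromISR(xExampleBuf, &xItemSize);
 *         if (pvItem != NULL) {
 *             //...handle the item briefly in ISR context...
 *             vRingbufferReturnItemFromISR(xExampleBuf, pvItem, &xWoken);
 *         }
 *         if (xWoken == pdTRUE) {
 *             portYIELD_FROM_ISR();
 *         }
 *     }
 */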

void vRingbufferDelete(RingbufHandle_t xRingbuffer)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);

    vSemaphoreDelete(rbGET_TX_SEM_HANDLE(pxRingbuffer));
    vSemaphoreDelete(rbGET_RX_SEM_HANDLE(pxRingbuffer));

#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
    if (pxRingbuffer->uxRingbufferFlags & rbBUFFER_STATIC_FLAG) {
        //Ring buffer was statically allocated, no need to free
        return;
    }
#endif
    free(pxRingbuffer->pucHead);
    free(pxRingbuffer);
}

size_t xRingbufferGetMaxItemSize(RingbufHandle_t xRingbuffer)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    return pxRingbuffer->xMaxItemSize;
}

size_t xRingbufferGetCurFreeSize(RingbufHandle_t xRingbuffer)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);

    size_t xFreeSize;
    portENTER_CRITICAL(&pxRingbuffer->mux);
    xFreeSize = pxRingbuffer->xGetCurMaxSize(pxRingbuffer);
    portEXIT_CRITICAL(&pxRingbuffer->mux);
    return xFreeSize;
}

BaseType_t xRingbufferAddToQueueSetRead(RingbufHandle_t xRingbuffer, QueueSetHandle_t xQueueSet)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);

    BaseType_t xReturn;
    portENTER_CRITICAL(&pxRingbuffer->mux);
    //Cannot add semaphore to queue set if semaphore is not empty. Temporarily hold semaphore
    BaseType_t xHoldSemaphore = xSemaphoreTake(rbGET_RX_SEM_HANDLE(pxRingbuffer), 0);
    xReturn = xQueueAddToSet(rbGET_RX_SEM_HANDLE(pxRingbuffer), xQueueSet);
    if (xHoldSemaphore == pdTRUE) {
        //Return semaphore if temporarily held
        configASSERT(xSemaphoreGive(rbGET_RX_SEM_HANDLE(pxRingbuffer)) == pdTRUE);
    }
    portEXIT_CRITICAL(&pxRingbuffer->mux);
    return xReturn;
}

BaseType_t xRingbufferCanRead(RingbufHandle_t xRingbuffer, QueueSetMemberHandle_t xMember)
{
    //Check if the selected queue set member is the ring buffer's read semaphore
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);
    return (rbGET_RX_SEM_HANDLE(pxRingbuffer) == xMember) ? pdTRUE : pdFALSE;
}

BaseType_t xRingbufferRemoveFromQueueSetRead(RingbufHandle_t xRingbuffer, QueueSetHandle_t xQueueSet)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);

    BaseType_t xReturn;
    portENTER_CRITICAL(&pxRingbuffer->mux);
    //Cannot remove semaphore from queue set if semaphore is not empty. Temporarily hold semaphore
    BaseType_t xHoldSemaphore = xSemaphoreTake(rbGET_RX_SEM_HANDLE(pxRingbuffer), 0);
    xReturn = xQueueRemoveFromSet(rbGET_RX_SEM_HANDLE(pxRingbuffer), xQueueSet);
    if (xHoldSemaphore == pdTRUE) {
        //Return semaphore if temporarily held
        configASSERT(xSemaphoreGive(rbGET_RX_SEM_HANDLE(pxRingbuffer)) == pdTRUE);
    }
    portEXIT_CRITICAL(&pxRingbuffer->mux);
    return xReturn;
}
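
/*
 * Usage sketch (illustrative only): placing the read semaphore in a queue set
 * lets a single task wait on the ring buffer and on ordinary queues at the
 * same time. The set length of 3 and the handle names are assumptions made
 * for the example.
 *
 *     QueueSetHandle_t xSet = xQueueCreateSet(3);
 *     configASSERT(xRingbufferAddToQueueSetRead(xExampleBuf, xSet) == pdTRUE);
 *     for (;;) {
 *         QueueSetMemberHandle_t xMember = xQueueSelectFromSet(xSet, portMAX_DELAY);
 *         if (xRingbufferCanRead(xExampleBuf, xMember) == pdTRUE) {
 *             size_t xItemSize;
 *             void *pvItem = xRingbufferReceive(xExampleBuf, &xItemSize, 0);
 *             if (pvItem != NULL) {
 *                 //...process and return the item...
 *                 vRingbufferReturnItem(xExampleBuf, pvItem);
 *             }
 *         } else {
 *             //xMember belongs to another queue or semaphore in the set
 *         }
 *     }
 *     //xRingbufferRemoveFromQueueSetRead(xExampleBuf, xSet) detaches the buffer again
 */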

void vRingbufferGetInfo(RingbufHandle_t xRingbuffer,
                        UBaseType_t *uxFree,
                        UBaseType_t *uxRead,
                        UBaseType_t *uxWrite,
                        UBaseType_t *uxAcquire,
                        UBaseType_t *uxItemsWaiting)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);

    portENTER_CRITICAL(&pxRingbuffer->mux);
    if (uxFree != NULL) {
        *uxFree = (UBaseType_t)(pxRingbuffer->pucFree - pxRingbuffer->pucHead);
    }
    if (uxRead != NULL) {
        *uxRead = (UBaseType_t)(pxRingbuffer->pucRead - pxRingbuffer->pucHead);
    }
    if (uxWrite != NULL) {
        *uxWrite = (UBaseType_t)(pxRingbuffer->pucWrite - pxRingbuffer->pucHead);
    }
    if (uxAcquire != NULL) {
        *uxAcquire = (UBaseType_t)(pxRingbuffer->pucAcquire - pxRingbuffer->pucHead);
    }
    if (uxItemsWaiting != NULL) {
        *uxItemsWaiting = (UBaseType_t)(pxRingbuffer->xItemsWaiting);
    }
    portEXIT_CRITICAL(&pxRingbuffer->mux);
}

void xRingbufferPrintInfo(RingbufHandle_t xRingbuffer)
{
    Ringbuffer_t *pxRingbuffer = (Ringbuffer_t *)xRingbuffer;
    configASSERT(pxRingbuffer);

    //Cast the size_t and pointer-difference operands so they match the %d format specifiers
    printf("Rb size:%d\tfree: %d\trptr: %d\tfreeptr: %d\twptr: %d, aptr: %d\n",
           (int)pxRingbuffer->xSize, (int)prvGetFreeSize(pxRingbuffer),
           (int)(pxRingbuffer->pucRead - pxRingbuffer->pucHead),
           (int)(pxRingbuffer->pucFree - pxRingbuffer->pucHead),
           (int)(pxRingbuffer->pucWrite - pxRingbuffer->pucHead),
           (int)(pxRingbuffer->pucAcquire - pxRingbuffer->pucHead));
}