/*
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef GRAPHIC_LITE_QUEUE_H
#define GRAPHIC_LITE_QUEUE_H

#include "securec.h"
#include <stdbool.h>
#include <stdint.h>

#include "graphic_thread.h"
#include "hal_cpu.h"
#include "hal_atomic.h"

#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif
#endif

typedef enum {
    QUEUE_INVAL = -10, /* invalid parameter */
    QUEUE_FULL,        /* queue is full (-9) */
    QUEUE_EMPTY        /* queue is empty (-8) */
} QueueError;

/** @brief Lock-free ring queue */
typedef struct {
    uint32_t magic;
    uint32_t unitNum; /* queue node limit */
    uint8_t pad[56];  /* cache line pad */

    /* producer status */
    struct {
        uint32_t size;          /* queue size */
        uint32_t mask;          /* mask (size - 1) */
        volatile uint32_t head; /* producer head */
        volatile uint32_t tail; /* producer tail */
        uint8_t pad[48];        /* cache line pad */
    } producer;

    /* consumer status */
    struct {
        uint32_t size;          /* queue size */
        uint32_t mask;          /* mask (size - 1) */
        volatile uint32_t head; /* consumer head */
        volatile uint32_t tail; /* consumer tail */
        uint8_t pad[48];        /* cache line pad */
    } consumer;

    uintptr_t nodes[0]; /* queue nodes */
} LockFreeQueue;
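
/*
 * Note: size is a power of two and mask == size - 1, so a node index is simply
 * counter & mask. The head/tail counters grow monotonically and may wrap around;
 * the distance calculations below rely on 32-bit modular arithmetic, so the wrap
 * is harmless. At most mask (i.e. size - 1) nodes can be stored at any time.
 */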

extern int32_t QueueSizeCalc(uint32_t unitNum, uint32_t* queueSize);

extern int32_t QueueCountGet(const LockFreeQueue* queue, uint32_t* count);

extern int32_t QueueInit(LockFreeQueue* queue, uint32_t unitNum);

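/*
 * Usage sketch (illustrative, not part of the original API contract; it assumes a
 * heap allocator is available and that QueueSizeCalc() yields the byte size needed
 * for a queue holding unitNum nodes):
 *
 *     uint32_t bytes = 0;
 *     if (QueueSizeCalc(16, &bytes) != 0) {
 *         // handle error
 *     }
 *     LockFreeQueue* queue = (LockFreeQueue*)malloc(bytes);
 *     if (queue == NULL || QueueInit(queue, 16) != 0) {
 *         // handle error
 *     }
 */
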
/** @brief Check whether the queue is empty: 0 if empty, -1 if not, QUEUE_INVAL if queue is NULL */
static inline int32_t QueueIsEmpty(LockFreeQueue* queue)
{
    uint32_t producerTail;
    uint32_t consumerTail;
    uint32_t mask;

    if (queue == NULL) {
        return QUEUE_INVAL;
    }

    producerTail = queue->producer.tail;
    consumerTail = queue->consumer.tail;
    mask = queue->producer.mask;

    if (((producerTail - consumerTail) & mask) == 0) {
        return 0;
    }
    return -1;
}

/** @brief Enqueue operation; single producer only (not thread safe) */
static inline int32_t QueueSingleProducerEnqueue(LockFreeQueue* queue, void* node)
{
    uint32_t producerHead;
    uint32_t producerNext;
    uint32_t consumerTail;
    uint32_t availableCount;
    uint32_t mask;

    if (queue == NULL || node == NULL) {
        return QUEUE_INVAL;
    }
    mask = queue->producer.mask;

    producerHead = queue->producer.head;
    RMB();
    consumerTail = queue->consumer.tail;

    /*
     * 1. In the normal case, producerHead > consumerTail and producerHead < consumerTail + mask.
     * 2. If only producerHead has wrapped around, producerHead > consumerTail - 0xFFFFFFFF and
     *    producerHead < consumerTail + mask - 0xFFFFFFFF.
     * Subtraction of two 32-bit unsigned integers is performed modulo 2^32,
     * so availableCount is always between 0 and the queue length.
     */
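    /*
     * Worked example (illustrative): with mask = 7, consumerTail = 0xFFFFFFFE and a
     * wrapped producerHead = 2, four slots are occupied, and in 32-bit arithmetic
     * availableCount = (7 + 0xFFFFFFFE) - 2 = 3, i.e. three slots are still free.
     */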
    availableCount = (mask + consumerTail) - producerHead;

    if (availableCount < 1) {
        return QUEUE_FULL;
    }

    producerNext = producerHead + 1;
    queue->producer.head = producerNext;

    queue->nodes[producerHead & mask] = (uintptr_t)node;

    /*
     * Make sure the slot is filled with the element before the producer tail is updated.
     * If the producer tail were updated first:
     * 1. The consumer would assume the slot had been enqueued and could be consumed,
     *    and would actually read a dirty element.
     * 2. If the process terminated abnormally at that point, the slot would be left
     *    holding a dirty element.
     */
    WMB();

    queue->producer.tail = producerNext;
    return 0;
}

/** @brief Enqueue operation; safe for multiple concurrent producers */
static inline int32_t QueueMultiProducerEnqueue(LockFreeQueue* queue, void* node)
{
    uint32_t producerHead;
    uint32_t producerNext;
    uint32_t consumerTail;
    uint32_t availableCount;
    bool success = false;
    uint32_t mask;

    if (queue == NULL || node == NULL) {
        return QUEUE_INVAL;
    }

    mask = queue->producer.mask;
    do {
        producerHead = queue->producer.head;
        /*
         * Make sure the producer head is read before the consumer tail.
         * If the consumer tail were read first, the consumers could drain the queue
         * and other producers could then refill it, so the producer head could wrap
         * past the consumer tail.
         */
        RMB();
        consumerTail = queue->consumer.tail;

        /*
         * 1. In the normal case, producerHead > consumerTail and producerHead < consumerTail + mask.
         * 2. If only producerHead has wrapped around, producerHead > consumerTail - 0xFFFFFFFF and
         *    producerHead < consumerTail + mask - 0xFFFFFFFF.
         * Subtraction of two 32-bit unsigned integers is performed modulo 2^32,
         * so availableCount is always between 0 and the queue length.
         */
        availableCount = (mask + consumerTail) - producerHead;

        if (availableCount < 1) {
            return QUEUE_FULL;
        }

        producerNext = producerHead + 1;
        success = HalAtomicCmpAndSwap32(&queue->producer.head, producerHead, producerNext);
    } while (success == false);
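
    /* The successful CAS reserved slot (producerHead & mask) exclusively for this producer. */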
    queue->nodes[producerHead & mask] = (uintptr_t)node;

    /*
     * Make sure the slot is filled with the element before the producer tail is updated.
     * If the producer tail were updated first:
     * 1. The consumer would assume the slot had been enqueued and could be consumed,
     *    and would actually read a dirty element.
     * 2. If the process terminated abnormally at that point, the slot would be left
     *    holding a dirty element.
     */
    WMB();

    /* Wait for producers that started earlier to finish enqueuing. */
    while (queue->producer.tail != producerHead) {
        ThreadYield();
    }

    queue->producer.tail += 1;
    return 0;
}
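
/*
 * Example producer loop (illustrative): back off with a yield while the ring is full.
 *
 *     while (QueueMultiProducerEnqueue(queue, item) == QUEUE_FULL) {
 *         ThreadYield();
 *     }
 */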

/** @brief Dequeue operation; single consumer only (not thread safe) */
static inline int32_t QueueSingleConsumerDequeue(LockFreeQueue* queue, void** node)
{
    uint32_t consumerHead;
    uint32_t producerTail;
    uint32_t consumerNext;
    uint32_t availableCount;
    uint32_t mask;

    if (queue == NULL || node == NULL) {
        return QUEUE_INVAL;
    }
    mask = queue->producer.mask;

    consumerHead = queue->consumer.head;

    /* Prevent producerTail from being read before consumerHead; otherwise the head and tail could appear reversed. */
    RMB();

    producerTail = queue->producer.tail;

    /*
     * 1. In the normal case, producerTail > consumerHead and producerTail < consumerHead + mask.
     * 2. If only producerTail has wrapped around, producerTail > consumerHead - 0xFFFFFFFF and
     *    producerTail < consumerHead + mask - 0xFFFFFFFF.
     * Subtraction of two 32-bit unsigned integers is performed modulo 2^32,
     * so availableCount is always between 0 and the queue length.
     */
    availableCount = (producerTail - consumerHead);

    if (availableCount < 1) {
        return QUEUE_EMPTY;
    }

    consumerNext = consumerHead + 1;
    queue->consumer.head = consumerNext;

    /* Prevent the read of queue->nodes from being reordered before the read of the producer tail. */
    RMB();

    *node = (void*)(queue->nodes[consumerHead & mask]);

    /*
     * Make sure the element is read out of the slot before the consumer tail is updated.
     * Once the consumer tail advances, the producer assumes the slot has been drained and
     * may fill it with a new element, which would overwrite an element not yet dequeued.
     */
    RMB();

    queue->consumer.tail = consumerNext;
    return 0;
}

/** @brief Dequeue operation; safe for multiple concurrent consumers */
static inline int32_t QueueMultiConsumerDequeue(LockFreeQueue* queue, void** node)
{
    bool success = false;
    uint32_t consumerHead;
    uint32_t producerTail;
    uint32_t consumerNext;
    uint32_t availableCount;
    uint32_t mask;

    if (queue == NULL || node == NULL) {
        return QUEUE_INVAL;
    }
    mask = queue->producer.mask;

    do {
        consumerHead = queue->consumer.head;

        /*
         * Make sure the consumer head is read before the producer tail.
         * If the producer tail were read first, other consumers could drain the queue
         * and the producer could then refill it, so the consumer head could wrap past
         * the producer tail.
         */
        RMB();

        producerTail = queue->producer.tail;

        /*
         * 1. In the normal case, producerTail > consumerHead and producerTail < consumerHead + mask.
         * 2. If only producerTail has wrapped around, producerTail > consumerHead - 0xFFFFFFFF and
         *    producerTail < consumerHead + mask - 0xFFFFFFFF.
         * Subtraction of two 32-bit unsigned integers is performed modulo 2^32,
         * so availableCount is always between 0 and the queue length.
         */
        availableCount = (producerTail - consumerHead);

        if (availableCount < 1) {
            return QUEUE_EMPTY;
        }

        consumerNext = consumerHead + 1;
        success = HalAtomicCmpAndSwap32(&queue->consumer.head, consumerHead, consumerNext);
    } while (success == false);
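
    /* The successful CAS reserved slot (consumerHead & mask) exclusively for this consumer. */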

    /* Prevent the read of queue->nodes from being reordered before the read of the producer tail. */
    RMB();

    *node = (void*)(queue->nodes[consumerHead & mask]);

    /*
     * Make sure the element is read out of the slot before the consumer tail is updated.
     * Once the consumer tail advances, the producer assumes the slot has been drained and
     * may fill it with a new element, which would overwrite an element not yet dequeued.
     */
    RMB();

    /* Wait for consumers that started earlier to finish dequeuing. */
    while (queue->consumer.tail != consumerHead) {
        ThreadYield();
    }

    queue->consumer.tail += 1;

    return 0;
}
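
/*
 * Example consumer loop (illustrative): spin with a yield until an element arrives.
 *
 *     void* item = NULL;
 *     while (QueueMultiConsumerDequeue(queue, &item) == QUEUE_EMPTY) {
 *         ThreadYield();
 *     }
 */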

#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
#endif // GRAPHIC_LITE_QUEUE_H