• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 //
2 // Copyright 2023 The ANGLE Project Authors. All rights reserved.
3 // Use of this source code is governed by a BSD-style license that can be
4 // found in the LICENSE file.
5 //
6 // FixedQueue.h:
7 //   An array based fifo queue class that supports concurrent push and pop.
8 //
9 
10 #ifndef COMMON_FIXEDQUEUE_H_
11 #define COMMON_FIXEDQUEUE_H_
12 
13 #include "common/debug.h"
14 
15 #include <algorithm>
16 #include <array>
17 #include <atomic>
18 
19 namespace angle
20 {
// class FixedQueue: An array based fixed storage FIFO queue class that supports concurrent push
// and pop. Caller must ensure the queue is not empty before pop and not full before push. This
// class supports concurrent push and pop from different threads. If callers want to push (or pop)
// from two different threads, a proper mutex must be used to ensure the access is serialized.
template <class T, size_t N, class Storage = std::array<T, N>>
class FixedQueue final : angle::NonCopyable
{
  public:
    using value_type      = typename Storage::value_type;
    using size_type       = typename Storage::size_type;
    using reference       = typename Storage::reference;
    using const_reference = typename Storage::const_reference;

    FixedQueue();
    ~FixedQueue();

    // Number of elements currently in the queue (atomic read; safe to call concurrently).
    size_type size() const;
    bool empty() const;
    bool full() const;

    // Access the oldest element. Precondition: queue is not empty.
    reference front();
    const_reference front() const;

    // Append an element. Precondition: queue is not full.
    void push(const value_type &value);
    void push(value_type &&value);

    // Access the newest element. Precondition: queue is not empty.
    reference back();
    const_reference back() const;

    // Remove the oldest element. Precondition: queue is not empty.
    void pop();
    // Pops all elements present at the time of the call (a concurrent pusher may add more).
    void clear();

  private:
    Storage mData;
    // The front and back indices are virtual indices (think of the queue size as infinite). They
    // will never wrap around when they hit N. The wrap around occurs when an element is
    // referenced. Virtual index for current head.
    size_type mFrontIndex;
    // Virtual index for next write.
    size_type mEndIndex;
    // Atomic so that we can support concurrent push and pop.
    std::atomic<size_type> mSize;
};
64 
65 template <class T, size_t N, class Storage>
FixedQueue()66 FixedQueue<T, N, Storage>::FixedQueue() : mFrontIndex(0), mEndIndex(0), mSize(0)
67 {}
68 
69 template <class T, size_t N, class Storage>
~FixedQueue()70 FixedQueue<T, N, Storage>::~FixedQueue()
71 {}
72 
73 template <class T, size_t N, class Storage>
size()74 ANGLE_INLINE typename FixedQueue<T, N, Storage>::size_type FixedQueue<T, N, Storage>::size() const
75 {
76     return mSize;
77 }
78 
79 template <class T, size_t N, class Storage>
empty()80 ANGLE_INLINE bool FixedQueue<T, N, Storage>::empty() const
81 {
82     return mSize == 0;
83 }
84 
85 template <class T, size_t N, class Storage>
full()86 ANGLE_INLINE bool FixedQueue<T, N, Storage>::full() const
87 {
88     return mSize >= N;
89 }
90 
91 template <class T, size_t N, class Storage>
front()92 ANGLE_INLINE typename FixedQueue<T, N, Storage>::reference FixedQueue<T, N, Storage>::front()
93 {
94     ASSERT(mSize > 0);
95     return mData[mFrontIndex % N];
96 }
97 
98 template <class T, size_t N, class Storage>
front()99 ANGLE_INLINE typename FixedQueue<T, N, Storage>::const_reference FixedQueue<T, N, Storage>::front()
100     const
101 {
102     ASSERT(mSize > 0);
103     return mData[mFrontIndex % N];
104 }
105 
106 template <class T, size_t N, class Storage>
push(const value_type & value)107 void FixedQueue<T, N, Storage>::push(const value_type &value)
108 {
109     ASSERT(mSize < N);
110     mData[mEndIndex % N] = value;
111     mEndIndex++;
112     // We must increment size last, after we wrote data. That way if another thread is doing
113     // `if(!dq.empty()){ s = dq.front(); }`, it will only see not empty until element is fully
114     // pushed.
115     mSize++;
116 }
117 
118 template <class T, size_t N, class Storage>
push(value_type && value)119 void FixedQueue<T, N, Storage>::push(value_type &&value)
120 {
121     ASSERT(mSize < N);
122     mData[mEndIndex % N] = std::move(value);
123     mEndIndex++;
124     // We must increment size last, after we wrote data. That way if another thread is doing
125     // `if(!dq.empty()){ s = dq.front(); }`, it will only see not empty until element is fully
126     // pushed.
127     mSize++;
128 }
129 
130 template <class T, size_t N, class Storage>
back()131 ANGLE_INLINE typename FixedQueue<T, N, Storage>::reference FixedQueue<T, N, Storage>::back()
132 {
133     ASSERT(mSize > 0);
134     return mData[(mEndIndex + (N - 1)) % N];
135 }
136 
137 template <class T, size_t N, class Storage>
back()138 ANGLE_INLINE typename FixedQueue<T, N, Storage>::const_reference FixedQueue<T, N, Storage>::back()
139     const
140 {
141     ASSERT(mSize > 0);
142     return mData[(mEndIndex + (N - 1)) % N];
143 }
144 
145 template <class T, size_t N, class Storage>
pop()146 void FixedQueue<T, N, Storage>::pop()
147 {
148     ASSERT(mSize > 0);
149     mData[mFrontIndex % N] = value_type();
150     mFrontIndex++;
151     // We must decrement size last, after we wrote data. That way if another thread is doing
152     // `if(!dq.full()){ dq.push; }`, it will only see not full until element is fully popped.
153     mSize--;
154 }
155 
156 template <class T, size_t N, class Storage>
clear()157 void FixedQueue<T, N, Storage>::clear()
158 {
159     // Size will change in the "pop()" and also by "push()" calls from other thread.
160     const size_type localSize = mSize;
161     for (size_type i = 0; i < localSize; i++)
162     {
163         pop();
164     }
165 }
166 }  // namespace angle
167 
168 #endif  // COMMON_FIXEDQUEUE_H_
169