//
// Copyright 2023 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// FixedQueue.h:
//   An array based fifo queue class that supports concurrent push and pop.
//

#ifndef COMMON_FIXEDQUEUE_H_
#define COMMON_FIXEDQUEUE_H_

#include "common/debug.h"

#include <array>
#include <atomic>
#include <cstddef>

namespace angle
{
// class FixedQueue: An array based, fixed storage fifo queue class that supports concurrent push
// and pop. The caller must ensure the queue is not empty before pop and not full before push. This
// class supports concurrent push and pop from different threads. If the caller wants to push from
// two different threads, a proper mutex must be used to ensure the access is serialized.
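//
// Usage sketch (illustrative only; ProducerThread/ConsumerThread below are hypothetical helpers,
// not part of ANGLE): a single producer thread and a single consumer thread may operate on the
// queue concurrently without a lock, provided each side checks full()/empty() first.
//
//   angle::FixedQueue<int, 8> queue;
//
//   void ProducerThread()
//   {
//       // Wait for room, then push; only this thread pushes.
//       while (queue.full()) {}
//       queue.push(42);
//   }
//
//   void ConsumerThread()
//   {
//       // Wait for an element, then read and pop it; only this thread pops.
//       while (queue.empty()) {}
//       int value = queue.front();
//       queue.pop();
//   }
//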
template <class T, size_t N, class Storage = std::array<T, N>>
class FixedQueue final : angle::NonCopyable
{
  public:
    using value_type      = typename Storage::value_type;
    using size_type       = typename Storage::size_type;
    using reference       = typename Storage::reference;
    using const_reference = typename Storage::const_reference;

    FixedQueue();
    ~FixedQueue();

    size_type size() const;
    bool empty() const;
    bool full() const;

    reference front();
    const_reference front() const;

    void push(const value_type &value);
    void push(value_type &&value);

    reference back();
    const_reference back() const;

    void pop();
    void clear();

  private:
    Storage mData;
    // The front and back indices are virtual indices (think of the queue size as infinite). They
    // will never wrap around when they hit N. The wrap around occurs when an element is
    // referenced.
    // Virtual index for the current head.
    size_type mFrontIndex;
    // Virtual index for the next write.
    size_type mEndIndex;
    // Atomic so that we can support concurrent push and pop.
    std::atomic<size_type> mSize;
};

template <class T, size_t N, class Storage>
FixedQueue<T, N, Storage>::FixedQueue() : mFrontIndex(0), mEndIndex(0), mSize(0)
{}

template <class T, size_t N, class Storage>
FixedQueue<T, N, Storage>::~FixedQueue()
{}

template <class T, size_t N, class Storage>
ANGLE_INLINE typename FixedQueue<T, N, Storage>::size_type FixedQueue<T, N, Storage>::size() const
{
    return mSize;
}

template <class T, size_t N, class Storage>
ANGLE_INLINE bool FixedQueue<T, N, Storage>::empty() const
{
    return mSize == 0;
}

template <class T, size_t N, class Storage>
ANGLE_INLINE bool FixedQueue<T, N, Storage>::full() const
{
    return mSize >= N;
}

template <class T, size_t N, class Storage>
ANGLE_INLINE typename FixedQueue<T, N, Storage>::reference FixedQueue<T, N, Storage>::front()
{
    ASSERT(mSize > 0);
    return mData[mFrontIndex % N];
}

template <class T, size_t N, class Storage>
ANGLE_INLINE typename FixedQueue<T, N, Storage>::const_reference FixedQueue<T, N, Storage>::front()
    const
{
    ASSERT(mSize > 0);
    return mData[mFrontIndex % N];
}

template <class T, size_t N, class Storage>
void FixedQueue<T, N, Storage>::push(const value_type &value)
{
    ASSERT(mSize < N);
    mData[mEndIndex % N] = value;
    mEndIndex++;
    // We must increment size last, after we wrote the data. That way if another thread is doing
    // `if(!dq.empty()){ s = dq.front(); }`, it will only see the queue as non-empty after the
    // element is fully pushed.
    mSize++;
}

template <class T, size_t N, class Storage>
void FixedQueue<T, N, Storage>::push(value_type &&value)
{
    ASSERT(mSize < N);
    mData[mEndIndex % N] = std::move(value);
    mEndIndex++;
    // We must increment size last, after we wrote the data. That way if another thread is doing
    // `if(!dq.empty()){ s = dq.front(); }`, it will only see the queue as non-empty after the
    // element is fully pushed.
    mSize++;
}

template <class T, size_t N, class Storage>
ANGLE_INLINE typename FixedQueue<T, N, Storage>::reference FixedQueue<T, N, Storage>::back()
{
    ASSERT(mSize > 0);
    return mData[(mEndIndex + (N - 1)) % N];
}

template <class T, size_t N, class Storage>
ANGLE_INLINE typename FixedQueue<T, N, Storage>::const_reference FixedQueue<T, N, Storage>::back()
    const
{
    ASSERT(mSize > 0);
    return mData[(mEndIndex + (N - 1)) % N];
}

template <class T, size_t N, class Storage>
void FixedQueue<T, N, Storage>::pop()
{
    ASSERT(mSize > 0);
    mData[mFrontIndex % N] = value_type();
    mFrontIndex++;
    // We must decrement size last, after we cleared the data. That way if another thread is doing
    // `if(!dq.full()){ dq.push(v); }`, it will only see the queue as non-full after the element
    // is fully popped.
    mSize--;
}

template <class T, size_t N, class Storage>
void FixedQueue<T, N, Storage>::clear()
{
    // Size will change due to the "pop()" calls below, and may also change due to "push()" calls
    // from another thread, so cache it first.
    const size_type localSize = mSize;
    for (size_type i = 0; i < localSize; i++)
    {
        pop();
    }
}
}  // namespace angle

#endif  // COMMON_FIXEDQUEUE_H_