• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 
17 #ifndef INCLUDE_PERFETTO_EXT_BASE_CIRCULAR_QUEUE_H_
18 #define INCLUDE_PERFETTO_EXT_BASE_CIRCULAR_QUEUE_H_
19 
#include <stdint.h>
#include <stdlib.h>

#include <cstddef>
#include <iterator>
#include <new>
#include <utility>

#include "perfetto/base/logging.h"
#include "perfetto/ext/base/utils.h"
28 
29 namespace perfetto {
30 namespace base {
31 
// CircularQueue is a push-back-only / pop-front-only queue with the following
// characteristics:
// - The storage is based on a flat circular buffer. Beginning and end wrap
//   as necessary, to keep pushes and pops O(1) as long as capacity expansion is
//   not required.
// - Capacity is automatically expanded like in a std::vector. Expansion has a
//   O(N) cost.
// - It allows random access, allowing in-place std::sort.
// - Iterators are not stable. Mutating the container invalidates all iterators.
// - It doesn't bother with const-correctness.
//
// Implementation details:
// Internally, |begin|, |end| and iterators use 64-bit monotonic indexes, which
// are incremented as if the queue was backed by unlimited storage.
// Even assuming that elements are inserted and removed every nanosecond, 64 bit
// is enough for 584 years.
// Wrapping happens only when addressing elements in the underlying circular
// storage. This limits the complexity and avoids dealing with modular
// arithmetic all over the place.
51 template <class T>
52 class CircularQueue {
53  public:
54   class Iterator {
55    public:
56     using difference_type = ptrdiff_t;
57     using value_type = T;
58     using pointer = T*;
59     using reference = T&;
60     using iterator_category = std::random_access_iterator_tag;
61 
Iterator(CircularQueue * queue,uint64_t pos,uint32_t generation)62     Iterator(CircularQueue* queue, uint64_t pos, uint32_t generation)
63         : queue_(queue),
64           pos_(pos)
65 #if PERFETTO_DCHECK_IS_ON()
66           ,
67           generation_(generation)
68 #endif
69     {
70       ignore_result(generation);
71     }
72 
73     Iterator(const Iterator&) noexcept = default;
74     Iterator& operator=(const Iterator&) noexcept = default;
75     Iterator(Iterator&&) noexcept = default;
76     Iterator& operator=(Iterator&&) noexcept = default;
77 
78     T* operator->() const {
79 #if PERFETTO_DCHECK_IS_ON()
80       PERFETTO_DCHECK(generation_ == queue_->generation());
81 #endif
82       return queue_->Get(pos_);
83     }
84 
85     T& operator*() const { return *(operator->()); }
86 
87     value_type& operator[](difference_type i) { return *(*this + i); }
88 
89     Iterator& operator++() {
90       Add(1);
91       return *this;
92     }
93 
94     Iterator operator++(int) {
95       Iterator ret = *this;
96       Add(1);
97       return ret;
98     }
99 
100     Iterator& operator--() {
101       Add(-1);
102       return *this;
103     }
104 
105     Iterator operator--(int) {
106       Iterator ret = *this;
107       Add(-1);
108       return ret;
109     }
110 
111     friend Iterator operator+(const Iterator& iter, difference_type offset) {
112       Iterator ret = iter;
113       ret.Add(offset);
114       return ret;
115     }
116 
117     Iterator& operator+=(difference_type offset) {
118       Add(offset);
119       return *this;
120     }
121 
122     friend Iterator operator-(const Iterator& iter, difference_type offset) {
123       Iterator ret = iter;
124       ret.Add(-offset);
125       return ret;
126     }
127 
128     Iterator& operator-=(difference_type offset) {
129       Add(-offset);
130       return *this;
131     }
132 
133     friend ptrdiff_t operator-(const Iterator& lhs, const Iterator& rhs) {
134       return static_cast<ptrdiff_t>(lhs.pos_) -
135              static_cast<ptrdiff_t>(rhs.pos_);
136     }
137 
138     friend bool operator==(const Iterator& lhs, const Iterator& rhs) {
139       return lhs.pos_ == rhs.pos_;
140     }
141 
142     friend bool operator!=(const Iterator& lhs, const Iterator& rhs) {
143       return lhs.pos_ != rhs.pos_;
144     }
145 
146     friend bool operator<(const Iterator& lhs, const Iterator& rhs) {
147       return lhs.pos_ < rhs.pos_;
148     }
149 
150     friend bool operator<=(const Iterator& lhs, const Iterator& rhs) {
151       return lhs.pos_ <= rhs.pos_;
152     }
153 
154     friend bool operator>(const Iterator& lhs, const Iterator& rhs) {
155       return lhs.pos_ > rhs.pos_;
156     }
157 
158     friend bool operator>=(const Iterator& lhs, const Iterator& rhs) {
159       return lhs.pos_ >= rhs.pos_;
160     }
161 
162    private:
Add(difference_type offset)163     inline void Add(difference_type offset) {
164       pos_ = static_cast<uint64_t>(static_cast<difference_type>(pos_) + offset);
165       PERFETTO_DCHECK(pos_ <= queue_->end_);
166     }
167 
168     CircularQueue* queue_;
169     uint64_t pos_;
170 
171 #if PERFETTO_DCHECK_IS_ON()
172     uint32_t generation_;
173 #endif
174   };
175 
176   explicit CircularQueue(size_t initial_capacity = 1024) {
177     Grow(initial_capacity);
178   }
179 
CircularQueue(CircularQueue && other)180   CircularQueue(CircularQueue&& other) noexcept
181       : entries_(std::move(other.entries_)),
182         capacity_(other.capacity_),
183         begin_(other.begin_),
184         end_(other.end_) {
185     increment_generation();
186     new (&other) CircularQueue();  // Reset the old queue so it's still usable.
187   }
188 
189   CircularQueue& operator=(CircularQueue&& other) noexcept {
190     this->~CircularQueue();                      // Destroy the current state.
191     new (this) CircularQueue(std::move(other));  // Use the move ctor above.
192     return *this;
193   }
194 
CircularQueue(const CircularQueue & other)195   explicit CircularQueue(const CircularQueue& other) noexcept {
196     Grow(other.capacity());
197     for (const auto& e : const_cast<CircularQueue&>(other))
198       emplace_back(e);
199     PERFETTO_DCHECK(size() == other.size());
200   }
201 
202   CircularQueue& operator=(const CircularQueue& other) noexcept {
203     this->~CircularQueue();           // Destroy the current state.
204     new (this) CircularQueue(other);  // Use the copy ctor above.
205     return *this;
206   }
207 
~CircularQueue()208   ~CircularQueue() {
209     if (!entries_) {
210       PERFETTO_DCHECK(empty());
211       return;
212     }
213     clear();  // Invoke destructors on all alive entries.
214     PERFETTO_DCHECK(empty());
215   }
216 
217   template <typename... Args>
emplace_back(Args &&...args)218   void emplace_back(Args&&... args) {
219     increment_generation();
220     if (PERFETTO_UNLIKELY(size() >= capacity_))
221       Grow();
222     T* slot = Get(end_++);
223     new (slot) T(std::forward<Args>(args)...);
224   }
225 
erase_front(size_t n)226   void erase_front(size_t n) {
227     increment_generation();
228     for (; n && (begin_ < end_); --n) {
229       Get(begin_)->~T();
230       begin_++;  // This needs to be its own statement, Get() checks begin_.
231     }
232   }
233 
pop_front()234   void pop_front() { erase_front(1); }
235 
clear()236   void clear() { erase_front(size()); }
237 
shrink_to_fit()238   void shrink_to_fit() {
239     // We only bother shrinking if we can fit in quarter of the capacity we are
240     // currently using. Moreover, don't bother shrinking below 4096 elements as
241     // that will cause a lot of reallocations for little benefit.
242     if (size() > capacity() / 2 || capacity() <= 4096) {
243       return;
244     }
245     ChangeCapacity(capacity() / 2);
246   }
247 
at(size_t idx)248   T& at(size_t idx) {
249     PERFETTO_DCHECK(idx < size());
250     return *Get(begin_ + idx);
251   }
252 
begin()253   Iterator begin() { return Iterator(this, begin_, generation()); }
end()254   Iterator end() { return Iterator(this, end_, generation()); }
front()255   T& front() { return *begin(); }
back()256   T& back() { return *(end() - 1); }
257 
empty()258   bool empty() const { return size() == 0; }
259 
size()260   size_t size() const {
261     PERFETTO_DCHECK(end_ - begin_ <= capacity_);
262     return static_cast<size_t>(end_ - begin_);
263   }
264 
capacity()265   size_t capacity() const { return capacity_; }
266 
267 #if PERFETTO_DCHECK_IS_ON()
generation()268   uint32_t generation() const { return generation_; }
increment_generation()269   void increment_generation() { ++generation_; }
270 #else
generation()271   uint32_t generation() const { return 0; }
increment_generation()272   void increment_generation() {}
273 #endif
274 
275  private:
276   void Grow(size_t new_capacity = 0) {
277     // Capacity must be always a power of two. This allows Get() to use a simple
278     // bitwise-AND for handling the wrapping instead of a full division.
279     new_capacity = new_capacity ? new_capacity : capacity_ * 2;
280     PERFETTO_CHECK((new_capacity & (new_capacity - 1)) == 0);  // Must be pow2.
281 
282     // On 32-bit systems this might hit the 4GB wall and overflow. We can't do
283     // anything other than crash in this case.
284     PERFETTO_CHECK(new_capacity > capacity_);
285 
286     ChangeCapacity(new_capacity);
287   }
288 
ChangeCapacity(size_t new_capacity)289   void ChangeCapacity(size_t new_capacity) {
290     // We should still have enough space to fit all the elements in the queue.
291     PERFETTO_CHECK(new_capacity >= size());
292 
293     AlignedUniquePtr<T[]> new_vec = AlignedAllocTyped<T[]>(new_capacity);
294 
295     // Move all elements in the expanded array.
296     size_t new_size = 0;
297     for (uint64_t i = begin_; i < end_; i++)
298       new (&new_vec[new_size++]) T(std::move(*Get(i)));  // Placement move ctor.
299 
300     // Even if all the elements are std::move()-d and likely empty, we are still
301     // required to call the dtor for them.
302     for (uint64_t i = begin_; i < end_; i++)
303       Get(i)->~T();
304 
305     begin_ = 0;
306     end_ = new_size;
307     capacity_ = new_capacity;
308     entries_ = std::move(new_vec);
309   }
310 
Get(uint64_t pos)311   inline T* Get(uint64_t pos) {
312     PERFETTO_DCHECK(pos >= begin_ && pos < end_);
313     PERFETTO_DCHECK((capacity_ & (capacity_ - 1)) == 0);  // Must be a pow2.
314     auto index = static_cast<size_t>(pos & (capacity_ - 1));
315     return &entries_[index];
316   }
317 
318   // Underlying storage. It's raw malloc-ed rather than being a unique_ptr<T[]>
319   // to allow having uninitialized entries inside it.
320   AlignedUniquePtr<T[]> entries_;
321   size_t capacity_ = 0;  // Number of allocated slots (NOT bytes) in |entries_|.
322 
323   // The |begin_| and |end_| indexes are monotonic and never wrap. Modular arith
324   // is used only when dereferencing entries in the vector.
325   uint64_t begin_ = 0;
326   uint64_t end_ = 0;
327 
328 // Generation is used in debug builds only for checking iterator validity.
329 #if PERFETTO_DCHECK_IS_ON()
330   uint32_t generation_ = 0;
331 #endif
332 };
333 
334 }  // namespace base
335 }  // namespace perfetto
336 
337 #endif  // INCLUDE_PERFETTO_EXT_BASE_CIRCULAR_QUEUE_H_
338