// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_
#define BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_

#include <algorithm>
#include <cmath>
#include <limits>
#include <memory>
#include <utility>
#include <vector>

#include "base/check.h"
#include "base/check_op.h"
#include "base/compiler_specific.h"
#include "base/containers/heap_array.h"
#include "base/containers/span.h"
#include "base/debug/alias.h"
#include "base/gtest_prod_util.h"
#include "base/memory/aligned_memory.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/raw_ptr_exclusion.h"
#include "base/memory/raw_span.h"
#include "base/time/time.h"

namespace base {
namespace sequence_manager {
namespace internal {
// A LazilyDeallocatedDeque specialized for the SequenceManager's usage
// patterns. The queue generally grows while tasks are added and then removed
// until empty and the cycle repeats.
//
// The main difference between sequence_manager::LazilyDeallocatedDeque and
// others is memory management. For performance (memory allocation isn't free)
// we don't automatically reclaim memory when the queue becomes empty.
// Instead we rely on the surrounding code periodically calling
// MaybeShrinkQueue, ideally when the queue is empty.
//
// We keep track of the maximum recent queue size and rate limit
// MaybeShrinkQueue to avoid unnecessary churn.
//
// NB: this queue isn't thread-safe by itself.
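//
// A minimal usage sketch (illustrative only; the element type and values are
// hypothetical, not taken from real call sites):
//
//   LazilyDeallocatedDeque<int> deque;
//   deque.push_back(1);
//   deque.push_back(2);
//   int first = deque.front();  // 1
//   deque.pop_front();
//   deque.MaybeShrinkQueue();   // Rate-limited; may reclaim spare capacity.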
template <typename T, TimeTicks (*now_source)() = TimeTicks::Now>
class LazilyDeallocatedDeque {
 public:
  enum {
    // Minimum allocation for a ring. Note a ring of size 4 will only hold up
    // to 3 elements.
    kMinimumRingSize = 4,

    // Maximum "wasted" capacity allowed when considering if we should resize
    // the backing store.
    kReclaimThreshold = 16,

    // Used to rate limit how frequently MaybeShrinkQueue actually shrinks the
    // queue.
    kMinimumShrinkIntervalInSeconds = 5
  };

  LazilyDeallocatedDeque() = default;
  LazilyDeallocatedDeque(const LazilyDeallocatedDeque&) = delete;
  LazilyDeallocatedDeque& operator=(const LazilyDeallocatedDeque&) = delete;
  ~LazilyDeallocatedDeque() { clear(); }

  bool empty() const { return size_ == 0; }

  size_t max_size() const { return max_size_; }

  size_t size() const { return size_; }

  size_t capacity() const {
    size_t capacity = 0;
    for (const Ring* iter = head_.get(); iter; iter = iter->next_.get()) {
      capacity += iter->capacity();
    }
    return capacity;
  }

  void clear() {
    while (head_) {
      head_ = std::move(head_->next_);
    }

    tail_ = nullptr;
    size_ = 0;
  }

  // Assumed to be an uncommon operation.
  void push_front(T t) {
    if (!head_) {
      DCHECK(!tail_);
      head_ = std::make_unique<Ring>(kMinimumRingSize);
      tail_ = head_.get();
    }

    // Grow if needed, by the minimum amount.
    if (!head_->CanPush()) {
      // TODO(alexclarke): Remove once we've understood the OOMs.
      size_t size = size_;
      base::debug::Alias(&size);

      std::unique_ptr<Ring> new_ring = std::make_unique<Ring>(kMinimumRingSize);
      new_ring->next_ = std::move(head_);
      head_ = std::move(new_ring);
    }

    head_->push_front(std::move(t));
    max_size_ = std::max(max_size_, ++size_);
  }
  // Assumed to be a common operation.
  void push_back(T t) {
    if (!head_) {
      DCHECK(!tail_);
      head_ = std::make_unique<Ring>(kMinimumRingSize);
      tail_ = head_.get();
    }

    // Grow if needed.
    if (!tail_->CanPush()) {
      // TODO(alexclarke): Remove once we've understood the OOMs.
      size_t size = size_;
      base::debug::Alias(&size);

      // Doubling the size is a common strategy, but one which can be
      // wasteful, so we use a (somewhat) slower growth curve.
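      // Starting from kMinimumRingSize, successive ring capacities grow
      // roughly 1.5x per ring: 4, 8, 14, 23, 36, 56, ...
      // (new = 2 + old + old / 2).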
      tail_->next_ = std::make_unique<Ring>(2 + tail_->capacity() +
                                            (tail_->capacity() / 2));
      tail_ = tail_->next_.get();
    }

    tail_->push_back(std::move(t));
    max_size_ = std::max(max_size_, ++size_);
  }

  T& front() LIFETIME_BOUND {
    DCHECK(head_);
    return head_->front();
  }

  const T& front() const LIFETIME_BOUND {
    DCHECK(head_);
    return head_->front();
  }

  T& back() LIFETIME_BOUND {
    DCHECK(tail_);
    return tail_->back();
  }

  const T& back() const LIFETIME_BOUND {
    DCHECK(tail_);
    return tail_->back();
  }

  void pop_front() {
    DCHECK(head_);
    DCHECK(!head_->empty());
    DCHECK(tail_);
    DCHECK_GT(size_, 0u);
    head_->pop_front();

    // If the ring has become empty and we have several rings, then remove the
    // head one (which we expect to have lower capacity than the remaining
    // ones).
    if (head_->empty() && head_->next_) {
      head_ = std::move(head_->next_);
    }

    --size_;
  }

  void swap(LazilyDeallocatedDeque& other) {
    std::swap(head_, other.head_);
    std::swap(tail_, other.tail_);
    std::swap(size_, other.size_);
    std::swap(max_size_, other.max_size_);
    std::swap(next_resize_time_, other.next_resize_time_);
  }

  void MaybeShrinkQueue() {
    if (!tail_)
      return;

    DCHECK_GE(max_size_, size_);

    // Rate limit how often we shrink the queue because it's somewhat
    // expensive.
    TimeTicks current_time = now_source();
    if (current_time < next_resize_time_)
      return;

    // Due to the way the Ring works we need 1 more slot than is used.
    size_t new_capacity = max_size_ + 1;
    if (new_capacity < kMinimumRingSize)
      new_capacity = kMinimumRingSize;

    // Reset |max_size_| so that, unless usage spikes up again, we will
    // consider reclaiming the memory next time.
    max_size_ = size_;

    // Only realloc if the current capacity is sufficiently greater than the
    // observed maximum size for the previous period.
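    // For example (illustrative numbers): if |max_size_| was 10 and
    // capacity() is currently 40, then new_capacity is 11, and since
    // 11 + kReclaimThreshold (16) < 40 we shrink the backing store to 11.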
    if (new_capacity + kReclaimThreshold >= capacity())
      return;

    SetCapacity(new_capacity);
    next_resize_time_ = current_time + Seconds(kMinimumShrinkIntervalInSeconds);
  }

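  // Moves every element into a single new ring of exactly |new_capacity|
  // slots, replacing the existing chain of rings.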
  void SetCapacity(size_t new_capacity) {
    std::unique_ptr<Ring> new_ring = std::make_unique<Ring>(new_capacity);

    DCHECK_GE(new_capacity, size_ + 1);

    // Preserve the |size_| which counts down to zero in the while loop.
    size_t real_size = size_;

    while (!empty()) {
      DCHECK(new_ring->CanPush());
      new_ring->push_back(std::move(head_->front()));
      pop_front();
    }

    size_ = real_size;

    DCHECK_EQ(head_.get(), tail_);
    head_ = std::move(new_ring);
    tail_ = head_.get();
  }

 private:
  FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushFront);
  FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushBack);
  FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingCanPush);
  FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushPopPushPop);

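  // A fixed-capacity circular buffer. Rings are chained via |next_| so the
  // deque can grow without copying existing elements.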
  struct Ring {
    explicit Ring(size_t capacity) {
      DCHECK_GE(capacity, kMinimumRingSize);
      std::tie(backing_store_, data_) = AlignedUninitCharArray<T>(capacity);
    }
    Ring(const Ring&) = delete;
    Ring& operator=(const Ring&) = delete;
    ~Ring() {
      while (!empty()) {
        pop_front();
      }
    }

    bool empty() const { return back_index_ == before_front_index_; }

    size_t capacity() const { return data_.size(); }

    bool CanPush() const {
      return before_front_index_ != CircularIncrement(back_index_);
    }

    void push_front(T&& t) {
      // Mustn't appear to become empty.
      CHECK_NE(CircularDecrement(before_front_index_), back_index_);
      std::construct_at(data_.get_at(before_front_index_), std::move(t));
      before_front_index_ = CircularDecrement(before_front_index_);
    }

    void push_back(T&& t) {
      back_index_ = CircularIncrement(back_index_);
      CHECK(!empty());  // Mustn't appear to become empty.
      std::construct_at(data_.get_at(back_index_), std::move(t));
    }

    void pop_front() {
      CHECK(!empty());
      before_front_index_ = CircularIncrement(before_front_index_);
      data_[before_front_index_].~T();
    }

    T& front() LIFETIME_BOUND {
      CHECK(!empty());
      return data_[CircularIncrement(before_front_index_)];
    }

    const T& front() const LIFETIME_BOUND {
      CHECK(!empty());
      return data_[CircularIncrement(before_front_index_)];
    }

    T& back() LIFETIME_BOUND {
      CHECK(!empty());
      return data_[back_index_];
    }

    const T& back() const LIFETIME_BOUND {
      CHECK(!empty());
      return data_[back_index_];
    }

    size_t CircularDecrement(size_t index) const {
      if (index == 0)
        return capacity() - 1;
      return index - 1;
    }

    size_t CircularIncrement(size_t index) const {
      CHECK_LT(index, capacity());
      ++index;
      if (index == capacity()) {
        return 0;
      }
      return index;
    }

    AlignedHeapArray<char> backing_store_;
    raw_span<T> data_;
    // Indices into `data_` for one-before-the-first element and the last
    // element. The back_index_ may be less than before_front_index_ if the
    // elements wrap around the back of the array. If they are equal, then the
    // Ring is empty.
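    //
    // For example (illustrative layout): with capacity 8,
    // before_front_index_ == 6 and back_index_ == 1, the occupied slots wrap
    // past the end of the array and are 7, 0 and 1:
    //
    //   index:    0    1    2    3    4    5    6    7
    //            [x]  [x]  [ ]  [ ]  [ ]  [ ]  [ ]  [x]
    //                  ^back                   ^bff  ^front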
    size_t before_front_index_ = 0;
    size_t back_index_ = 0;
    std::unique_ptr<Ring> next_ = nullptr;
  };

 public:
  class Iterator {
   public:
    using value_type = T;
    using pointer = const T*;
    using reference = const T&;

    const T* operator->() const { return &ring_->data_[index_]; }
    const T& operator*() const { return ring_->data_[index_]; }

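    // Advances to the next element. When we step past the back of the current
    // ring we hop to the next ring in the chain (if any) and resume at its
    // front; a null |ring_| marks the end of the sequence.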
    Iterator& operator++() {
      if (index_ == ring_->back_index_) {
        ring_ = ring_->next_.get();
        index_ =
            ring_ ? ring_->CircularIncrement(ring_->before_front_index_) : 0;
      } else {
        index_ = ring_->CircularIncrement(index_);
      }
      return *this;
    }

    operator bool() const { return !!ring_; }

   private:
    explicit Iterator(const Ring* ring) {
      if (!ring || ring->empty()) {
        ring_ = nullptr;
        index_ = 0;
        return;
      }

      ring_ = ring;
      index_ = ring_->CircularIncrement(ring->before_front_index_);
    }

    raw_ptr<const Ring> ring_;
    size_t index_;

    friend class LazilyDeallocatedDeque;
  };

  Iterator begin() const { return Iterator(head_.get()); }

  Iterator end() const { return Iterator(nullptr); }
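
  // Iterator does not define operator==, so iteration tests the iterator's
  // bool conversion rather than comparing against end(). An illustrative
  // sketch (element type hypothetical):
  //
  //   for (auto it = deque.begin(); it; ++it) {
  //     const auto& value = *it;
  //   }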

 private:
  // We maintain a list of Ring buffers, to enable us to grow without copying,
  // but most of the time we aim to have only one active Ring.
  std::unique_ptr<Ring> head_;

  // `tail_` is not a raw_ptr<...> for performance reasons (based on analysis
  // of sampling profiler data and tab_search:top100:2020).
  RAW_PTR_EXCLUSION Ring* tail_ = nullptr;

  size_t size_ = 0;
  size_t max_size_ = 0;
  TimeTicks next_resize_time_;
};

}  // namespace internal
}  // namespace sequence_manager
}  // namespace base

#endif  // BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_