// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_
#define BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_

#include <algorithm>
#include <cmath>
#include <memory>
#include <vector>

#include "base/gtest_prod_util.h"
#include "base/logging.h"
#include "base/time/time.h"

namespace base {
namespace sequence_manager {
namespace internal {

// A LazilyDeallocatedDeque specialized for the SequenceManager's usage
// patterns. The queue generally grows while tasks are added and then removed
// until empty and the cycle repeats.
//
// The main difference between sequence_manager::LazilyDeallocatedDeque and
// others is memory management. For performance (memory allocation isn't free)
// we don't automatically reclaim memory when the queue becomes empty.
// Instead we rely on the surrounding code periodically calling
// MaybeShrinkQueue, ideally when the queue is empty.
//
// We keep track of the maximum recent queue size and rate limit
// MaybeShrinkQueue to avoid unnecessary churn.
//
// NB: this queue isn't by itself thread safe.
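//
// A minimal usage sketch (illustrative only, not part of the API surface;
// DoWork() below is a hypothetical caller-side function):
//
//   LazilyDeallocatedDeque<int> deque;
//   deque.push_back(1);
//   deque.push_back(2);
//   while (!deque.empty()) {
//     DoWork(deque.front());
//     deque.pop_front();
//   }
//   // Capacity is retained after the queue drains; the caller reclaims it
//   // explicitly (the call is internally rate limited).
//   deque.MaybeShrinkQueue();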
template <typename T>
class LazilyDeallocatedDeque {
 public:
  enum {
    // Minimum allocation for a ring. Note a ring of size 4 will only hold up
    // to 3 elements.
    kMinimumRingSize = 4,

    // Maximum "wasted" capacity allowed when considering if we should resize
    // the backing store.
    kReclaimThreshold = 16,

    // Used to rate limit how frequently MaybeShrinkQueue actually shrinks the
    // queue.
    kMinimumShrinkIntervalInSeconds = 5
  };

  LazilyDeallocatedDeque() {}

  ~LazilyDeallocatedDeque() { clear(); }

  bool empty() const { return size_ == 0; }

  size_t max_size() const { return max_size_; }

  size_t size() const { return size_; }

  size_t capacity() const {
    size_t capacity = 0;
    for (const Ring* iter = head_.get(); iter; iter = iter->next_.get()) {
      capacity += iter->capacity();
    }
    return capacity;
  }

  void clear() {
    while (head_) {
      head_ = std::move(head_->next_);
    }

    tail_ = nullptr;
    size_ = 0;
  }

  // Assumed to be an uncommon operation.
  void push_front(T t) {
    if (!head_) {
      head_ = std::make_unique<Ring>(kMinimumRingSize);
      tail_ = head_.get();
    }

    // Grow if needed, by the minimum amount.
    if (!head_->CanPush()) {
      std::unique_ptr<Ring> new_ring = std::make_unique<Ring>(kMinimumRingSize);
      new_ring->next_ = std::move(head_);
      head_ = std::move(new_ring);
    }

    head_->push_front(std::move(t));
    max_size_ = std::max(max_size_, ++size_);
  }

  // Assumed to be a common operation.
  void push_back(T t) {
    if (!head_) {
      head_ = std::make_unique<Ring>(kMinimumRingSize);
      tail_ = head_.get();
    }

    // Grow if needed.
    if (!tail_->CanPush()) {
      tail_->next_ = std::make_unique<Ring>(tail_->capacity() * 2);
      tail_ = tail_->next_.get();
    }

    tail_->push_back(std::move(t));
    max_size_ = std::max(max_size_, ++size_);
  }

  T& front() {
    DCHECK(head_);
    return head_->front();
  }

  const T& front() const {
    DCHECK(head_);
    return head_->front();
  }

  T& back() {
    DCHECK(tail_);
    return tail_->back();
  }

  const T& back() const {
    DCHECK(tail_);
    return tail_->back();
  }

  void pop_front() {
    DCHECK(tail_);
    DCHECK_GT(size_, 0u);
    head_->pop_front();

    // If the ring has become empty and we have several rings, then remove the
    // head one (which we expect to have lower capacity than the remaining
    // ones).
    if (head_->empty() && head_->next_) {
      head_ = std::move(head_->next_);
    }

    --size_;
  }

  void swap(LazilyDeallocatedDeque& other) {
    std::swap(head_, other.head_);
    std::swap(tail_, other.tail_);
    std::swap(size_, other.size_);
    std::swap(max_size_, other.max_size_);
    std::swap(next_resize_time_, other.next_resize_time_);
  }

  void MaybeShrinkQueue() {
    if (!tail_)
      return;

    DCHECK_GE(max_size_, size_);

    // Rate limit how often we shrink the queue because it's somewhat
    // expensive.
    TimeTicks current_time = TimeTicks::Now();
    if (current_time < next_resize_time_)
      return;

    // Due to the way the Ring works we need 1 more slot than is used.
    size_t new_capacity = max_size_ + 1;
    if (new_capacity < kMinimumRingSize)
      new_capacity = kMinimumRingSize;

    // Reset |max_size_| so that, unless usage has spiked up, we will consider
    // reclaiming it next time.
    max_size_ = size_;

    // Only realloc if the current capacity is sufficiently larger than the
    // observed maximum size for the previous period.
    if (new_capacity + kReclaimThreshold >= capacity())
      return;

    SetCapacity(new_capacity);
    next_resize_time_ =
        current_time + TimeDelta::FromSeconds(kMinimumShrinkIntervalInSeconds);
  }

  void SetCapacity(size_t new_capacity) {
    std::unique_ptr<Ring> new_ring = std::make_unique<Ring>(new_capacity);

    DCHECK_GE(new_capacity, size_ + 1);

    // Preserve the |size_| which counts down to zero in the while loop.
    size_t real_size = size_;

    while (!empty()) {
      DCHECK(new_ring->CanPush());
      new_ring->push_back(std::move(head_->front()));
      pop_front();
    }

    size_ = real_size;

    DCHECK_EQ(head_.get(), tail_);
    head_ = std::move(new_ring);
    tail_ = head_.get();
  }

 private:
  FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushFront);
  FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushBack);
  FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingCanPush);
  FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushPopPushPop);

  struct Ring {
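    // Elements live in |data_| in the circular range (front_index_,
    // back_index_]: |front_index_| points at the slot just before the first
    // element and |back_index_| at the last element, so one slot is always
    // left unused (a ring of capacity N holds at most N - 1 elements).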
    explicit Ring(size_t capacity)
        : capacity_(capacity),
          front_index_(0),
          back_index_(0),
          data_(reinterpret_cast<T*>(new char[sizeof(T) * capacity])),
          next_(nullptr) {
      DCHECK_GE(capacity_, kMinimumRingSize);
    }

    ~Ring() {
      while (!empty()) {
        pop_front();
      }
      delete[] reinterpret_cast<char*>(data_);
    }

    bool empty() const { return back_index_ == front_index_; }

    size_t capacity() const { return capacity_; }

    bool CanPush() const {
      return front_index_ != CircularIncrement(back_index_);
    }

    void push_front(T&& t) {
      // Mustn't appear to become empty.
      DCHECK_NE(CircularDecrement(front_index_), back_index_);
      new (&data_[front_index_]) T(std::move(t));
      front_index_ = CircularDecrement(front_index_);
    }

    void push_back(T&& t) {
      back_index_ = CircularIncrement(back_index_);
      DCHECK(!empty());  // Mustn't appear to become empty.
      new (&data_[back_index_]) T(std::move(t));
    }

    bool CanPop() const { return front_index_ != back_index_; }

    void pop_front() {
      DCHECK(!empty());
      front_index_ = CircularIncrement(front_index_);
      data_[front_index_].~T();
    }

    T& front() {
      DCHECK(!empty());
      return data_[CircularIncrement(front_index_)];
    }

    const T& front() const {
      DCHECK(!empty());
      return data_[CircularIncrement(front_index_)];
    }

    T& back() {
      DCHECK(!empty());
      return data_[back_index_];
    }

    const T& back() const {
      DCHECK(!empty());
      return data_[back_index_];
    }

    size_t CircularDecrement(size_t index) const {
      if (index == 0)
        return capacity_ - 1;
      return index - 1;
    }

    size_t CircularIncrement(size_t index) const {
      DCHECK_LT(index, capacity_);
      ++index;
      if (index == capacity_)
        return 0;
      return index;
    }

    size_t capacity_;
    size_t front_index_;
    size_t back_index_;
    T* data_;
    std::unique_ptr<Ring> next_;

    DISALLOW_COPY_AND_ASSIGN(Ring);
  };

 public:
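  // A simple forward, read-only iterator over the deque. There is no
  // operator==, so instead of comparing against end() the iterator converts
  // to false once it has walked off the last ring. A usage sketch
  // (illustrative only; Inspect() is a hypothetical caller-side function):
  //
  //   for (auto it = deque.begin(); it; ++it)
  //     Inspect(*it);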
  class Iterator {
   public:
    using value_type = T;
    using pointer = const T*;
    using reference = const T&;
    const T* operator->() const { return &ring_->data_[index_]; }
    const T& operator*() const { return ring_->data_[index_]; }

    Iterator& operator++() {
      if (index_ == ring_->back_index_) {
        // The first element of the next ring lives just after its
        // |front_index_|, not necessarily at slot 0.
        ring_ = ring_->next_.get();
        index_ = ring_ ? ring_->CircularIncrement(ring_->front_index_) : 0;
      } else {
        index_ = ring_->CircularIncrement(index_);
      }
      return *this;
    }

    operator bool() const { return !!ring_; }

   private:
    explicit Iterator(const Ring* ring) {
      if (!ring || ring->empty()) {
        ring_ = nullptr;
        index_ = 0;
        return;
      }

      ring_ = ring;
      index_ = ring_->CircularIncrement(ring->front_index_);
    }

    const Ring* ring_;
    size_t index_;

    friend class LazilyDeallocatedDeque;
  };

  Iterator begin() const { return Iterator(head_.get()); }

  Iterator end() const { return Iterator(nullptr); }

 private:
  // We maintain a list of Ring buffers, to enable us to grow without copying,
  // but most of the time we aim to have only one active Ring.
  std::unique_ptr<Ring> head_;
  Ring* tail_ = nullptr;

  size_t size_ = 0;
  size_t max_size_ = 0;
  TimeTicks next_resize_time_;

  DISALLOW_COPY_AND_ASSIGN(LazilyDeallocatedDeque);
};

}  // namespace internal
}  // namespace sequence_manager
}  // namespace base

#endif  // BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_