/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_
#define ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_

#include <sys/mman.h>  // For the PROT_* and MAP_* constants.

#include <algorithm>
#include <memory>
#include <string>

#include <android-base/logging.h>

#include "base/atomic.h"
#include "base/macros.h"
#include "base/mem_map.h"
#include "stack_reference.h"

// This implements a double-ended queue (deque) with various flavors of PushBack operations,
// as well as PopBack and PopFront operations. We expect that all calls are performed
// by a single thread (normally the GC). There is one exception, which accounts for the
// name:
// - Multiple calls to AtomicPushBack*() and AtomicBumpBack() may be made concurrently,
// provided no other calls are made at the same time.
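//
// A minimal usage sketch (illustrative only; the stack name, the sizes, and the
// `obj` pointer are hypothetical and not part of this header):
//
//   AtomicStack<mirror::Object>* stack =
//       AtomicStack<mirror::Object>::Create("example mark stack",
//                                           /*growth_limit=*/ 4096,
//                                           /*capacity=*/ 8192);
//   stack->AtomicPushBack(obj);                 // May be called concurrently;
//                                               // returns false on overflow.
//   mirror::Object* popped = stack->PopBack();  // Single-threaded (GC) only.
//   delete stack;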

namespace art {
namespace gc {
namespace accounting {

// Internal representation is StackReference<T>, so this only works with mirror::Object or its
// subclasses.
template <typename T>
class AtomicStack {
 public:
  class ObjectComparator {
   public:
    // These two comparators are for std::binary_search.
    bool operator()(const T* a, const StackReference<T>& b) const NO_THREAD_SAFETY_ANALYSIS {
      return a < b.AsMirrorPtr();
    }
    bool operator()(const StackReference<T>& a, const T* b) const NO_THREAD_SAFETY_ANALYSIS {
      return a.AsMirrorPtr() < b;
    }
    // This comparator is for std::sort.
    bool operator()(const StackReference<T>& a, const StackReference<T>& b) const
        NO_THREAD_SAFETY_ANALYSIS {
      return a.AsMirrorPtr() < b.AsMirrorPtr();
    }
  };

  // Capacity is how many elements we can store in the stack.
  static AtomicStack* Create(const std::string& name, size_t growth_limit, size_t capacity) {
    std::unique_ptr<AtomicStack> mark_stack(new AtomicStack(name, growth_limit, capacity));
    mark_stack->Init();
    return mark_stack.release();
  }

  ~AtomicStack() {}

  void Reset() {
    DCHECK(mem_map_.IsValid());
    DCHECK(begin_ != nullptr);
    front_index_.store(0, std::memory_order_relaxed);
    back_index_.store(0, std::memory_order_relaxed);
    debug_is_sorted_ = true;
    mem_map_.MadviseDontNeedAndZero();
  }

  // Beware: Mixing atomic pushes and atomic pops will cause an ABA problem.

  // Returns false if we overflowed the stack.
  bool AtomicPushBackIgnoreGrowthLimit(T* value) REQUIRES_SHARED(Locks::mutator_lock_) {
    return AtomicPushBackInternal(value, capacity_);
  }

  // Returns false if we overflowed the stack.
  bool AtomicPushBack(T* value) REQUIRES_SHARED(Locks::mutator_lock_) {
    return AtomicPushBackInternal(value, growth_limit_);
  }

  // Atomically bump the back index by the given number of
  // slots. Returns false if we overflowed the stack.
  bool AtomicBumpBack(size_t num_slots, StackReference<T>** start_address,
                      StackReference<T>** end_address)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kIsDebugBuild) {
      debug_is_sorted_ = false;
    }
    int32_t index;
    int32_t new_index;
    do {
      index = back_index_.load(std::memory_order_relaxed);
      new_index = index + num_slots;
      if (UNLIKELY(static_cast<size_t>(new_index) >= growth_limit_)) {
        // Stack overflow.
        return false;
      }
    } while (!back_index_.CompareAndSetWeakRelaxed(index, new_index));
    *start_address = begin_ + index;
    *end_address = begin_ + new_index;
    if (kIsDebugBuild) {
      // Check that the memory is zero.
      for (int32_t i = index; i < new_index; ++i) {
        DCHECK_EQ(begin_[i].AsMirrorPtr(), static_cast<T*>(nullptr))
            << "i=" << i << " index=" << index << " new_index=" << new_index;
      }
    }
    return true;
  }
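
  // Illustrative caller-side sketch for AtomicBumpBack (the `stack`, `refs`, and
  // `num_refs` names are hypothetical):
  //
  //   StackReference<T>* start;
  //   StackReference<T>* end;
  //   if (stack->AtomicBumpBack(num_refs, &start, &end)) {
  //     for (size_t i = 0; i < num_refs; ++i) {
  //       start[i].Assign(refs[i]);  // Fill the reserved slots in [start, end).
  //     }
  //   } else {
  //     // The bump would have exceeded growth_limit_; nothing was reserved.
  //   }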

  void AssertAllZero() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kIsDebugBuild) {
      for (size_t i = 0; i < capacity_; ++i) {
        DCHECK_EQ(begin_[i].AsMirrorPtr(), static_cast<T*>(nullptr)) << "i=" << i;
      }
    }
  }

  // Bump the back index by the given number of slots. Returns false if this
  // operation will overflow the stack. New elements should be written
  // to [*start_address, *end_address).
  bool BumpBack(size_t num_slots,
                StackReference<T>** start_address,
                StackReference<T>** end_address)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kIsDebugBuild) {
      debug_is_sorted_ = false;
    }
    const int32_t index = back_index_.load(std::memory_order_relaxed);
    const int32_t new_index = index + num_slots;
    if (UNLIKELY(static_cast<size_t>(new_index) >= growth_limit_)) {
      // Stack overflow.
      return false;
    }
    back_index_.store(new_index, std::memory_order_relaxed);
    *start_address = begin_ + index;
    *end_address = begin_ + new_index;
    if (kIsDebugBuild) {
      // Check that the memory is zero.
      for (int32_t i = index; i < new_index; i++) {
        DCHECK_EQ(begin_[i].AsMirrorPtr(), static_cast<T*>(nullptr))
            << "i=" << i << " index=" << index << " new_index=" << new_index;
      }
    }
    return true;
  }

  void PushBack(T* value) REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kIsDebugBuild) {
      debug_is_sorted_ = false;
    }
    const int32_t index = back_index_.load(std::memory_order_relaxed);
    DCHECK_LT(static_cast<size_t>(index), growth_limit_);
    back_index_.store(index + 1, std::memory_order_relaxed);
    begin_[index].Assign(value);
  }

  T* PopBack() REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK_GT(back_index_.load(std::memory_order_relaxed),
              front_index_.load(std::memory_order_relaxed));
    // Decrement the back index non-atomically.
    const int32_t index = back_index_.load(std::memory_order_relaxed) - 1;
    back_index_.store(index, std::memory_order_relaxed);
    T* ret = begin_[index].AsMirrorPtr();
    // In debug builds we expect the stack elements to be null, which may not
    // always be the case if the stack is being reused without resetting it
    // in-between.
    if (kIsDebugBuild) {
      begin_[index].Clear();
    }
    return ret;
  }

  // Take an item from the front of the stack.
  T PopFront() {
    int32_t index = front_index_.load(std::memory_order_relaxed);
    DCHECK_LT(index, back_index_.load(std::memory_order_relaxed));
    front_index_.store(index + 1, std::memory_order_relaxed);
    return begin_[index];
  }

  // Pop a number of elements.
  void PopBackCount(int32_t n) {
    DCHECK_GE(Size(), static_cast<size_t>(n));
    back_index_.store(back_index_.load(std::memory_order_relaxed) - n, std::memory_order_relaxed);
  }

  bool IsEmpty() const {
    return Size() == 0;
  }

  bool IsFull() const {
    return Size() == growth_limit_;
  }

  size_t Size() const {
    DCHECK_LE(front_index_.load(std::memory_order_relaxed),
              back_index_.load(std::memory_order_relaxed));
    return
        back_index_.load(std::memory_order_relaxed) - front_index_.load(std::memory_order_relaxed);
  }

  StackReference<T>* Begin() const {
    return begin_ + front_index_.load(std::memory_order_relaxed);
  }
  StackReference<T>* End() const {
    return begin_ + back_index_.load(std::memory_order_relaxed);
  }

  size_t Capacity() const {
    return capacity_;
  }

  // Will clear the stack.
  void Resize(size_t new_capacity) {
    capacity_ = new_capacity;
    growth_limit_ = new_capacity;
    Init();
  }

  void Sort() {
    int32_t start_back_index = back_index_.load(std::memory_order_relaxed);
    int32_t start_front_index = front_index_.load(std::memory_order_relaxed);
    std::sort(Begin(), End(), ObjectComparator());
    CHECK_EQ(start_back_index, back_index_.load(std::memory_order_relaxed));
    CHECK_EQ(start_front_index, front_index_.load(std::memory_order_relaxed));
    if (kIsDebugBuild) {
      debug_is_sorted_ = true;
    }
  }

  bool ContainsSorted(const T* value) const REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(debug_is_sorted_);
    return std::binary_search(Begin(), End(), value, ObjectComparator());
  }
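
  // Sketch of the intended Sort()/ContainsSorted() pairing (the `stack` and `obj`
  // names are hypothetical). ContainsSorted() assumes a prior Sort() with no pushes
  // in between, since std::binary_search requires an ordered range:
  //
  //   stack->Sort();
  //   if (stack->ContainsSorted(obj)) {
  //     // obj is currently between Begin() and End().
  //   }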

  bool Contains(const T* value) const REQUIRES_SHARED(Locks::mutator_lock_) {
    for (auto cur = Begin(), end = End(); cur != end; ++cur) {
      if (cur->AsMirrorPtr() == value) {
        return true;
      }
    }
    return false;
  }

 private:
  AtomicStack(const std::string& name, size_t growth_limit, size_t capacity)
      : name_(name),
        back_index_(0),
        front_index_(0),
        begin_(nullptr),
        growth_limit_(growth_limit),
        capacity_(capacity),
        debug_is_sorted_(true) {
  }

  // Returns false if we overflowed the stack.
  bool AtomicPushBackInternal(T* value, size_t limit) ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kIsDebugBuild) {
      debug_is_sorted_ = false;
    }
    int32_t index;
    do {
      index = back_index_.load(std::memory_order_relaxed);
      if (UNLIKELY(static_cast<size_t>(index) >= limit)) {
        // Stack overflow.
        return false;
      }
    } while (!back_index_.CompareAndSetWeakRelaxed(index, index + 1));
    begin_[index].Assign(value);
    return true;
  }

  // Size in number of elements.
  void Init() {
    std::string error_msg;
    mem_map_ = MemMap::MapAnonymous(name_.c_str(),
                                    capacity_ * sizeof(begin_[0]),
                                    PROT_READ | PROT_WRITE,
                                    /*low_4gb=*/ false,
                                    &error_msg);
    CHECK(mem_map_.IsValid()) << "couldn't allocate mark stack.\n" << error_msg;
    uint8_t* addr = mem_map_.Begin();
    CHECK(addr != nullptr);
    debug_is_sorted_ = true;
    begin_ = reinterpret_cast<StackReference<T>*>(addr);
    Reset();
  }

  // Name of the mark stack.
  std::string name_;
  // Memory mapping of the atomic stack.
  MemMap mem_map_;
  // Back index (index after the last element pushed).
  AtomicInteger back_index_;
  // Front index, used for implementing PopFront.
  AtomicInteger front_index_;
  // Base of the atomic stack.
  StackReference<T>* begin_;
  // Current maximum which we can push back to, must be <= capacity_.
  size_t growth_limit_;
  // Maximum number of elements.
  size_t capacity_;
  // Whether or not the stack is sorted, only updated in debug mode to avoid performance overhead.
  bool debug_is_sorted_;

  DISALLOW_COPY_AND_ASSIGN(AtomicStack);
};

using ObjectStack = AtomicStack<mirror::Object>;

}  // namespace accounting
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_