/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ALLOCATION_RECORD_H_
#define ART_RUNTIME_GC_ALLOCATION_RECORD_H_

#include <algorithm>
#include <functional>
#include <list>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "base/mutex.h"
#include "gc_root.h"
#include "obj_ptr.h"

namespace art {

class ArtMethod;
class IsMarkedVisitor;
class Thread;

namespace mirror {
class Class;
class Object;
}  // namespace mirror

namespace gc {

class AllocRecordStackTraceElement {
 public:
  int32_t ComputeLineNumber() const REQUIRES_SHARED(Locks::mutator_lock_);

  AllocRecordStackTraceElement() = default;
  AllocRecordStackTraceElement(ArtMethod* method, uint32_t dex_pc)
      : method_(method),
        dex_pc_(dex_pc) {}

  ArtMethod* GetMethod() const {
    return method_;
  }

  void SetMethod(ArtMethod* m) {
    method_ = m;
  }

  uint32_t GetDexPc() const {
    return dex_pc_;
  }

  void SetDexPc(uint32_t pc) {
    dex_pc_ = pc;
  }

  bool operator==(const AllocRecordStackTraceElement& other) const {
    return method_ == other.method_ && dex_pc_ == other.dex_pc_;
  }

 private:
  ArtMethod* method_ = nullptr;
  uint32_t dex_pc_ = 0;
};

class AllocRecordStackTrace {
 public:
  static constexpr size_t kHashMultiplier = 17;

  AllocRecordStackTrace() = default;

  AllocRecordStackTrace(AllocRecordStackTrace&& r)
      : tid_(r.tid_),
        stack_(std::move(r.stack_)) {}

  AllocRecordStackTrace(const AllocRecordStackTrace& r)
      : tid_(r.tid_),
        stack_(r.stack_) {}

  pid_t GetTid() const {
    return tid_;
  }

  void SetTid(pid_t t) {
    tid_ = t;
  }

  size_t GetDepth() const {
    return stack_.size();
  }

  const AllocRecordStackTraceElement& GetStackElement(size_t index) const {
    DCHECK_LT(index, GetDepth());
    return stack_[index];
  }

  void AddStackElement(const AllocRecordStackTraceElement& element) {
    stack_.push_back(element);
  }

  void SetStackElementAt(size_t index, ArtMethod* m, uint32_t dex_pc) {
    DCHECK_LT(index, stack_.size());
    stack_[index].SetMethod(m);
    stack_[index].SetDexPc(dex_pc);
  }

  bool operator==(const AllocRecordStackTrace& other) const {
    if (this == &other) return true;
    return tid_ == other.tid_ && stack_ == other.stack_;
  }

 private:
  pid_t tid_ = 0;
  std::vector<AllocRecordStackTraceElement> stack_;
};

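// Illustrative construction (a sketch using only this header's API, not code from this file):
// a trace records the allocating thread and one element per walked frame, e.g.:
//
//   AllocRecordStackTrace trace;
//   trace.SetTid(tid);
//   trace.AddStackElement(AllocRecordStackTraceElement(method, dex_pc));
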
struct HashAllocRecordTypes {
  size_t operator()(const AllocRecordStackTraceElement& r) const {
    return std::hash<void*>()(reinterpret_cast<void*>(r.GetMethod())) *
        AllocRecordStackTrace::kHashMultiplier + std::hash<uint32_t>()(r.GetDexPc());
  }

  size_t operator()(const AllocRecordStackTrace& r) const {
    size_t depth = r.GetDepth();
    size_t result = r.GetTid() * AllocRecordStackTrace::kHashMultiplier + depth;
    for (size_t i = 0; i < depth; ++i) {
      result = result * AllocRecordStackTrace::kHashMultiplier + (*this)(r.GetStackElement(i));
    }
    return result;
  }
};

template <typename T> struct HashAllocRecordTypesPtr {
  size_t operator()(const T* r) const {
    if (r == nullptr) return 0;
    return HashAllocRecordTypes()(*r);
  }
};

template <typename T> struct EqAllocRecordTypesPtr {
  bool operator()(const T* r1, const T* r2) const {
    if (r1 == r2) return true;
    if (r1 == nullptr || r2 == nullptr) return false;
    return *r1 == *r2;
  }
};

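// Illustrative use (a sketch, not part of this header): the pointer-based functors let a trace
// pointer key an unordered container without copying the trace, which the trace-sharing TODO in
// AllocRecord below would need, e.g.:
//
//   std::unordered_set<const AllocRecordStackTrace*,
//                      HashAllocRecordTypesPtr<AllocRecordStackTrace>,
//                      EqAllocRecordTypesPtr<AllocRecordStackTrace>> unique_traces;
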
class AllocRecord {
 public:
  // All instances of AllocRecord should be managed by an instance of AllocRecordObjectMap.
  AllocRecord(size_t count, mirror::Class* klass, AllocRecordStackTrace&& trace)
      : byte_count_(count), klass_(klass), trace_(std::move(trace)) {}

  size_t GetDepth() const {
    return trace_.GetDepth();
  }

  const AllocRecordStackTrace* GetStackTrace() const {
    return &trace_;
  }

  size_t ByteCount() const {
    return byte_count_;
  }

  pid_t GetTid() const {
    return trace_.GetTid();
  }

  mirror::Class* GetClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return klass_.Read();
  }

  const char* GetClassDescriptor(std::string* storage) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  GcRoot<mirror::Class>& GetClassGcRoot() REQUIRES_SHARED(Locks::mutator_lock_) {
    return klass_;
  }

  const AllocRecordStackTraceElement& StackElement(size_t index) const {
    return trace_.GetStackElement(index);
  }

 private:
  const size_t byte_count_;
  // The klass_ could be a strong or weak root for GC
  GcRoot<mirror::Class> klass_;
  // TODO: Share between alloc records with identical stack traces.
  AllocRecordStackTrace trace_;
};

class AllocRecordObjectMap {
 public:
  static constexpr size_t kDefaultNumAllocRecords = 512 * 1024;
  static constexpr size_t kDefaultNumRecentRecords = 64 * 1024 - 1;
  static constexpr size_t kDefaultAllocStackDepth = 16;
  static constexpr size_t kMaxSupportedStackDepth = 128;

  // GcRoot<mirror::Object> pointers in the list are weak roots, and the last recent_record_max_
  // AllocRecord::klass_ pointers are strong roots (the remaining klass_ pointers are weak roots).
  // The last recent_record_max_ pairs in the list are always kept for DDMS's recent allocation
  // tracking, but the GcRoot<mirror::Object> pointers in these pairs can become null.
  // Both types of pointers need read barriers; do not access them directly.
  using EntryPair = std::pair<GcRoot<mirror::Object>, AllocRecord>;
  using EntryList = std::list<EntryPair>;

  // The caller must check that allocation tracking is enabled before calling, since we read the
  // stack trace before checking the enabled boolean.
  void RecordAllocation(Thread* self,
                        ObjPtr<mirror::Object>* obj,
                        size_t byte_count)
      REQUIRES(!Locks::alloc_tracker_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

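  // Illustrative caller pattern (a sketch, not code from this header): the enabled check is
  // expected to happen in the allocation path before calling in, e.g.:
  //
  //   if (heap->IsAllocTrackingEnabled()) {  // hypothetical accessor on the owning heap
  //     records->RecordAllocation(self, &obj, byte_count);
  //   }
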
  static void SetAllocTrackingEnabled(bool enabled) REQUIRES(!Locks::alloc_tracker_lock_);

  AllocRecordObjectMap() REQUIRES(Locks::alloc_tracker_lock_);
  ~AllocRecordObjectMap();

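  // Evicts the oldest entry once the buffer is full: entries_ acts as a bounded FIFO of at most
  // alloc_record_max_ records.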
  void Put(mirror::Object* obj, AllocRecord&& record)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_) {
    if (entries_.size() == alloc_record_max_) {
      entries_.pop_front();
    }
    entries_.push_back(EntryPair(GcRoot<mirror::Object>(obj), std::move(record)));
  }

  size_t Size() const REQUIRES_SHARED(Locks::alloc_tracker_lock_) {
    return entries_.size();
  }

  size_t GetRecentAllocationSize() const REQUIRES_SHARED(Locks::alloc_tracker_lock_) {
    CHECK_LE(recent_record_max_, alloc_record_max_);
    size_t sz = entries_.size();
    return std::min(recent_record_max_, sz);
  }

  void VisitRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_);

  void SweepAllocationRecords(IsMarkedVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_);

  // Allocation tracking could be enabled by the user between DisallowNewAllocationRecords() and
  // AllowNewAllocationRecords(), in which case new allocation records can be added even though
  // they should be disallowed. However, this is GC-safe because new objects are not processed in
  // this GC cycle. The only downside of not handling this case is that such new allocation
  // records can be swept from the list. Missing the first few records is acceptable when using
  // the button to enable allocation tracking.
  void DisallowNewAllocationRecords()
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_);
  void AllowNewAllocationRecords()
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_);
  void BroadcastForNewAllocationRecords()
      REQUIRES(Locks::alloc_tracker_lock_);

  // TODO: Is there a better way to hide entries_'s type?
  EntryList::iterator Begin()
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_) {
    return entries_.begin();
  }

  EntryList::iterator End()
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_) {
    return entries_.end();
  }

  EntryList::reverse_iterator RBegin()
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_) {
    return entries_.rbegin();
  }

  EntryList::reverse_iterator REnd()
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::alloc_tracker_lock_) {
    return entries_.rend();
  }

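  // Illustrative reverse walk of the recent records (a sketch of how a DDMS-style consumer might
  // use these accessors; `records` and `remaining` are hypothetical locals, and
  // Locks::alloc_tracker_lock_ must be held):
  //
  //   size_t remaining = records->GetRecentAllocationSize();
  //   for (auto it = records->RBegin(); remaining != 0 && it != records->REnd(); ++it) {
  //     const AllocRecord& record = it->second;
  //     // it->first is a weak GcRoot<mirror::Object>; read it through its barrier-aware API.
  //     --remaining;
  //   }
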
  void Clear() REQUIRES(Locks::alloc_tracker_lock_);

 private:
  size_t alloc_record_max_ GUARDED_BY(Locks::alloc_tracker_lock_) = kDefaultNumAllocRecords;
  size_t recent_record_max_ GUARDED_BY(Locks::alloc_tracker_lock_) = kDefaultNumRecentRecords;
  size_t max_stack_depth_ = kDefaultAllocStackDepth;
  pid_t alloc_ddm_thread_id_ GUARDED_BY(Locks::alloc_tracker_lock_) = 0;
  bool allow_new_record_ GUARDED_BY(Locks::alloc_tracker_lock_) = true;
  ConditionVariable new_record_condition_ GUARDED_BY(Locks::alloc_tracker_lock_);
  // See the comment above the EntryList alias.
  EntryList entries_ GUARDED_BY(Locks::alloc_tracker_lock_);

  void SetMaxStackDepth(size_t max_stack_depth) REQUIRES(Locks::alloc_tracker_lock_);
};

}  // namespace gc
}  // namespace art
#endif  // ART_RUNTIME_GC_ALLOCATION_RECORD_H_