/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "allocation_record.h"

#include "art_method-inl.h"
#include "base/logging.h"  // For VLOG
#include "base/pointer_size.h"
#include "base/stl_util.h"
#include "instrumentation.h"
#include "obj_ptr-inl.h"
#include "object_callbacks.h"
#include "stack.h"
#include "thread-inl.h"  // For GetWeakRefAccessEnabled().

#include <android-base/properties.h>

namespace art HIDDEN {
namespace gc {
int32_t AllocRecordStackTraceElement::ComputeLineNumber() const {
  DCHECK(method_ != nullptr);
  int32_t line_number = method_->GetLineNumFromDexPC(dex_pc_);
  if (line_number == -1 && !method_->IsProxyMethod()) {
    // If we failed to map the dex pc to a line number, then most likely there is no debug info.
    // Make the line_number the same as the dex pc - it can be decoded later using a map file.
    // See b/30183883 and b/228000954.
    line_number = static_cast<int32_t>(dex_pc_);
  }
  return line_number;
}

const char* AllocRecord::GetClassDescriptor(std::string* storage) const {
  // klass_ could contain null only if we implement class unloading.
  return klass_.IsNull() ? "null" : klass_.Read()->GetDescriptor(storage);
}

void AllocRecordObjectMap::SetMaxStackDepth(size_t max_stack_depth) {
  // Log fatal since this should already be checked when calling VMDebug.setAllocTrackerStackDepth.
  CHECK_LE(max_stack_depth, kMaxSupportedStackDepth)
      << "Allocation record max stack depth is too large";
  max_stack_depth_ = max_stack_depth;
}

AllocRecordObjectMap::~AllocRecordObjectMap() {
  Clear();
}

void AllocRecordObjectMap::VisitRoots(RootVisitor* visitor) {
  // When we are compacting in userfaultfd GC, the class GC-roots are already
  // updated in SweepAllocationRecords()->SweepClassObject().
  if (Runtime::Current()->GetHeap()->IsPerformingUffdCompaction()) {
    return;
  }
  CHECK_LE(recent_record_max_, alloc_record_max_);
  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(visitor, RootInfo(kRootDebugger));
  size_t count = recent_record_max_;
  // Only visit the last recent_record_max_ allocation records in entries_ and mark their
  // klass_ fields as strong roots.
  for (auto it = entries_.rbegin(), end = entries_.rend(); it != end; ++it) {
    AllocRecord& record = it->second;
    if (count > 0) {
      buffered_visitor.VisitRootIfNonNull(record.GetClassGcRoot());
      --count;
    }
    // Visit all of the stack frames to make sure no methods in the stack traces get unloaded by
    // class unloading.
    for (size_t i = 0, depth = record.GetDepth(); i < depth; ++i) {
      const AllocRecordStackTraceElement& element = record.StackElement(i);
      DCHECK(element.GetMethod() != nullptr);
      element.GetMethod()->VisitRoots(buffered_visitor, kRuntimePointerSize);
    }
  }
}

static inline void SweepClassObject(AllocRecord* record, IsMarkedVisitor* visitor)
    REQUIRES_SHARED(Locks::mutator_lock_)
    REQUIRES(Locks::alloc_tracker_lock_) {
  GcRoot<mirror::Class>& klass = record->GetClassGcRoot();
  // This does not need a read barrier because this is called by GC.
  mirror::Object* old_object = klass.Read<kWithoutReadBarrier>();
  if (old_object != nullptr) {
    // The class object can become null if we implement class unloading.
    // In that case we might still want to keep the class name string (not implemented).
    mirror::Object* new_object = visitor->IsMarked(old_object);
    DCHECK(new_object != nullptr);
    if (UNLIKELY(old_object != new_object)) {
      // We can't use AsClass() as it uses IsClass in a DCHECK, which expects
      // the class' contents to be there. This is not the case in userfaultfd
      // GC.
      klass = GcRoot<mirror::Class>(ObjPtr<mirror::Class>::DownCast(new_object));
    }
  }
}

void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedVisitor* visitor) {
  VLOG(heap) << "Start SweepAllocationRecords()";
  size_t count_deleted = 0, count_moved = 0, count = 0;
  // Only the first (size - recent_record_max_) records can be deleted.
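  // Note: std::max() keeps the subtraction below from underflowing when
  // entries_.size() < recent_record_max_, in which case nothing may be deleted.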
  const size_t delete_bound = std::max(entries_.size(), recent_record_max_) - recent_record_max_;
  for (auto it = entries_.begin(), end = entries_.end(); it != end;) {
    ++count;
    // This does not need a read barrier because this is called by GC.
    mirror::Object* old_object = it->first.Read<kWithoutReadBarrier>();
    AllocRecord& record = it->second;
    mirror::Object* new_object = old_object == nullptr ? nullptr : visitor->IsMarked(old_object);
    if (new_object == nullptr) {
      if (count > delete_bound) {
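        // This record is among the recent_record_max_ most recent ones, so keep the
        // record itself but clear the root to the now-unreachable object.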
        it->first = GcRoot<mirror::Object>(nullptr);
        SweepClassObject(&record, visitor);
        ++it;
      } else {
        it = entries_.erase(it);
        ++count_deleted;
      }
    } else {
      if (old_object != new_object) {
        it->first = GcRoot<mirror::Object>(new_object);
        ++count_moved;
      }
      SweepClassObject(&record, visitor);
      ++it;
    }
  }
  VLOG(heap) << "Deleted " << count_deleted << " allocation records";
  VLOG(heap) << "Updated " << count_moved << " allocation records";
}

void AllocRecordObjectMap::AllowNewAllocationRecords() {
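  // With a read-barrier (concurrent copying) collector, weak-ref access is instead gated
  // per thread via Thread::GetWeakRefAccessEnabled(); see the wait loop in RecordAllocation().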
  CHECK(!gUseReadBarrier);
  allow_new_record_ = true;
  new_record_condition_.Broadcast(Thread::Current());
}

void AllocRecordObjectMap::DisallowNewAllocationRecords() {
  CHECK(!gUseReadBarrier);
  allow_new_record_ = false;
}

void AllocRecordObjectMap::BroadcastForNewAllocationRecords() {
  new_record_condition_.Broadcast(Thread::Current());
}

void AllocRecordObjectMap::SetAllocTrackingEnabled(bool enable) {
  Thread* self = Thread::Current();
  Heap* heap = Runtime::Current()->GetHeap();
  if (enable) {
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (heap->IsAllocTrackingEnabled()) {
        return;  // Already enabled, bail.
      }
      AllocRecordObjectMap* records = heap->GetAllocationRecords();
      if (records == nullptr) {
        records = new AllocRecordObjectMap;
        heap->SetAllocationRecords(records);
      }
      CHECK(records != nullptr);
      records->SetMaxStackDepth(heap->GetAllocTrackerStackDepth());
      size_t sz = sizeof(AllocRecordStackTraceElement) * records->max_stack_depth_ +
                  sizeof(AllocRecord) + sizeof(AllocRecordStackTrace);
      LOG(INFO) << "Enabling alloc tracker (" << records->alloc_record_max_ << " entries of "
                << records->max_stack_depth_ << " frames, taking up to "
                << PrettySize(sz * records->alloc_record_max_) << ")";
    }
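    // Instrument the allocation entry points outside the lock; tracking only takes
    // effect once the flag is set under the lock below.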
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      heap->SetAllocTrackingEnabled(true);
    }
  } else {
    // Delete outside of the critical section to avoid possible lock violations, such as
    // with the runtime shutdown lock.
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (!heap->IsAllocTrackingEnabled()) {
        return;  // Already disabled, bail.
      }
      heap->SetAllocTrackingEnabled(false);
      LOG(INFO) << "Disabling alloc tracker";
      AllocRecordObjectMap* records = heap->GetAllocationRecords();
      records->Clear();
    }
    // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
    Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
  }
}

void AllocRecordObjectMap::RecordAllocation(Thread* self,
                                            ObjPtr<mirror::Object>* obj,
                                            size_t byte_count) {
  // Get stack trace outside of lock in case there are allocations during the stack walk.
  // b/27858645.
  AllocRecordStackTrace trace;
  {
    StackHandleScope<1> hs(self);
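    // The handle wrapper keeps *obj visible to the GC as a root and writes back the
    // (possibly moved) object when the scope exits.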
    auto obj_wrapper = hs.NewHandleWrapper(obj);

    StackVisitor::WalkStack(
        [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
          if (trace.GetDepth() >= max_stack_depth_) {
            return false;
          }
          ArtMethod* m = stack_visitor->GetMethod();
          // m may be null if we have inlined methods of unresolved classes. b/27858645
          if (m != nullptr && !m->IsRuntimeMethod()) {
            m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
            trace.AddStackElement(AllocRecordStackTraceElement(m, stack_visitor->GetDexPc()));
          }
          return true;
        },
        self,
        /* context= */ nullptr,
        art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
  }

  MutexLock mu(self, *Locks::alloc_tracker_lock_);
  Heap* const heap = Runtime::Current()->GetHeap();
  if (!heap->IsAllocTrackingEnabled()) {
    // In the process of shutting down recording, bail.
    return;
  }

  // TODO: Skip recording allocations associated with DDMS. This was a feature of the old
  // debugger, but when we switched to the JVMTI-based debugger the feature was
  // (unintentionally) broken. Since nobody seemed to notice or care, it might not be worth
  // the trouble.

  // Wait for GC's sweeping to complete and allow new records.
  while (UNLIKELY((!gUseReadBarrier && !allow_new_record_) ||
                  (gUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(Locks::alloc_tracker_lock_);
    new_record_condition_.WaitHoldingLocks(self);
  }

  if (!heap->IsAllocTrackingEnabled()) {
    // Return if allocation tracking has been disabled while waiting for system weak access
    // above.
    return;
  }

  DCHECK_LE(Size(), alloc_record_max_);

  // Stamp the completed trace with the allocating thread's id.
  trace.SetTid(self->GetTid());

  // Add the record.
  Put(obj->Ptr(), AllocRecord(byte_count, (*obj)->GetClass(), std::move(trace)));
  DCHECK_LE(Size(), alloc_record_max_);
}

void AllocRecordObjectMap::Clear() {
  entries_.clear();
}

AllocRecordObjectMap::AllocRecordObjectMap()
    : new_record_condition_("New allocation record condition", *Locks::alloc_tracker_lock_) {}

}  // namespace gc
}  // namespace art