/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "allocation_record.h"

#include "art_method-inl.h"
#include "base/stl_util.h"
#include "stack.h"

#ifdef __ANDROID__
#include "cutils/properties.h"
#endif

namespace art {
namespace gc {

int32_t AllocRecordStackTraceElement::ComputeLineNumber() const {
  DCHECK(method_ != nullptr);
  return method_->GetLineNumFromDexPC(dex_pc_);
}

const char* AllocRecord::GetClassDescriptor(std::string* storage) const {
  // klass_ could contain null only if we implement class unloading.
  return klass_.IsNull() ? "null" : klass_.Read()->GetDescriptor(storage);
}

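// Usage sketch (not part of the original file): the properties below are read once, when
// tracking is enabled, so they are typically set beforehand via adb; the values shown are
// only examples.
//
//   adb shell setprop dalvik.vm.allocTrackerMax 65536
//   adb shell setprop dalvik.vm.recentAllocMax 1024
//   adb shell setprop debug.allocTracker.stackDepth 16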
void AllocRecordObjectMap::SetProperties() {
#ifdef __ANDROID__
  // Check whether there's a system property overriding the max number of records.
  const char* propertyName = "dalvik.vm.allocTrackerMax";
  char allocMaxString[PROPERTY_VALUE_MAX];
  if (property_get(propertyName, allocMaxString, "") > 0) {
    char* end;
    size_t value = strtoul(allocMaxString, &end, 10);
    if (*end != '\0') {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << allocMaxString
                 << "' --- invalid";
    } else {
      alloc_record_max_ = value;
      if (recent_record_max_ > value) {
        recent_record_max_ = value;
      }
    }
  }
  // Check whether there's a system property overriding the number of recent records.
  propertyName = "dalvik.vm.recentAllocMax";
  char recentAllocMaxString[PROPERTY_VALUE_MAX];
  if (property_get(propertyName, recentAllocMaxString, "") > 0) {
    char* end;
    size_t value = strtoul(recentAllocMaxString, &end, 10);
    if (*end != '\0') {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << recentAllocMaxString
                 << "' --- invalid";
    } else if (value > alloc_record_max_) {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << recentAllocMaxString
                 << "' --- should be less than " << alloc_record_max_;
    } else {
      recent_record_max_ = value;
    }
  }
  // Check whether there's a system property overriding the max depth of stack trace.
  propertyName = "debug.allocTracker.stackDepth";
  char stackDepthString[PROPERTY_VALUE_MAX];
  if (property_get(propertyName, stackDepthString, "") > 0) {
    char* end;
    size_t value = strtoul(stackDepthString, &end, 10);
    if (*end != '\0') {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << stackDepthString
                 << "' --- invalid";
    } else if (value > kMaxSupportedStackDepth) {
      LOG(WARNING) << propertyName << " '" << stackDepthString << "' too large, using "
                   << kMaxSupportedStackDepth;
      max_stack_depth_ = kMaxSupportedStackDepth;
    } else {
      max_stack_depth_ = value;
    }
  }
#endif
}

AllocRecordObjectMap::~AllocRecordObjectMap() {
  Clear();
}

void AllocRecordObjectMap::VisitRoots(RootVisitor* visitor) {
  CHECK_LE(recent_record_max_, alloc_record_max_);
  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(visitor, RootInfo(kRootDebugger));
  size_t count = recent_record_max_;
  // Only visit the last recent_record_max_ number of allocation records in entries_ and mark the
  // klass_ fields as strong roots.
  for (auto it = entries_.rbegin(), end = entries_.rend(); it != end; ++it) {
    AllocRecord& record = it->second;
    if (count > 0) {
      buffered_visitor.VisitRootIfNonNull(record.GetClassGcRoot());
      --count;
    }
    // Visit all of the stack frames to make sure no methods in the stack traces get unloaded by
    // class unloading.
    for (size_t i = 0, depth = record.GetDepth(); i < depth; ++i) {
      const AllocRecordStackTraceElement& element = record.StackElement(i);
      DCHECK(element.GetMethod() != nullptr);
      element.GetMethod()->VisitRoots(buffered_visitor, sizeof(void*));
    }
  }
}

static inline void SweepClassObject(AllocRecord* record, IsMarkedVisitor* visitor)
    SHARED_REQUIRES(Locks::mutator_lock_)
    REQUIRES(Locks::alloc_tracker_lock_) {
  GcRoot<mirror::Class>& klass = record->GetClassGcRoot();
  // This does not need a read barrier because this is called by GC.
  mirror::Object* old_object = klass.Read<kWithoutReadBarrier>();
  if (old_object != nullptr) {
    // The class object can become null if we implement class unloading.
    // In that case we might still want to keep the class name string (not implemented).
    mirror::Object* new_object = visitor->IsMarked(old_object);
    DCHECK(new_object != nullptr);
    if (UNLIKELY(old_object != new_object)) {
      klass = GcRoot<mirror::Class>(new_object->AsClass());
    }
  }
}

void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedVisitor* visitor) {
  VLOG(heap) << "Start SweepAllocationRecords()";
  size_t count_deleted = 0, count_moved = 0, count = 0;
  // Only the first (size - recent_record_max_) number of records can be deleted.
  const size_t delete_bound = std::max(entries_.size(), recent_record_max_) - recent_record_max_;
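  // For example, with entries_.size() == 10 and recent_record_max_ == 4, delete_bound is 6: only
  // the six oldest entries may be erased below, while the four most recent ones merely have their
  // object root cleared when the object is dead.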
  for (auto it = entries_.begin(), end = entries_.end(); it != end;) {
    ++count;
    // This does not need a read barrier because this is called by GC.
    mirror::Object* old_object = it->first.Read<kWithoutReadBarrier>();
    AllocRecord& record = it->second;
    mirror::Object* new_object = old_object == nullptr ? nullptr : visitor->IsMarked(old_object);
    if (new_object == nullptr) {
      if (count > delete_bound) {
        it->first = GcRoot<mirror::Object>(nullptr);
        SweepClassObject(&record, visitor);
        ++it;
      } else {
        it = entries_.erase(it);
        ++count_deleted;
      }
    } else {
      if (old_object != new_object) {
        it->first = GcRoot<mirror::Object>(new_object);
        ++count_moved;
      }
      SweepClassObject(&record, visitor);
      ++it;
    }
  }
  VLOG(heap) << "Deleted " << count_deleted << " allocation records";
  VLOG(heap) << "Updated " << count_moved << " allocation records";
}

void AllocRecordObjectMap::AllowNewAllocationRecords() {
  CHECK(!kUseReadBarrier);
  allow_new_record_ = true;
  new_record_condition_.Broadcast(Thread::Current());
}

void AllocRecordObjectMap::DisallowNewAllocationRecords() {
  CHECK(!kUseReadBarrier);
  allow_new_record_ = false;
}

void AllocRecordObjectMap::BroadcastForNewAllocationRecords() {
  CHECK(kUseReadBarrier);
  new_record_condition_.Broadcast(Thread::Current());
}
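
// These three hooks pair with the wait loop in RecordAllocation() below: it blocks while
// allow_new_record_ is false (non-read-barrier collectors) or while weak-ref access is disabled
// (read-barrier collectors), and is woken by the broadcasts above.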

class AllocRecordStackVisitor : public StackVisitor {
 public:
  AllocRecordStackVisitor(Thread* thread, size_t max_depth, AllocRecordStackTrace* trace_out)
      SHARED_REQUIRES(Locks::mutator_lock_)
      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFramesNoResolve),
        max_depth_(max_depth),
        trace_(trace_out) {}

  // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
  // annotalysis.
  bool VisitFrame() OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    if (trace_->GetDepth() >= max_depth_) {
      return false;
    }
    ArtMethod* m = GetMethod();
    // m may be null if we have inlined methods of unresolved classes. b/27858645
    if (m != nullptr && !m->IsRuntimeMethod()) {
      m = m->GetInterfaceMethodIfProxy(sizeof(void*));
      trace_->AddStackElement(AllocRecordStackTraceElement(m, GetDexPc()));
    }
    return true;
  }

 private:
  const size_t max_depth_;
  AllocRecordStackTrace* const trace_;
};

void AllocRecordObjectMap::SetAllocTrackingEnabled(bool enable) {
  Thread* self = Thread::Current();
  Heap* heap = Runtime::Current()->GetHeap();
  if (enable) {
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (heap->IsAllocTrackingEnabled()) {
        return;  // Already enabled, bail.
      }
      AllocRecordObjectMap* records = heap->GetAllocationRecords();
      if (records == nullptr) {
        records = new AllocRecordObjectMap;
        heap->SetAllocationRecords(records);
      }
      CHECK(records != nullptr);
      records->SetProperties();
      std::string self_name;
      self->GetThreadName(self_name);
      if (self_name == "JDWP") {
        records->alloc_ddm_thread_id_ = self->GetTid();
      }
      size_t sz = sizeof(AllocRecordStackTraceElement) * records->max_stack_depth_ +
                  sizeof(AllocRecord) + sizeof(AllocRecordStackTrace);
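      // For illustration only: assuming 16-byte stack trace elements and a 16-frame max depth,
      // this is roughly 256 bytes per record plus the fixed record and trace headers.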
      LOG(INFO) << "Enabling alloc tracker (" << records->alloc_record_max_ << " entries of "
                << records->max_stack_depth_ << " frames, taking up to "
                << PrettySize(sz * records->alloc_record_max_) << ")";
    }
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      heap->SetAllocTrackingEnabled(true);
    }
  } else {
    // Delete outside of the critical section to avoid possible lock violations like the runtime
    // shutdown lock.
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (!heap->IsAllocTrackingEnabled()) {
        return;  // Already disabled, bail.
      }
      heap->SetAllocTrackingEnabled(false);
      LOG(INFO) << "Disabling alloc tracker";
      AllocRecordObjectMap* records = heap->GetAllocationRecords();
      records->Clear();
    }
    // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
    Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
  }
}
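
// A minimal usage sketch (assumed caller, not part of this file): the debugger/DDM path would
// toggle tracking roughly like this, after which the instrumented allocation entrypoints feed
// RecordAllocation() for every new object.
//
//   AllocRecordObjectMap::SetAllocTrackingEnabled(true);   // start recording allocations
//   ...
//   AllocRecordObjectMap::SetAllocTrackingEnabled(false);  // stop recording and clear records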

void AllocRecordObjectMap::RecordAllocation(Thread* self,
                                            mirror::Object** obj,
                                            size_t byte_count) {
  // Get stack trace outside of lock in case there are allocations during the stack walk.
  // b/27858645.
  AllocRecordStackTrace trace;
  AllocRecordStackVisitor visitor(self, max_stack_depth_, /*out*/ &trace);
  {
    StackHandleScope<1> hs(self);
    auto obj_wrapper = hs.NewHandleWrapper(obj);
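    // The handle wrapper registers *obj with the handle scope, so the pointer stays valid (and
    // is updated) if a GC moves the object during the stack walk below.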
    visitor.WalkStack();
  }

  MutexLock mu(self, *Locks::alloc_tracker_lock_);
  Heap* const heap = Runtime::Current()->GetHeap();
  if (!heap->IsAllocTrackingEnabled()) {
    // In the process of shutting down recording, bail.
    return;
  }

  // Do not record for DDM thread.
  if (alloc_ddm_thread_id_ == self->GetTid()) {
    return;
  }

  // Wait for the GC's sweeping to complete and for new records to be allowed.
  while (UNLIKELY((!kUseReadBarrier && !allow_new_record_) ||
                  (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
    new_record_condition_.WaitHoldingLocks(self);
  }

  if (!heap->IsAllocTrackingEnabled()) {
    // Return if allocation tracking has been disabled while waiting for system weak access
    // above.
    return;
  }

  DCHECK_LE(Size(), alloc_record_max_);

  // Set the tid of the allocating thread in the trace.
  trace.SetTid(self->GetTid());

  // Add the record.
  Put(*obj, AllocRecord(byte_count, (*obj)->GetClass(), std::move(trace)));
  DCHECK_LE(Size(), alloc_record_max_);
}

void AllocRecordObjectMap::Clear() {
  entries_.clear();
}

AllocRecordObjectMap::AllocRecordObjectMap()
    : new_record_condition_("New allocation record condition", *Locks::alloc_tracker_lock_) {}

}  // namespace gc
}  // namespace art