/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "allocation_record.h"

#include "art_method-inl.h"
#include "base/enums.h"
#include "base/logging.h"  // For VLOG
#include "base/stl_util.h"
#include "obj_ptr-inl.h"
#include "object_callbacks.h"
#include "stack.h"
#include "thread-inl.h"  // For GetWeakRefAccessEnabled().

#include <android-base/properties.h>

namespace art {
namespace gc {

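// Map this stack-trace element's dex pc back to a source line number via the method's debug info.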
int32_t AllocRecordStackTraceElement::ComputeLineNumber() const {
  DCHECK(method_ != nullptr);
  int32_t line_number = method_->GetLineNumFromDexPC(dex_pc_);
  if (line_number == -1 && !method_->IsProxyMethod()) {
    // If we failed to map the dex pc to a line number, then most probably there is no debug info.
    // Make line_number the same as the dex pc; it can be decoded later using a map file.
    // See b/30183883 and b/228000954.
    line_number = static_cast<int32_t>(dex_pc_);
  }
  return line_number;
}

const char* AllocRecord::GetClassDescriptor(std::string* storage) const {
  // klass_ could contain null only if we implement class unloading.
  return klass_.IsNull() ? "null" : klass_.Read()->GetDescriptor(storage);
}

void AllocRecordObjectMap::SetMaxStackDepth(size_t max_stack_depth) {
  // Log fatal since this should already be checked when calling VMDebug.setAllocTrackerStackDepth.
  CHECK_LE(max_stack_depth, kMaxSupportedStackDepth)
      << "Allocation record max stack depth is too large";
  max_stack_depth_ = max_stack_depth;
}

AllocRecordObjectMap::~AllocRecordObjectMap() {
  Clear();
}

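// Called by the GC to treat the classes of the most recent records, and every recorded
// stack-trace method, as strong roots so they cannot be reclaimed while the tracker may
// still report them.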
void AllocRecordObjectMap::VisitRoots(RootVisitor* visitor) {
  CHECK_LE(recent_record_max_, alloc_record_max_);
  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(visitor, RootInfo(kRootDebugger));
  size_t count = recent_record_max_;
  // Only visit the most recent recent_record_max_ allocation records in entries_ and mark their
  // klass_ fields as strong roots.
  for (auto it = entries_.rbegin(), end = entries_.rend(); it != end; ++it) {
    AllocRecord& record = it->second;
    if (count > 0) {
      buffered_visitor.VisitRootIfNonNull(record.GetClassGcRoot());
      --count;
    }
    // Visit all of the stack frames to make sure no methods in the stack traces get unloaded by
    // class unloading.
    for (size_t i = 0, depth = record.GetDepth(); i < depth; ++i) {
      const AllocRecordStackTraceElement& element = record.StackElement(i);
      DCHECK(element.GetMethod() != nullptr);
      element.GetMethod()->VisitRoots(buffered_visitor, kRuntimePointerSize);
    }
  }
}

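// Update a record's class GcRoot if the GC has moved the class object. Runs only during GC
// sweeping, so the read can safely skip the read barrier.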
static inline void SweepClassObject(AllocRecord* record, IsMarkedVisitor* visitor)
    REQUIRES_SHARED(Locks::mutator_lock_)
    REQUIRES(Locks::alloc_tracker_lock_) {
  GcRoot<mirror::Class>& klass = record->GetClassGcRoot();
  // This does not need a read barrier because this is called by GC.
  mirror::Object* old_object = klass.Read<kWithoutReadBarrier>();
  if (old_object != nullptr) {
    // The class object can become null if we implement class unloading.
    // In that case we might still want to keep the class name string (not implemented).
    mirror::Object* new_object = visitor->IsMarked(old_object);
    DCHECK(new_object != nullptr);
    if (UNLIKELY(old_object != new_object)) {
      klass = GcRoot<mirror::Class>(new_object->AsClass());
    }
  }
}

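// Sweep the records as system weaks: records for dead objects are erased if they are older than
// the recent_record_max_ window, or kept with a null object root if they are within it; roots
// for objects the GC moved are updated in place.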
void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedVisitor* visitor) {
  VLOG(heap) << "Start SweepAllocationRecords()";
  size_t count_deleted = 0, count_moved = 0, count = 0;
  // Only the oldest (size - recent_record_max_) records may be deleted.
  const size_t delete_bound = std::max(entries_.size(), recent_record_max_) - recent_record_max_;
  for (auto it = entries_.begin(), end = entries_.end(); it != end;) {
    ++count;
    // This does not need a read barrier because this is called by GC.
    mirror::Object* old_object = it->first.Read<kWithoutReadBarrier>();
    AllocRecord& record = it->second;
    mirror::Object* new_object = old_object == nullptr ? nullptr : visitor->IsMarked(old_object);
    if (new_object == nullptr) {
      if (count > delete_bound) {
        it->first = GcRoot<mirror::Object>(nullptr);
        SweepClassObject(&record, visitor);
        ++it;
      } else {
        it = entries_.erase(it);
        ++count_deleted;
      }
    } else {
      if (old_object != new_object) {
        it->first = GcRoot<mirror::Object>(new_object);
        ++count_moved;
      }
      SweepClassObject(&record, visitor);
      ++it;
    }
  }
  VLOG(heap) << "Deleted " << count_deleted << " allocation records";
  VLOG(heap) << "Updated " << count_moved << " allocation records";
}

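// Used by the GC (only when not using the read barrier) to block new records while system weaks
// are being swept, and to re-allow them and wake any waiting allocators afterwards.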
void AllocRecordObjectMap::AllowNewAllocationRecords() {
  CHECK(!kUseReadBarrier);
  allow_new_record_ = true;
  new_record_condition_.Broadcast(Thread::Current());
}

void AllocRecordObjectMap::DisallowNewAllocationRecords() {
  CHECK(!kUseReadBarrier);
  allow_new_record_ = false;
}

void AllocRecordObjectMap::BroadcastForNewAllocationRecords() {
  new_record_condition_.Broadcast(Thread::Current());
}

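// Toggle allocation tracking. The record map is created lazily on first enable, and the
// allocation entrypoints are (un)instrumented outside of alloc_tracker_lock_.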
void AllocRecordObjectMap::SetAllocTrackingEnabled(bool enable) {
  Thread* self = Thread::Current();
  Heap* heap = Runtime::Current()->GetHeap();
  if (enable) {
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (heap->IsAllocTrackingEnabled()) {
        return;  // Already enabled, bail.
      }
      AllocRecordObjectMap* records = heap->GetAllocationRecords();
      if (records == nullptr) {
        records = new AllocRecordObjectMap;
        heap->SetAllocationRecords(records);
      }
      CHECK(records != nullptr);
      records->SetMaxStackDepth(heap->GetAllocTrackerStackDepth());
      size_t sz = sizeof(AllocRecordStackTraceElement) * records->max_stack_depth_ +
                  sizeof(AllocRecord) + sizeof(AllocRecordStackTrace);
      LOG(INFO) << "Enabling alloc tracker (" << records->alloc_record_max_ << " entries of "
                << records->max_stack_depth_ << " frames, taking up to "
                << PrettySize(sz * records->alloc_record_max_) << ")";
    }
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      heap->SetAllocTrackingEnabled(true);
    }
  } else {
    // Uninstrument outside of the critical section to avoid possible lock order violations with,
    // e.g., the runtime shutdown lock.
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (!heap->IsAllocTrackingEnabled()) {
        return;  // Already disabled, bail.
      }
      heap->SetAllocTrackingEnabled(false);
      LOG(INFO) << "Disabling alloc tracker";
      AllocRecordObjectMap* records = heap->GetAllocationRecords();
      records->Clear();
    }
    // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
    Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
  }
}

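// Record a single allocation. The stack trace is captured before taking alloc_tracker_lock_
// (allocations can happen during the stack walk), and the insert waits for any in-progress GC
// sweep of the records before touching the map.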
void AllocRecordObjectMap::RecordAllocation(Thread* self,
                                            ObjPtr<mirror::Object>* obj,
                                            size_t byte_count) {
  // Get stack trace outside of lock in case there are allocations during the stack walk.
  // b/27858645.
  AllocRecordStackTrace trace;
  {
    StackHandleScope<1> hs(self);
    auto obj_wrapper = hs.NewHandleWrapper(obj);

    StackVisitor::WalkStack(
        [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
          if (trace.GetDepth() >= max_stack_depth_) {
            return false;
          }
          ArtMethod* m = stack_visitor->GetMethod();
          // m may be null if we have inlined methods of unresolved classes. b/27858645
          if (m != nullptr && !m->IsRuntimeMethod()) {
            m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
            trace.AddStackElement(AllocRecordStackTraceElement(m, stack_visitor->GetDexPc()));
          }
          return true;
        },
        self,
        /* context= */ nullptr,
        art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
  }

  MutexLock mu(self, *Locks::alloc_tracker_lock_);
  Heap* const heap = Runtime::Current()->GetHeap();
  if (!heap->IsAllocTrackingEnabled()) {
    // In the process of shutting down recording, bail.
    return;
  }

  // TODO: Skip recording allocations associated with DDMS. This was a feature of the old debugger
  // but when we switched to the JVMTI-based debugger the feature was (unintentionally) broken.
  // Since nobody seemed to really notice or care, it might not be worth the trouble.

  // Wait for the GC's sweeping to complete and allow new records.
  while (UNLIKELY((!kUseReadBarrier && !allow_new_record_) ||
                  (kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
    // Check and run the empty checkpoint before blocking so the empty checkpoint will work in the
    // presence of threads blocking for weak ref access.
    self->CheckEmptyCheckpointFromWeakRefAccess(Locks::alloc_tracker_lock_);
    new_record_condition_.WaitHoldingLocks(self);
  }

  if (!heap->IsAllocTrackingEnabled()) {
    // Return if allocation tracking has been disabled while waiting for system weak access above.
    return;
  }

  DCHECK_LE(Size(), alloc_record_max_);

  // Tag the trace with the allocating thread.
  trace.SetTid(self->GetTid());

  // Add the record.
  Put(obj->Ptr(), AllocRecord(byte_count, (*obj)->GetClass(), std::move(trace)));
  DCHECK_LE(Size(), alloc_record_max_);
}

void AllocRecordObjectMap::Clear() {
  entries_.clear();
}

AllocRecordObjectMap::AllocRecordObjectMap()
    : new_record_condition_("New allocation record condition", *Locks::alloc_tracker_lock_) {}

}  // namespace gc
}  // namespace art