/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "debugger.h"

#include <sys/uio.h>

#include <functional>
#include <memory>
#include <set>
#include <unordered_set>  // For StringTable's std::unordered_set below.
#include <vector>

#include "android-base/macros.h"
#include "android-base/stringprintf.h"

#include "arch/context.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/endian_utils.h"
#include "base/enums.h"
#include "base/logging.h"
#include "base/memory_tool.h"
#include "base/safe_map.h"
#include "base/strlcpy.h"
#include "base/time_utils.h"
#include "class_linker-inl.h"
#include "class_linker.h"
#include "dex/descriptors_names.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_annotations.h"
#include "dex/dex_file_types.h"
#include "dex/dex_instruction.h"
#include "dex/utf.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/allocation_record.h"
#include "gc/gc_cause.h"
#include "gc/scoped_gc_critical_section.h"
#include "gc/space/bump_pointer_space-walk-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "handle_scope-inl.h"
#include "instrumentation.h"
#include "jni/jni_internal.h"
#include "jvalue-inl.h"
#include "mirror/array-alloc-inl.h"
#include "mirror/class-alloc-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-alloc-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
#include "nativehelper/scoped_local_ref.h"
#include "nativehelper/scoped_primitive_array.h"
#include "oat_file.h"
#include "obj_ptr-inl.h"
#include "reflection.h"
#include "reflective_handle.h"
#include "reflective_handle_scope-inl.h"
#include "runtime-inl.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "scoped_thread_state_change.h"
#include "stack.h"
#include "thread.h"
#include "thread_list.h"
#include "thread_pool.h"
#include "well_known_classes.h"

namespace art {

using android::base::StringPrintf;

// Limit alloc_record_count to the 2BE value (64k-1) that is the limit of the current protocol.
static uint16_t CappedAllocRecordCount(size_t alloc_record_count) {
  const size_t cap = 0xffff;
  if (alloc_record_count > cap) {
    return cap;
  }
  return alloc_record_count;
}

// JDWP is allowed unless the Zygote forbids it.
static bool gJdwpAllowed = true;

static bool gDdmThreadNotification = false;

// DDMS GC-related settings.
static Dbg::HpifWhen gDdmHpifWhen = Dbg::HPIF_WHEN_NEVER;
static Dbg::HpsgWhen gDdmHpsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmHpsgWhat;
static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmNhsgWhat;

Dbg::DbgThreadLifecycleCallback Dbg::thread_lifecycle_callback_;

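// Runs after a GC finishes: pushes heap info and/or heap segment data to DDMS, as requested
// by the gDdmHpifWhen/gDdmHpsgWhen/gDdmNhsgWhen settings above.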
void Dbg::GcDidFinish() {
  if (gDdmHpifWhen != HPIF_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Sending heap info to DDM";
    DdmSendHeapInfo(gDdmHpifWhen);
  }
  if (gDdmHpsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping heap to DDM";
    DdmSendHeapSegments(false);
  }
  if (gDdmNhsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping native heap to DDM";
    DdmSendHeapSegments(true);
  }
}

void Dbg::SetJdwpAllowed(bool allowed) {
  gJdwpAllowed = allowed;
}

bool Dbg::IsJdwpAllowed() {
  return gJdwpAllowed;
}

// Do we need to deoptimize the stack to handle an exception?
bool Dbg::IsForcedInterpreterNeededForExceptionImpl(Thread* thread) {
  // Deoptimization is required if at least one method in the stack needs it. However we
  // skip frames that will be unwound (thus not executed).
  bool needs_deoptimization = false;
  StackVisitor::WalkStack(
      [&](art::StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
        // The visitor is meant to be used when handling exceptions from compiled code only.
        CHECK(!visitor->IsShadowFrame()) << "We only expect to visit compiled frames: "
                                         << ArtMethod::PrettyMethod(visitor->GetMethod());
        ArtMethod* method = visitor->GetMethod();
        if (method == nullptr) {
          // We reach an upcall and don't need to deoptimize this part of the stack
          // (ManagedFragment) so we can stop the visit.
          DCHECK(!needs_deoptimization);
          return false;
        }
        if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
          // We found a compiled frame in the stack but instrumentation is set to interpret
          // everything: we need to deoptimize.
          needs_deoptimization = true;
          return false;
        }
        if (Runtime::Current()->GetInstrumentation()->IsDeoptimized(method)) {
          // We found a deoptimized method in the stack.
          needs_deoptimization = true;
          return false;
        }
        ShadowFrame* frame = visitor->GetThread()->FindDebuggerShadowFrame(visitor->GetFrameId());
        if (frame != nullptr) {
          // The debugger allocated a ShadowFrame to update a variable in the stack: we need to
          // deoptimize the stack to execute (and deallocate) this frame.
          needs_deoptimization = true;
          return false;
        }
        return true;
      },
      thread,
      /* context= */ nullptr,
      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames,
      /* check_suspended */ true,
      /* include_transitions */ true);
  return needs_deoptimization;
}

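// Forwards a DDM chunk to the managed DdmServer dispatcher and copies the reply chunk (if
// any) into |out_type| and |out_data|. Returns false if the byte[] allocation fails, the
// dispatcher throws, or no reply chunk is returned.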
bool Dbg::DdmHandleChunk(JNIEnv* env,
                         uint32_t type,
                         const ArrayRef<const jbyte>& data,
                         /*out*/uint32_t* out_type,
                         /*out*/std::vector<uint8_t>* out_data) {
  ScopedLocalRef<jbyteArray> dataArray(env, env->NewByteArray(data.size()));
  if (dataArray.get() == nullptr) {
    LOG(WARNING) << "byte[] allocation failed: " << data.size();
    env->ExceptionClear();
    return false;
  }
  env->SetByteArrayRegion(dataArray.get(),
                          0,
                          data.size(),
                          reinterpret_cast<const jbyte*>(data.data()));
  // Call "private static Chunk dispatch(int type, byte[] data, int offset, int length)".
  ScopedLocalRef<jobject> chunk(
      env,
      env->CallStaticObjectMethod(
          WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
          WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_dispatch,
          type, dataArray.get(), 0, data.size()));
  if (env->ExceptionCheck()) {
    Thread* self = Thread::Current();
    ScopedObjectAccess soa(self);
    LOG(INFO) << StringPrintf("Exception thrown by dispatcher for 0x%08x", type) << std::endl
              << self->GetException()->Dump();
    self->ClearException();
    return false;
  }

  if (chunk.get() == nullptr) {
    return false;
  }

  /*
   * Pull the pieces out of the chunk. We copy the results into a
   * newly-allocated buffer that the caller can free. We don't want to
   * continue using the Chunk object because nothing has a reference to it.
   *
   * We could avoid this by returning type/data/offset/length and having
   * the caller be aware of the object lifetime issues, but that
   * integrates the JDWP code more tightly into the rest of the runtime, and doesn't work
   * if we have responses for multiple chunks.
   *
   * So we're pretty much stuck with copying data around multiple times.
   */
  ScopedLocalRef<jbyteArray> replyData(
      env,
      reinterpret_cast<jbyteArray>(
          env->GetObjectField(
              chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data)));
  jint offset = env->GetIntField(chunk.get(),
                                 WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_offset);
  jint length = env->GetIntField(chunk.get(),
                                 WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_length);
  *out_type = env->GetIntField(chunk.get(),
                               WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type);

  VLOG(jdwp) << StringPrintf("DDM reply: type=0x%08x data=%p offset=%d length=%d",
                             type,
                             replyData.get(),
                             offset,
                             length);
  out_data->resize(length);
  env->GetByteArrayRegion(replyData.get(),
                          offset,
                          length,
                          reinterpret_cast<jbyte*>(out_data->data()));

  if (env->ExceptionCheck()) {
    Thread* self = Thread::Current();
    ScopedObjectAccess soa(self);
    LOG(INFO) << StringPrintf("Exception thrown when reading response data from dispatcher 0x%08x",
                              type) << std::endl << self->GetException()->Dump();
    self->ClearException();
    return false;
  }

  return true;
}

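// Tells the managed DdmServer that a DDM connection has been made or dropped, so registered
// chunk handlers can react.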
void Dbg::DdmBroadcast(bool connect) {
  VLOG(jdwp) << "Broadcasting DDM " << (connect ? "connect" : "disconnect") << "...";

  Thread* self = Thread::Current();
  if (self->GetState() != ThreadState::kRunnable) {
    LOG(ERROR) << "DDM broadcast in thread state " << self->GetState();
    /* try anyway? */
  }

  JNIEnv* env = self->GetJniEnv();
  jint event = connect ? 1 /*DdmServer.CONNECTED*/ : 2 /*DdmServer.DISCONNECTED*/;
  env->CallStaticVoidMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
                            WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_broadcast,
                            event);
  if (env->ExceptionCheck()) {
    LOG(ERROR) << "DdmServer.broadcast " << event << " failed";
    env->ExceptionDescribe();
    env->ExceptionClear();
  }
}

void Dbg::DdmConnected() {
  Dbg::DdmBroadcast(true);
}

void Dbg::DdmDisconnected() {
  Dbg::DdmBroadcast(false);
  gDdmThreadNotification = false;
}

/*
 * Send a notification when a thread starts, stops, or changes its name.
 *
 * Because we broadcast the full set of threads when the notifications are
 * first enabled, it's possible for "thread" to be actively executing.
 */
void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) {
  Locks::mutator_lock_->AssertNotExclusiveHeld(Thread::Current());
  if (!gDdmThreadNotification) {
    return;
  }

  RuntimeCallbacks* cb = Runtime::Current()->GetRuntimeCallbacks();
  if (type == CHUNK_TYPE("THDE")) {
    uint8_t buf[4];
    Set4BE(&buf[0], t->GetThreadId());
    cb->DdmPublishChunk(CHUNK_TYPE("THDE"), ArrayRef<const uint8_t>(buf));
  } else {
    CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
    StackHandleScope<1> hs(Thread::Current());
    Handle<mirror::String> name(hs.NewHandle(t->GetThreadName()));
    size_t char_count = (name != nullptr) ? name->GetLength() : 0;
    const jchar* chars = (name != nullptr) ? name->GetValue() : nullptr;
    bool is_compressed = (name != nullptr) ? name->IsCompressed() : false;

    std::vector<uint8_t> bytes;
    Append4BE(bytes, t->GetThreadId());
    if (is_compressed) {
      const uint8_t* chars_compressed = name->GetValueCompressed();
      AppendUtf16CompressedBE(bytes, chars_compressed, char_count);
    } else {
      AppendUtf16BE(bytes, chars, char_count);
    }
    CHECK_EQ(bytes.size(), char_count*2 + sizeof(uint32_t)*2);
    cb->DdmPublishChunk(type, ArrayRef<const uint8_t>(bytes));
  }
}

void Dbg::DdmSetThreadNotification(bool enable) {
  // Enable/disable thread notifications.
  gDdmThreadNotification = enable;
  if (enable) {
    // Use a Checkpoint to cause every currently running thread to send their own notification
    // when able. We then wait for every thread active at the time to post the creation
    // notification. Threads created later will send this themselves.
    Thread* self = Thread::Current();
    ScopedObjectAccess soa(self);
    Barrier finish_barrier(0);
    FunctionClosure fc([&](Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_) {
      Thread* cls_self = Thread::Current();
      Locks::mutator_lock_->AssertSharedHeld(cls_self);
      Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR"));
      finish_barrier.Pass(cls_self);
    });
    size_t checkpoints = Runtime::Current()->GetThreadList()->RunCheckpoint(&fc);
    ScopedThreadSuspension sts(self, ThreadState::kWaitingForCheckPointsToRun);
    finish_barrier.Increment(self, checkpoints);
  }
}

void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) {
  Dbg::DdmSendThreadNotification(t, type);
}

void Dbg::PostThreadStart(Thread* t) {
  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THCR"));
}

void Dbg::PostThreadDeath(Thread* t) {
  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE"));
}

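// Handles a DDMS HPIF request: HPIF_WHEN_NOW sends heap info immediately; other valid values
// are recorded so DdmSendHeapInfo runs at the requested time. Returns 1 on success, 0 if the
// 'when' value is invalid.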
int Dbg::DdmHandleHpifChunk(HpifWhen when) {
  if (when == HPIF_WHEN_NOW) {
    DdmSendHeapInfo(when);
    return 1;
  }

  if (when != HPIF_WHEN_NEVER && when != HPIF_WHEN_NEXT_GC && when != HPIF_WHEN_EVERY_GC) {
    LOG(ERROR) << "invalid HpifWhen value: " << static_cast<int>(when);
    return 0;
  }

  gDdmHpifWhen = when;
  return 1;
}

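// Validates and records the heap-segment 'when'/'what' settings for either the native (NHSG)
// or managed (HPSG) heap; DdmSendHeapSegments consumes them.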
bool Dbg::DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when, Dbg::HpsgWhat what, bool native) {
  if (when != HPSG_WHEN_NEVER && when != HPSG_WHEN_EVERY_GC) {
    LOG(ERROR) << "invalid HpsgWhen value: " << static_cast<int>(when);
    return false;
  }

  if (what != HPSG_WHAT_MERGED_OBJECTS && what != HPSG_WHAT_DISTINCT_OBJECTS) {
    LOG(ERROR) << "invalid HpsgWhat value: " << static_cast<int>(what);
    return false;
  }

  if (native) {
    gDdmNhsgWhen = when;
    gDdmNhsgWhat = what;
  } else {
    gDdmHpsgWhen = when;
    gDdmHpsgWhat = what;
  }
  return true;
}

void Dbg::DdmSendHeapInfo(HpifWhen reason) {
  // If there's a one-shot 'when', reset it.
  if (reason == gDdmHpifWhen) {
    if (gDdmHpifWhen == HPIF_WHEN_NEXT_GC) {
      gDdmHpifWhen = HPIF_WHEN_NEVER;
    }
  }

  /*
   * Chunk HPIF (client --> server)
   *
   * Heap Info. General information about the heap,
   * suitable for a summary display.
   *
   * [u4]: number of heaps
   *
   * For each heap:
   *   [u4]: heap ID
   *   [u8]: timestamp in ms since Unix epoch
   *   [u1]: capture reason (same as 'when' value from server)
   *   [u4]: max heap size in bytes (-Xmx)
   *   [u4]: current heap size in bytes
   *   [u4]: current number of bytes allocated
   *   [u4]: current number of objects allocated
   */
  uint8_t heap_count = 1;
  gc::Heap* heap = Runtime::Current()->GetHeap();
  std::vector<uint8_t> bytes;
  Append4BE(bytes, heap_count);
  Append4BE(bytes, 1);  // Heap id (bogus; we only have one heap).
  Append8BE(bytes, MilliTime());
  Append1BE(bytes, reason);
  Append4BE(bytes, heap->GetMaxMemory());  // Max allowed heap size in bytes.
  Append4BE(bytes, heap->GetTotalMemory());  // Current heap size in bytes.
  Append4BE(bytes, heap->GetBytesAllocated());
  Append4BE(bytes, heap->GetObjectsAllocated());
  CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4)));
  Runtime::Current()->GetRuntimeCallbacks()->DdmPublishChunk(CHUNK_TYPE("HPIF"),
                                                             ArrayRef<const uint8_t>(bytes));
}

enum HpsgSolidity {
  SOLIDITY_FREE = 0,
  SOLIDITY_HARD = 1,
  SOLIDITY_SOFT = 2,
  SOLIDITY_WEAK = 3,
  SOLIDITY_PHANTOM = 4,
  SOLIDITY_FINALIZABLE = 5,
  SOLIDITY_SWEEP = 6,
};

enum HpsgKind {
  KIND_OBJECT = 0,
  KIND_CLASS_OBJECT = 1,
  KIND_ARRAY_1 = 2,
  KIND_ARRAY_2 = 3,
  KIND_ARRAY_4 = 4,
  KIND_ARRAY_8 = 5,
  KIND_UNKNOWN = 6,
  KIND_NATIVE = 7,
};

#define HPSG_PARTIAL (1<<7)
#define HPSG_STATE(solidity, kind) ((uint8_t)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))

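// Accumulates HPSG/HPSO/NHSG records into a fixed-size buffer and publishes each full buffer
// to DDMS as one chunk. Each record is a one-byte solidity/kind state plus a length in
// ALLOCATION_UNIT_SIZE units; runs longer than 256 units are split using HPSG_PARTIAL.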
class HeapChunkContext {
 public:
  // Maximum chunk size. Obtain this from the formula:
  // (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2
  HeapChunkContext(bool merge, bool native)
      : buf_(16384 - 16),
        type_(0),
        chunk_overhead_(0) {
    Reset();
    if (native) {
      type_ = CHUNK_TYPE("NHSG");
    } else {
      type_ = merge ? CHUNK_TYPE("HPSG") : CHUNK_TYPE("HPSO");
    }
  }

  ~HeapChunkContext() {
    if (p_ > &buf_[0]) {
      Flush();
    }
  }

  void SetChunkOverhead(size_t chunk_overhead) {
    chunk_overhead_ = chunk_overhead;
  }

  void ResetStartOfNextChunk() {
    startOfNextMemoryChunk_ = nullptr;
  }

  void EnsureHeader(const void* chunk_ptr) {
    if (!needHeader_) {
      return;
    }

    // Start a new HPSx chunk.
    Write4BE(&p_, 1);  // Heap id (bogus; we only have one heap).
    Write1BE(&p_, 8);  // Size of allocation unit, in bytes.

    Write4BE(&p_, reinterpret_cast<uintptr_t>(chunk_ptr));  // virtual address of segment start.
    Write4BE(&p_, 0);  // offset of this piece (relative to the virtual address).
    // [u4]: length of piece, in allocation units
    // We won't know this until we're done, so save the offset and stuff in a fake value.
    pieceLenField_ = p_;
    Write4BE(&p_, 0x55555555);
    needHeader_ = false;
  }

  void Flush() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (pieceLenField_ == nullptr) {
      // Flush immediately post Reset (maybe back-to-back Flush). Ignore.
      CHECK(needHeader_);
      return;
    }
    // Patch the "length of piece" field.
    CHECK_LE(&buf_[0], pieceLenField_);
    CHECK_LE(pieceLenField_, p_);
    Set4BE(pieceLenField_, totalAllocationUnits_);

    ArrayRef<const uint8_t> out(&buf_[0], p_ - &buf_[0]);
    Runtime::Current()->GetRuntimeCallbacks()->DdmPublishChunk(type_, out);
    Reset();
  }

  static void HeapChunkJavaCallback(void* start, void* end, size_t used_bytes, void* arg)
      REQUIRES_SHARED(Locks::heap_bitmap_lock_,
                      Locks::mutator_lock_) {
    reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkJavaCallback(start, end, used_bytes);
  }

  static void HeapChunkNativeCallback(void* start, void* end, size_t used_bytes, void* arg)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkNativeCallback(start, end, used_bytes);
  }

 private:
  enum { ALLOCATION_UNIT_SIZE = 8 };

  void Reset() {
    p_ = &buf_[0];
    ResetStartOfNextChunk();
    totalAllocationUnits_ = 0;
    needHeader_ = true;
    pieceLenField_ = nullptr;
  }

  bool IsNative() const {
    return type_ == CHUNK_TYPE("NHSG");
  }

  // Returns true if the object is not an empty chunk.
  bool ProcessRecord(void* start, size_t used_bytes) REQUIRES_SHARED(Locks::mutator_lock_) {
    // Note: heap callbacks cannot manipulate the heap they are walking, so the following code
    // takes care not to allocate memory, by ensuring buf_ is of the correct size.
    if (used_bytes == 0) {
      if (start == nullptr) {
        // Reset for start of new heap.
        startOfNextMemoryChunk_ = nullptr;
        Flush();
      }
      // Only process in-use memory so that free region information
      // also includes dlmalloc bookkeeping.
      return false;
    }
    if (startOfNextMemoryChunk_ != nullptr) {
      // Transmit any pending free memory. Native free memory of over kMaxFreeLen could be because
      // of the use of mmaps, so don't report. If not free memory then start a new segment.
      bool flush = true;
      if (start > startOfNextMemoryChunk_) {
        const size_t kMaxFreeLen = 2 * kPageSize;
        void* free_start = startOfNextMemoryChunk_;
        void* free_end = start;
        const size_t free_len =
            reinterpret_cast<uintptr_t>(free_end) - reinterpret_cast<uintptr_t>(free_start);
        if (!IsNative() || free_len < kMaxFreeLen) {
          AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), free_start, free_len, IsNative());
          flush = false;
        }
      }
      if (flush) {
        startOfNextMemoryChunk_ = nullptr;
        Flush();
      }
    }
    return true;
  }

  void HeapChunkNativeCallback(void* start, void* /*end*/, size_t used_bytes)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (ProcessRecord(start, used_bytes)) {
      uint8_t state = ExamineNativeObject(start);
      AppendChunk(state, start, used_bytes + chunk_overhead_, /*is_native=*/ true);
      startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
    }
  }

  void HeapChunkJavaCallback(void* start, void* /*end*/, size_t used_bytes)
      REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    if (ProcessRecord(start, used_bytes)) {
      // Determine the type of this chunk.
      // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
      // If it's the same, we should combine them.
      uint8_t state = ExamineJavaObject(reinterpret_cast<mirror::Object*>(start));
      AppendChunk(state, start, used_bytes + chunk_overhead_, /*is_native=*/ false);
      startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
    }
  }

  void AppendChunk(uint8_t state, void* ptr, size_t length, bool is_native)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    // Make sure there's enough room left in the buffer.
    // We need to use two bytes for every fractional 256 allocation units used by the chunk plus
    // 17 bytes for any header.
    const size_t needed = ((RoundUp(length / ALLOCATION_UNIT_SIZE, 256) / 256) * 2) + 17;
    size_t byte_left = &buf_.back() - p_;
    if (byte_left < needed) {
      if (is_native) {
        // Cannot trigger memory allocation while walking native heap.
        return;
      }
      Flush();
    }

    byte_left = &buf_.back() - p_;
    if (byte_left < needed) {
      LOG(WARNING) << "Chunk is too big to transmit (chunk_len=" << length << ", "
                   << needed << " bytes)";
      return;
    }
    EnsureHeader(ptr);
    // Write out the chunk description.
    length /= ALLOCATION_UNIT_SIZE;  // Convert to allocation units.
    totalAllocationUnits_ += length;
    while (length > 256) {
      *p_++ = state | HPSG_PARTIAL;
      *p_++ = 255;  // length - 1
      length -= 256;
    }
    *p_++ = state;
    *p_++ = length - 1;
  }

  uint8_t ExamineNativeObject(const void* p) REQUIRES_SHARED(Locks::mutator_lock_) {
    return p == nullptr ? HPSG_STATE(SOLIDITY_FREE, 0) : HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
  }

  uint8_t ExamineJavaObject(ObjPtr<mirror::Object> o)
      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    if (o == nullptr) {
      return HPSG_STATE(SOLIDITY_FREE, 0);
    }
    // It's an allocated chunk. Figure out what it is.
    gc::Heap* heap = Runtime::Current()->GetHeap();
    if (!heap->IsLiveObjectLocked(o)) {
      LOG(ERROR) << "Invalid object in managed heap: " << o;
      return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
    }
    ObjPtr<mirror::Class> c = o->GetClass();
    if (c == nullptr) {
      // The object was probably just created but hasn't been initialized yet.
      return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
    }
    if (!heap->IsValidObjectAddress(c.Ptr())) {
      LOG(ERROR) << "Invalid class for managed heap object: " << o << " " << c;
      return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
    }
    if (c->GetClass() == nullptr) {
      LOG(ERROR) << "Null class of class " << c << " for object " << o;
      return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
    }
    if (c->IsClassClass()) {
      return HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
    }
    if (c->IsArrayClass()) {
      switch (c->GetComponentSize()) {
        case 1: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
        case 2: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
        case 4: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
        case 8: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
      }
    }
    return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
  }

  std::vector<uint8_t> buf_;
  uint8_t* p_;
  uint8_t* pieceLenField_;
  void* startOfNextMemoryChunk_;
  size_t totalAllocationUnits_;
  uint32_t type_;
  bool needHeader_;
  size_t chunk_overhead_;

  DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
};

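// Walks every supported heap space and streams a heap-start chunk, a series of segment
// chunks, and a heap-end chunk to DDMS. Spaces whose walkers cannot tolerate concurrent
// mutation (RosAlloc, region space) are walked under ScopedSuspendAll.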
void Dbg::DdmSendHeapSegments(bool native) {
  Dbg::HpsgWhen when = native ? gDdmNhsgWhen : gDdmHpsgWhen;
  Dbg::HpsgWhat what = native ? gDdmNhsgWhat : gDdmHpsgWhat;
  if (when == HPSG_WHEN_NEVER) {
    return;
  }
  RuntimeCallbacks* cb = Runtime::Current()->GetRuntimeCallbacks();
  // Figure out what kind of chunks we'll be sending.
  CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS)
      << static_cast<int>(what);

  // First, send a heap start chunk.
  uint8_t heap_id[4];
  Set4BE(&heap_id[0], 1);  // Heap id (bogus; we only have one heap).
  cb->DdmPublishChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"),
                      ArrayRef<const uint8_t>(heap_id));
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertSharedHeld(self);

  // Send a series of heap segment chunks.
  HeapChunkContext context(what == HPSG_WHAT_MERGED_OBJECTS, native);
  auto bump_pointer_space_visitor = [&](mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
    const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
    HeapChunkContext::HeapChunkJavaCallback(
        obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, &context);
  };
  if (native) {
    UNIMPLEMENTED(WARNING) << "Native heap inspection is not supported";
  } else {
    gc::Heap* heap = Runtime::Current()->GetHeap();
    for (const auto& space : heap->GetContinuousSpaces()) {
      if (space->IsDlMallocSpace()) {
        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
        // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for
        // an allocation then the first sizeof(size_t) may belong to it.
        context.SetChunkOverhead(sizeof(size_t));
        space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
      } else if (space->IsRosAllocSpace()) {
        context.SetChunkOverhead(0);
        // Need to acquire the mutator lock before the heap bitmap lock with exclusive access
        // since RosAlloc's internal logic doesn't know to release and reacquire the heap bitmap
        // lock.
        ScopedThreadSuspension sts(self, ThreadState::kSuspended);
        ScopedSuspendAll ssa(__FUNCTION__);
        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
        space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
      } else if (space->IsBumpPointerSpace()) {
        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
        context.SetChunkOverhead(0);
        space->AsBumpPointerSpace()->Walk(bump_pointer_space_visitor);
        HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
      } else if (space->IsRegionSpace()) {
        heap->IncrementDisableMovingGC(self);
        {
          ScopedThreadSuspension sts(self, ThreadState::kSuspended);
          ScopedSuspendAll ssa(__FUNCTION__);
          ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
          context.SetChunkOverhead(0);
          space->AsRegionSpace()->Walk(bump_pointer_space_visitor);
          HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
        }
        heap->DecrementDisableMovingGC(self);
      } else {
        UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
      }
      context.ResetStartOfNextChunk();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Walk the large objects, these are not in the AllocSpace.
    context.SetChunkOverhead(0);
    heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
  }

  // Finally, send a heap end chunk.
  cb->DdmPublishChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"),
                      ArrayRef<const uint8_t>(heap_id));
}

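// Enables or disables the allocation-record map consumed by GetRecentAllocations() below.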
void Dbg::SetAllocTrackingEnabled(bool enable) {
  gc::AllocRecordObjectMap::SetAllocTrackingEnabled(enable);
}

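// Deduplicating table of C strings for the allocation-data message: Add() collects strings
// (copying them only when requested), Finish() assigns each unique string an index, and
// IndexOf()/WriteTo() then agree on indices because both walk the set in the same order.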
class StringTable {
 private:
  struct Entry {
    explicit Entry(const char* data_in)
        : data(data_in), hash(ComputeModifiedUtf8Hash(data_in)), index(0) {
    }
    Entry(const Entry& entry) = default;
    Entry(Entry&& entry) = default;

    // Pointer to the actual string data.
    const char* data;

    // The hash of the data.
    const uint32_t hash;

    // The index. This will be filled in on Finish and is not part of the ordering, so mark it
    // mutable.
    mutable uint32_t index;

    bool operator==(const Entry& other) const {
      return strcmp(data, other.data) == 0;
    }
  };
  struct EntryHash {
    size_t operator()(const Entry& entry) const {
      return entry.hash;
    }
  };

 public:
  StringTable() : finished_(false) {
  }

  void Add(const char* str, bool copy_string) {
    DCHECK(!finished_);
    if (UNLIKELY(copy_string)) {
      // Check whether it's already there.
      Entry entry(str);
      if (table_.find(entry) != table_.end()) {
        return;
      }

      // Make a copy.
      size_t str_len = strlen(str);
      char* copy = new char[str_len + 1];
      strlcpy(copy, str, str_len + 1);
      string_backup_.emplace_back(copy);
      str = copy;
    }
    Entry entry(str);
    table_.insert(entry);
  }

  // Update all entries and give them an index. Note that this is likely not the insertion
  // order, as the set will likely reorder elements. Thus, Add must not be called after Finish,
  // and Finish must be called before IndexOf. In that case, WriteTo will walk in the same
  // order as Finish, and indices will agree. The order invariant, as well as indices, are
  // enforced through debug checks.
  void Finish() {
    DCHECK(!finished_);
    finished_ = true;
    uint32_t index = 0;
    for (auto& entry : table_) {
      entry.index = index;
      ++index;
    }
  }

  size_t IndexOf(const char* s) const {
    DCHECK(finished_);
    Entry entry(s);
    auto it = table_.find(entry);
    if (it == table_.end()) {
      LOG(FATAL) << "IndexOf(\"" << s << "\") failed";
    }
    return it->index;
  }

  size_t Size() const {
    return table_.size();
  }

  void WriteTo(std::vector<uint8_t>& bytes) const {
    DCHECK(finished_);
    uint32_t cur_index = 0;
    for (const auto& entry : table_) {
      DCHECK_EQ(cur_index++, entry.index);

      size_t s_len = CountModifiedUtf8Chars(entry.data);
      std::unique_ptr<uint16_t[]> s_utf16(new uint16_t[s_len]);
      ConvertModifiedUtf8ToUtf16(s_utf16.get(), entry.data);
      AppendUtf16BE(bytes, s_utf16.get(), s_len);
    }
  }

 private:
  std::unordered_set<Entry, EntryHash> table_;
  std::vector<std::unique_ptr<char[]>> string_backup_;

  bool finished_;

  DISALLOW_COPY_AND_ASSIGN(StringTable);
};

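// Returns the source file of the method's declaring class, or "" if unavailable, so the
// result can be fed straight into a StringTable.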
static const char* GetMethodSourceFile(ArtMethod* method)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(method != nullptr);
  const char* source_file = method->GetDeclaringClassSourceFile();
  return (source_file != nullptr) ? source_file : "";
}

/*
 * The data we send to DDMS contains everything we have recorded.
 *
 * Message header (all values big-endian):
 * (1b) message header len (to allow future expansion); includes itself
 * (1b) entry header len
 * (1b) stack frame len
 * (2b) number of entries
 * (4b) offset to string table from start of message
 * (2b) number of class name strings
 * (2b) number of method name strings
 * (2b) number of source file name strings
 * For each entry:
 *   (4b) total allocation size
 *   (2b) thread id
 *   (2b) allocated object's class name index
 *   (1b) stack depth
 *   For each stack frame:
 *     (2b) method's class name
 *     (2b) method name
 *     (2b) method source file
 *     (2b) line number, clipped to 32767; -2 if native; -1 if no source
 * (xb) class name strings
 * (xb) method name strings
 * (xb) source file strings
 *
 * As with other DDM traffic, strings are sent as a 4-byte length
 * followed by UTF-16 data.
 *
 * We send up 16-bit unsigned indexes into string tables. In theory there
 * can be (kMaxAllocRecordStackDepth * alloc_record_max_) unique strings in
 * each table, but in practice there should be far fewer.
 *
 * The chief reason for using a string table here is to keep the size of
 * the DDMS message to a minimum. This is partly to make the protocol
 * efficient, but also because we have to form the whole thing up all at
 * once in a memory buffer.
 *
 * We use separate string tables for class names, method names, and source
 * files to keep the indexes small. There will generally be no overlap
 * between the contents of these tables.
 */
jbyteArray Dbg::GetRecentAllocations() {
  if ((false)) {
    DumpRecentAllocations();
  }

  Thread* self = Thread::Current();
  std::vector<uint8_t> bytes;
  {
    MutexLock mu(self, *Locks::alloc_tracker_lock_);
    gc::AllocRecordObjectMap* records = Runtime::Current()->GetHeap()->GetAllocationRecords();
    // In case this method is called when allocation tracker is not enabled,
    // we should still send some data back.
    gc::AllocRecordObjectMap fallback_record_map;
    if (records == nullptr) {
      CHECK(!Runtime::Current()->GetHeap()->IsAllocTrackingEnabled());
      records = &fallback_record_map;
    }
    // We don't need to wait on the condition variable records->new_record_condition_, because
    // this function only reads the class objects, which are already marked so it doesn't change
    // their reachability.

    //
    // Part 1: generate string tables.
    //
    StringTable class_names;
    StringTable method_names;
    StringTable filenames;

    VLOG(jdwp) << "Collecting StringTables.";

    const uint16_t capped_count = CappedAllocRecordCount(records->GetRecentAllocationSize());
    uint16_t count = capped_count;
    size_t alloc_byte_count = 0;
    for (auto it = records->RBegin(), end = records->REnd();
         count > 0 && it != end; count--, it++) {
      const gc::AllocRecord* record = &it->second;
      std::string temp;
      const char* class_descr = record->GetClassDescriptor(&temp);
      class_names.Add(class_descr, !temp.empty());

      // Size + tid + class name index + stack depth.
      alloc_byte_count += 4u + 2u + 2u + 1u;

      for (size_t i = 0, depth = record->GetDepth(); i < depth; i++) {
        ArtMethod* m = record->StackElement(i).GetMethod();
        class_names.Add(m->GetDeclaringClassDescriptor(), false);
        method_names.Add(m->GetName(), false);
        filenames.Add(GetMethodSourceFile(m), false);
      }

      // Depth * (class index + method name index + file name index + line number).
      alloc_byte_count += record->GetDepth() * (2u + 2u + 2u + 2u);
    }

    class_names.Finish();
    method_names.Finish();
    filenames.Finish();
    VLOG(jdwp) << "Done collecting StringTables:" << std::endl
               << "  ClassNames: " << class_names.Size() << std::endl
               << "  MethodNames: " << method_names.Size() << std::endl
               << "  Filenames: " << filenames.Size();

    LOG(INFO) << "recent allocation records: " << capped_count;
    LOG(INFO) << "allocation records all objects: " << records->Size();

    //
    // Part 2: Generate the output and store it in the buffer.
    //

    // (1b) message header len (to allow future expansion); includes itself
    // (1b) entry header len
    // (1b) stack frame len
    const int kMessageHeaderLen = 15;
    const int kEntryHeaderLen = 9;
    const int kStackFrameLen = 8;
    Append1BE(bytes, kMessageHeaderLen);
    Append1BE(bytes, kEntryHeaderLen);
    Append1BE(bytes, kStackFrameLen);

    // (2b) number of entries
    // (4b) offset to string table from start of message
    // (2b) number of class name strings
    // (2b) number of method name strings
    // (2b) number of source file name strings
    Append2BE(bytes, capped_count);
    size_t string_table_offset = bytes.size();
    Append4BE(bytes, 0);  // We'll patch this later...
    Append2BE(bytes, class_names.Size());
    Append2BE(bytes, method_names.Size());
    Append2BE(bytes, filenames.Size());

    VLOG(jdwp) << "Dumping allocations with stacks";

    // Enlarge the vector for the allocation data.
    size_t reserve_size = bytes.size() + alloc_byte_count;
    bytes.reserve(reserve_size);

    std::string temp;
    count = capped_count;
    // The last "count" number of allocation records in "records" are the most recent "count"
    // number of allocations. Reverse iterate to get them. The most recent allocation is sent
    // first.
    for (auto it = records->RBegin(), end = records->REnd();
         count > 0 && it != end; count--, it++) {
      // For each entry:
      // (4b) total allocation size
      // (2b) thread id
      // (2b) allocated object's class name index
      // (1b) stack depth
      const gc::AllocRecord* record = &it->second;
      size_t stack_depth = record->GetDepth();
      size_t allocated_object_class_name_index =
          class_names.IndexOf(record->GetClassDescriptor(&temp));
      Append4BE(bytes, record->ByteCount());
      Append2BE(bytes, static_cast<uint16_t>(record->GetTid()));
      Append2BE(bytes, allocated_object_class_name_index);
      Append1BE(bytes, stack_depth);

      for (size_t stack_frame = 0; stack_frame < stack_depth; ++stack_frame) {
        // For each stack frame:
        // (2b) method's class name
        // (2b) method name
        // (2b) method source file
        // (2b) line number, clipped to 32767; -2 if native; -1 if no source
        ArtMethod* m = record->StackElement(stack_frame).GetMethod();
        size_t class_name_index = class_names.IndexOf(m->GetDeclaringClassDescriptor());
        size_t method_name_index = method_names.IndexOf(m->GetName());
        size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(m));
        Append2BE(bytes, class_name_index);
        Append2BE(bytes, method_name_index);
        Append2BE(bytes, file_name_index);
        Append2BE(bytes, record->StackElement(stack_frame).ComputeLineNumber());
      }
    }

    CHECK_EQ(bytes.size(), reserve_size);
    VLOG(jdwp) << "Dumping tables.";

    // (xb) class name strings
    // (xb) method name strings
    // (xb) source file strings
    Set4BE(&bytes[string_table_offset], bytes.size());
    class_names.WriteTo(bytes);
    method_names.WriteTo(bytes);
    filenames.WriteTo(bytes);

    VLOG(jdwp) << "GetRecentAllocations: data created. " << bytes.size();
  }
  JNIEnv* env = self->GetJniEnv();
  jbyteArray result = env->NewByteArray(bytes.size());
  if (result != nullptr) {
    env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0]));
  }
  return result;
}

void Dbg::DbgThreadLifecycleCallback::ThreadStart(Thread* self) {
  Dbg::PostThreadStart(self);
}

void Dbg::DbgThreadLifecycleCallback::ThreadDeath(Thread* self) {
  Dbg::PostThreadDeath(self);
}

}  // namespace art