1 /*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "debugger.h"
18
19 #include <sys/uio.h>
20
21 #include <set>
22
23 #include "arch/context.h"
24 #include "class_linker.h"
25 #include "class_linker-inl.h"
26 #include "dex_file-inl.h"
27 #include "dex_instruction.h"
28 #include "gc/accounting/card_table-inl.h"
29 #include "gc/space/large_object_space.h"
30 #include "gc/space/space-inl.h"
31 #include "invoke_arg_array_builder.h"
32 #include "jdwp/object_registry.h"
33 #include "mirror/art_field-inl.h"
34 #include "mirror/art_method-inl.h"
35 #include "mirror/class.h"
36 #include "mirror/class-inl.h"
37 #include "mirror/class_loader.h"
38 #include "mirror/object-inl.h"
39 #include "mirror/object_array-inl.h"
40 #include "mirror/throwable.h"
41 #include "object_utils.h"
42 #include "safe_map.h"
43 #include "scoped_thread_state_change.h"
44 #include "ScopedLocalRef.h"
45 #include "ScopedPrimitiveArray.h"
46 #include "sirt_ref.h"
47 #include "stack_indirect_reference_table.h"
48 #include "thread_list.h"
49 #include "throw_location.h"
50 #include "utf.h"
51 #include "well_known_classes.h"
52
53 #ifdef HAVE_ANDROID_OS
54 #include "cutils/properties.h"
55 #endif
56
57 namespace art {
58
// Maximum number of frames captured per allocation record; must fit in a byte
// for the DDMS wire format (hence "Max 255").
static const size_t kMaxAllocRecordStackDepth = 16;  // Max 255.
// Default capacity of the recent-allocation table. Must be a power of 2
// (presumably so indices can wrap with a mask — the indexing code is elsewhere).
static const size_t kDefaultNumAllocRecords = 64*1024;  // Must be a power of 2.
61
// One frame of the abbreviated stack trace captured for a tracked allocation.
struct AllocRecordStackTraceElement {
  mirror::ArtMethod* method;  // NULL marks an unused trailing entry (see AllocRecord).
  uint32_t dex_pc;            // Dex pc within |method| for this frame.

  // Maps |dex_pc| back to a source line number for reporting.
  int32_t LineNumber() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return MethodHelper(method).GetLineNumFromDexPC(dex_pc);
  }
};
70
71 struct AllocRecord {
72 mirror::Class* type;
73 size_t byte_count;
74 uint16_t thin_lock_id;
75 AllocRecordStackTraceElement stack[kMaxAllocRecordStackDepth]; // Unused entries have NULL method.
76
GetDepthart::AllocRecord77 size_t GetDepth() {
78 size_t depth = 0;
79 while (depth < kMaxAllocRecordStackDepth && stack[depth].method != NULL) {
80 ++depth;
81 }
82 return depth;
83 }
84 };
85
// A debugger-requested breakpoint: a (method, dex pc) location.
struct Breakpoint {
  mirror::ArtMethod* method;
  uint32_t dex_pc;
  Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc) : method(method), dex_pc(dex_pc) {}
};
91
operator <<(std::ostream & os,const Breakpoint & rhs)92 static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
93 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
94 os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.method).c_str(), rhs.dex_pc);
95 return os;
96 }
97
// State for the (single, global) in-progress single-step request.
struct SingleStepControl {
  // Are we single-stepping right now?
  bool is_active;
  // The thread being stepped.
  Thread* thread;

  // Step granularity and direction as requested over JDWP.
  JDWP::JdwpStepSize step_size;
  JDWP::JdwpStepDepth step_depth;

  // The method and source line where the step started.
  const mirror::ArtMethod* method;
  int32_t line_number;  // Or -1 for native methods.
  // Dex pcs associated with the starting location — presumably used to detect
  // leaving the current line; confirm against Dbg::UpdateDebugger.
  std::set<uint32_t> dex_pcs;
  // Stack depth when the step started (for step-over/step-out decisions — verify in caller).
  int stack_depth;
};
111
// Instrumentation listener that forwards runtime events (method entry/exit,
// dex pc changes, caught exceptions) to the debugger for JDWP reporting.
class DebugInstrumentationListener : public instrumentation::InstrumentationListener {
 public:
  DebugInstrumentationListener() {}
  virtual ~DebugInstrumentationListener() {}

  virtual void MethodEntered(Thread* thread, mirror::Object* this_object,
                             const mirror::ArtMethod* method, uint32_t dex_pc)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (method->IsNative()) {
      // TODO: post location events is a suspension point and native method entry stubs aren't.
      return;
    }
    // Method entry is always reported at dex pc 0.
    Dbg::PostLocationEvent(method, 0, this_object, Dbg::kMethodEntry);
  }

  virtual void MethodExited(Thread* thread, mirror::Object* this_object,
                            const mirror::ArtMethod* method,
                            uint32_t dex_pc, const JValue& return_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    UNUSED(return_value);  // JDWP method-exit events here don't carry the return value.
    if (method->IsNative()) {
      // TODO: post location events is a suspension point and native method entry stubs aren't.
      return;
    }
    Dbg::PostLocationEvent(method, dex_pc, this_object, Dbg::kMethodExit);
  }

  virtual void MethodUnwind(Thread* thread, const mirror::ArtMethod* method,
                            uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // We're not recorded to listen to this kind of event, so complain.
    LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
               << " " << dex_pc;
  }

  virtual void DexPcMoved(Thread* thread, mirror::Object* this_object,
                          const mirror::ArtMethod* method, uint32_t new_dex_pc)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Drives breakpoint and single-step checks.
    Dbg::UpdateDebugger(thread, this_object, method, new_dex_pc);
  }

  virtual void ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
                               mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
                               mirror::Throwable* exception_object)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::PostException(thread, throw_location, catch_method, catch_dex_pc, exception_object);
  }
} gDebugInstrumentationListener;
159
// JDWP is allowed unless the Zygote forbids it.
static bool gJdwpAllowed = true;

// Was there a -Xrunjdwp or -agentlib:jdwp= argument on the command line?
static bool gJdwpConfigured = false;

// Broken-down JDWP options. (Only valid if IsJdwpConfigured() is true.)
static JDWP::JdwpOptions gJdwpOptions;

// Runtime JDWP state. Created by StartJdwp(), destroyed by StopJdwp().
static JDWP::JdwpState* gJdwpState = NULL;
static bool gDebuggerConnected;  // debugger or DDMS is connected.
static bool gDebuggerActive;     // debugger is making requests.
static bool gDisposed;           // debugger called VirtualMachine.Dispose, so we should drop the connection.

// Presumably: whether DDMS asked to be notified of thread starts/ends — confirm at use sites.
static bool gDdmThreadNotification = false;

// DDMS GC-related settings (what heap info to push, and when).
static Dbg::HpifWhen gDdmHpifWhen = Dbg::HPIF_WHEN_NEVER;
static Dbg::HpsgWhen gDdmHpsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmHpsgWhat;
static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmNhsgWhat;

// Maps JDWP ids to/from mirror objects. Created by StartJdwp(), destroyed by StopJdwp().
static ObjectRegistry* gRegistry = NULL;

// Recent allocation tracking. The head/count/max bookkeeping lives in the
// allocation-tracking code elsewhere in this file.
static Mutex gAllocTrackerLock DEFAULT_MUTEX_ACQUIRED_AFTER("AllocTracker lock");
AllocRecord* Dbg::recent_allocation_records_ PT_GUARDED_BY(gAllocTrackerLock) = NULL;  // TODO: CircularBuffer<AllocRecord>
static size_t gAllocRecordMax GUARDED_BY(gAllocTrackerLock) = 0;
static size_t gAllocRecordHead GUARDED_BY(gAllocTrackerLock) = 0;
static size_t gAllocRecordCount GUARDED_BY(gAllocTrackerLock) = 0;

// Breakpoints and single-stepping. There is a single global single-step request.
static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);
static SingleStepControl gSingleStepControl GUARDED_BY(Locks::breakpoint_lock_);
196
IsBreakpoint(const mirror::ArtMethod * m,uint32_t dex_pc)197 static bool IsBreakpoint(const mirror::ArtMethod* m, uint32_t dex_pc)
198 LOCKS_EXCLUDED(Locks::breakpoint_lock_)
199 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
200 MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
201 for (size_t i = 0; i < gBreakpoints.size(); ++i) {
202 if (gBreakpoints[i].method == m && gBreakpoints[i].dex_pc == dex_pc) {
203 VLOG(jdwp) << "Hit breakpoint #" << i << ": " << gBreakpoints[i];
204 return true;
205 }
206 }
207 return false;
208 }
209
IsSuspendedForDebugger(ScopedObjectAccessUnchecked & soa,Thread * thread)210 static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread) {
211 MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
212 // A thread may be suspended for GC; in this code, we really want to know whether
213 // there's a debugger suspension active.
214 return thread->IsSuspended() && thread->GetDebugSuspendCount() > 0;
215 }
216
DecodeArray(JDWP::RefTypeId id,JDWP::JdwpError & status)217 static mirror::Array* DecodeArray(JDWP::RefTypeId id, JDWP::JdwpError& status)
218 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
219 mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
220 if (o == NULL || o == ObjectRegistry::kInvalidObject) {
221 status = JDWP::ERR_INVALID_OBJECT;
222 return NULL;
223 }
224 if (!o->IsArrayInstance()) {
225 status = JDWP::ERR_INVALID_ARRAY;
226 return NULL;
227 }
228 status = JDWP::ERR_NONE;
229 return o->AsArray();
230 }
231
DecodeClass(JDWP::RefTypeId id,JDWP::JdwpError & status)232 static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError& status)
233 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
234 mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
235 if (o == NULL || o == ObjectRegistry::kInvalidObject) {
236 status = JDWP::ERR_INVALID_OBJECT;
237 return NULL;
238 }
239 if (!o->IsClass()) {
240 status = JDWP::ERR_INVALID_CLASS;
241 return NULL;
242 }
243 status = JDWP::ERR_NONE;
244 return o->AsClass();
245 }
246
DecodeThread(ScopedObjectAccessUnchecked & soa,JDWP::ObjectId thread_id,Thread * & thread)247 static JDWP::JdwpError DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id, Thread*& thread)
248 EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
249 LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
250 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
251 mirror::Object* thread_peer = gRegistry->Get<mirror::Object*>(thread_id);
252 if (thread_peer == NULL || thread_peer == ObjectRegistry::kInvalidObject) {
253 // This isn't even an object.
254 return JDWP::ERR_INVALID_OBJECT;
255 }
256
257 mirror::Class* java_lang_Thread = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
258 if (!java_lang_Thread->IsAssignableFrom(thread_peer->GetClass())) {
259 // This isn't a thread.
260 return JDWP::ERR_INVALID_THREAD;
261 }
262
263 thread = Thread::FromManagedThread(soa, thread_peer);
264 if (thread == NULL) {
265 // This is a java.lang.Thread without a Thread*. Must be a zombie.
266 return JDWP::ERR_THREAD_NOT_ALIVE;
267 }
268 return JDWP::ERR_NONE;
269 }
270
BasicTagFromDescriptor(const char * descriptor)271 static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) {
272 // JDWP deliberately uses the descriptor characters' ASCII values for its enum.
273 // Note that by "basic" we mean that we don't get more specific than JT_OBJECT.
274 return static_cast<JDWP::JdwpTag>(descriptor[0]);
275 }
276
TagFromClass(mirror::Class * c)277 static JDWP::JdwpTag TagFromClass(mirror::Class* c)
278 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
279 CHECK(c != NULL);
280 if (c->IsArrayClass()) {
281 return JDWP::JT_ARRAY;
282 }
283
284 ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
285 if (c->IsStringClass()) {
286 return JDWP::JT_STRING;
287 } else if (c->IsClassClass()) {
288 return JDWP::JT_CLASS_OBJECT;
289 } else if (class_linker->FindSystemClass("Ljava/lang/Thread;")->IsAssignableFrom(c)) {
290 return JDWP::JT_THREAD;
291 } else if (class_linker->FindSystemClass("Ljava/lang/ThreadGroup;")->IsAssignableFrom(c)) {
292 return JDWP::JT_THREAD_GROUP;
293 } else if (class_linker->FindSystemClass("Ljava/lang/ClassLoader;")->IsAssignableFrom(c)) {
294 return JDWP::JT_CLASS_LOADER;
295 } else {
296 return JDWP::JT_OBJECT;
297 }
298 }
299
300 /*
301 * Objects declared to hold Object might actually hold a more specific
302 * type. The debugger may take a special interest in these (e.g. it
303 * wants to display the contents of Strings), so we want to return an
304 * appropriate tag.
305 *
306 * Null objects are tagged JT_OBJECT.
307 */
TagFromObject(const mirror::Object * o)308 static JDWP::JdwpTag TagFromObject(const mirror::Object* o)
309 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
310 return (o == NULL) ? JDWP::JT_OBJECT : TagFromClass(o->GetClass());
311 }
312
IsPrimitiveTag(JDWP::JdwpTag tag)313 static bool IsPrimitiveTag(JDWP::JdwpTag tag) {
314 switch (tag) {
315 case JDWP::JT_BOOLEAN:
316 case JDWP::JT_BYTE:
317 case JDWP::JT_CHAR:
318 case JDWP::JT_FLOAT:
319 case JDWP::JT_DOUBLE:
320 case JDWP::JT_INT:
321 case JDWP::JT_LONG:
322 case JDWP::JT_SHORT:
323 case JDWP::JT_VOID:
324 return true;
325 default:
326 return false;
327 }
328 }
329
330 /*
331 * Handle one of the JDWP name/value pairs.
332 *
333 * JDWP options are:
334 * help: if specified, show help message and bail
335 * transport: may be dt_socket or dt_shmem
336 * address: for dt_socket, "host:port", or just "port" when listening
337 * server: if "y", wait for debugger to attach; if "n", attach to debugger
338 * timeout: how long to wait for debugger to connect / listen
339 *
340 * Useful with server=n (these aren't supported yet):
341 * onthrow=<exception-name>: connect to debugger when exception thrown
342 * onuncaught=y|n: connect to debugger when uncaught exception thrown
343 * launch=<command-line>: launch the debugger itself
344 *
345 * The "transport" option is required, as is "address" if server=n.
346 */
ParseJdwpOption(const std::string & name,const std::string & value)347 static bool ParseJdwpOption(const std::string& name, const std::string& value) {
348 if (name == "transport") {
349 if (value == "dt_socket") {
350 gJdwpOptions.transport = JDWP::kJdwpTransportSocket;
351 } else if (value == "dt_android_adb") {
352 gJdwpOptions.transport = JDWP::kJdwpTransportAndroidAdb;
353 } else {
354 LOG(ERROR) << "JDWP transport not supported: " << value;
355 return false;
356 }
357 } else if (name == "server") {
358 if (value == "n") {
359 gJdwpOptions.server = false;
360 } else if (value == "y") {
361 gJdwpOptions.server = true;
362 } else {
363 LOG(ERROR) << "JDWP option 'server' must be 'y' or 'n'";
364 return false;
365 }
366 } else if (name == "suspend") {
367 if (value == "n") {
368 gJdwpOptions.suspend = false;
369 } else if (value == "y") {
370 gJdwpOptions.suspend = true;
371 } else {
372 LOG(ERROR) << "JDWP option 'suspend' must be 'y' or 'n'";
373 return false;
374 }
375 } else if (name == "address") {
376 /* this is either <port> or <host>:<port> */
377 std::string port_string;
378 gJdwpOptions.host.clear();
379 std::string::size_type colon = value.find(':');
380 if (colon != std::string::npos) {
381 gJdwpOptions.host = value.substr(0, colon);
382 port_string = value.substr(colon + 1);
383 } else {
384 port_string = value;
385 }
386 if (port_string.empty()) {
387 LOG(ERROR) << "JDWP address missing port: " << value;
388 return false;
389 }
390 char* end;
391 uint64_t port = strtoul(port_string.c_str(), &end, 10);
392 if (*end != '\0' || port > 0xffff) {
393 LOG(ERROR) << "JDWP address has junk in port field: " << value;
394 return false;
395 }
396 gJdwpOptions.port = port;
397 } else if (name == "launch" || name == "onthrow" || name == "oncaught" || name == "timeout") {
398 /* valid but unsupported */
399 LOG(INFO) << "Ignoring JDWP option '" << name << "'='" << value << "'";
400 } else {
401 LOG(INFO) << "Ignoring unrecognized JDWP option '" << name << "'='" << value << "'";
402 }
403
404 return true;
405 }
406
407 /*
408 * Parse the latter half of a -Xrunjdwp/-agentlib:jdwp= string, e.g.:
409 * "transport=dt_socket,address=8000,server=y,suspend=n"
410 */
ParseJdwpOptions(const std::string & options)411 bool Dbg::ParseJdwpOptions(const std::string& options) {
412 VLOG(jdwp) << "ParseJdwpOptions: " << options;
413
414 std::vector<std::string> pairs;
415 Split(options, ',', pairs);
416
417 for (size_t i = 0; i < pairs.size(); ++i) {
418 std::string::size_type equals = pairs[i].find('=');
419 if (equals == std::string::npos) {
420 LOG(ERROR) << "Can't parse JDWP option '" << pairs[i] << "' in '" << options << "'";
421 return false;
422 }
423 ParseJdwpOption(pairs[i].substr(0, equals), pairs[i].substr(equals + 1));
424 }
425
426 if (gJdwpOptions.transport == JDWP::kJdwpTransportUnknown) {
427 LOG(ERROR) << "Must specify JDWP transport: " << options;
428 }
429 if (!gJdwpOptions.server && (gJdwpOptions.host.empty() || gJdwpOptions.port == 0)) {
430 LOG(ERROR) << "Must specify JDWP host and port when server=n: " << options;
431 return false;
432 }
433
434 gJdwpConfigured = true;
435 return true;
436 }
437
// Starts the JDWP subsystem if it is both allowed and configured: creates the
// object registry, spins up the JDWP state/thread, and posts the VM-start
// event if a debugger is already attached.
void Dbg::StartJdwp() {
  if (!gJdwpAllowed || !IsJdwpConfigured()) {
    // No JDWP for you!
    return;
  }

  CHECK(gRegistry == NULL);
  gRegistry = new ObjectRegistry;

  // Init JDWP if the debugger is enabled. This may connect out to a
  // debugger, passively listen for a debugger, or block waiting for a
  // debugger.
  gJdwpState = JDWP::JdwpState::Create(&gJdwpOptions);
  if (gJdwpState == NULL) {
    // We probably failed because some other process has the port already, which means that
    // if we don't abort the user is likely to think they're talking to us when they're actually
    // talking to that other process.
    LOG(FATAL) << "Debugger thread failed to initialize";
  }

  // If a debugger has already attached, send the "welcome" message.
  // This may cause us to suspend all threads.
  if (gJdwpState->IsActive()) {
    ScopedObjectAccess soa(Thread::Current());
    if (!gJdwpState->PostVMStart()) {
      LOG(WARNING) << "Failed to post 'start' message to debugger";
    }
  }
}
467
StopJdwp()468 void Dbg::StopJdwp() {
469 delete gJdwpState;
470 delete gRegistry;
471 gRegistry = NULL;
472 }
473
// Called after a GC completes: pushes heap info and heap/native-heap segment
// data to DDMS according to the currently configured "when" settings.
void Dbg::GcDidFinish() {
  if (gDdmHpifWhen != HPIF_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    LOG(DEBUG) << "Sending heap info to DDM";
    DdmSendHeapInfo(gDdmHpifWhen);
  }
  if (gDdmHpsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    LOG(DEBUG) << "Dumping heap to DDM";
    DdmSendHeapSegments(false);  // false: managed heap
  }
  if (gDdmNhsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    LOG(DEBUG) << "Dumping native heap to DDM";
    DdmSendHeapSegments(true);  // true: native heap
  }
}
491
// See gJdwpAllowed: JDWP is allowed unless the Zygote forbids it.
void Dbg::SetJdwpAllowed(bool allowed) {
  gJdwpAllowed = allowed;
}

// Returns the calling thread's pending debugger method-invocation request.
DebugInvokeReq* Dbg::GetInvokeReq() {
  return Thread::Current()->GetInvokeReq();
}

// Returns the JDWP debugger thread, or NULL if JDWP was never started.
Thread* Dbg::GetDebugThread() {
  return (gJdwpState != NULL) ? gJdwpState->GetDebugThread() : NULL;
}

void Dbg::ClearWaitForEventThread() {
  gJdwpState->ClearWaitForEventThread();
}

// Called when a debugger (or DDMS) attaches over JDWP.
void Dbg::Connected() {
  CHECK(!gDebuggerConnected);
  VLOG(jdwp) << "JDWP has attached";
  gDebuggerConnected = true;
  gDisposed = false;
}

// Called for VirtualMachine.Dispose: record that the connection should be dropped.
void Dbg::Disposed() {
  gDisposed = true;
}

bool Dbg::IsDisposed() {
  return gDisposed;
}
522
// Transitions the debugger to the active state: suspends the world, registers
// the instrumentation listener for the events JDWP needs, then resumes.
void Dbg::GoActive() {
  // Enable all debugging features, including scans for breakpoints.
  // This is a no-op if we're already active.
  // Only called from the JDWP handler thread.
  if (gDebuggerActive) {
    return;
  }

  {
    // TODO: dalvik only warned if there were breakpoints left over. clear in Dbg::Disconnected?
    MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
    CHECK_EQ(gBreakpoints.size(), 0U);
  }

  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll();
  Thread* self = Thread::Current();
  // Force kRunnable to avoid scoped object access transitions while the world
  // is stopped (mirrors the dance in Dbg::Disconnected).
  ThreadState old_state = self->SetStateUnsafe(kRunnable);
  CHECK_NE(old_state, kRunnable);
  runtime->GetInstrumentation()->AddListener(&gDebugInstrumentationListener,
                                             instrumentation::Instrumentation::kMethodEntered |
                                             instrumentation::Instrumentation::kMethodExited |
                                             instrumentation::Instrumentation::kDexPcMoved |
                                             instrumentation::Instrumentation::kExceptionCaught);
  gDebuggerActive = true;
  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
  runtime->GetThreadList()->ResumeAll();

  LOG(INFO) << "Debugger is active";
}
553
// Undoes GoActive/Connected: removes the instrumentation listener and clears
// the object registry while the world is suspended.
void Dbg::Disconnected() {
  CHECK(gDebuggerConnected);

  LOG(INFO) << "Debugger is no longer active";

  // Suspend all threads and exclusively acquire the mutator lock. Set the state of the thread
  // to kRunnable to avoid scoped object access transitions. Remove the debugger as a listener
  // and clear the object registry.
  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll();
  Thread* self = Thread::Current();
  ThreadState old_state = self->SetStateUnsafe(kRunnable);
  runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
                                                instrumentation::Instrumentation::kMethodEntered |
                                                instrumentation::Instrumentation::kMethodExited |
                                                instrumentation::Instrumentation::kDexPcMoved |
                                                instrumentation::Instrumentation::kExceptionCaught);
  gDebuggerActive = false;
  gRegistry->Clear();
  gDebuggerConnected = false;
  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
  runtime->GetThreadList()->ResumeAll();
}
577
// True once GoActive() has run and until Disconnected().
bool Dbg::IsDebuggerActive() {
  return gDebuggerActive;
}

// True if -Xrunjdwp/-agentlib:jdwp= was given and parsed successfully.
bool Dbg::IsJdwpConfigured() {
  return gJdwpConfigured;
}

int64_t Dbg::LastDebuggerActivity() {
  return gJdwpState->LastDebuggerActivity();
}

// Resumes any threads the debugger suspended (e.g. after a disconnect).
void Dbg::UndoDebuggerSuspensions() {
  Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
}
593
GetClassName(JDWP::RefTypeId class_id)594 std::string Dbg::GetClassName(JDWP::RefTypeId class_id) {
595 mirror::Object* o = gRegistry->Get<mirror::Object*>(class_id);
596 if (o == NULL) {
597 return "NULL";
598 }
599 if (o == ObjectRegistry::kInvalidObject) {
600 return StringPrintf("invalid object %p", reinterpret_cast<void*>(class_id));
601 }
602 if (!o->IsClass()) {
603 return StringPrintf("non-class %p", o); // This is only used for debugging output anyway.
604 }
605 return DescriptorToName(ClassHelper(o->AsClass()).GetDescriptor());
606 }
607
GetClassObject(JDWP::RefTypeId id,JDWP::ObjectId & class_object_id)608 JDWP::JdwpError Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId& class_object_id) {
609 JDWP::JdwpError status;
610 mirror::Class* c = DecodeClass(id, status);
611 if (c == NULL) {
612 return status;
613 }
614 class_object_id = gRegistry->Add(c);
615 return JDWP::ERR_NONE;
616 }
617
GetSuperclass(JDWP::RefTypeId id,JDWP::RefTypeId & superclass_id)618 JDWP::JdwpError Dbg::GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId& superclass_id) {
619 JDWP::JdwpError status;
620 mirror::Class* c = DecodeClass(id, status);
621 if (c == NULL) {
622 return status;
623 }
624 if (c->IsInterface()) {
625 // http://code.google.com/p/android/issues/detail?id=20856
626 superclass_id = 0;
627 } else {
628 superclass_id = gRegistry->Add(c->GetSuperClass());
629 }
630 return JDWP::ERR_NONE;
631 }
632
GetClassLoader(JDWP::RefTypeId id,JDWP::ExpandBuf * pReply)633 JDWP::JdwpError Dbg::GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
634 mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
635 if (o == NULL || o == ObjectRegistry::kInvalidObject) {
636 return JDWP::ERR_INVALID_OBJECT;
637 }
638 expandBufAddObjectId(pReply, gRegistry->Add(o->GetClass()->GetClassLoader()));
639 return JDWP::ERR_NONE;
640 }
641
// Writes the Java-visible access flags of class |id| to the reply.
JDWP::JdwpError Dbg::GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, status);
  if (c == NULL) {
    return status;
  }

  uint32_t access_flags = c->GetAccessFlags() & kAccJavaFlagsMask;

  // Set ACC_SUPER; dex files don't contain this flag, but all classes are supposed to have it set.
  // Class.getModifiers doesn't return it, but JDWP does, so we set it here.
  access_flags |= kAccSuper;

  expandBufAdd4BE(pReply, access_flags);

  return JDWP::ERR_NONE;
}
659
// Writes monitor information for |object_id|: owner thread, entry count, and
// the list of waiting threads.
JDWP::JdwpError Dbg::GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }

  // Ensure all threads are suspended while we read objects' lock words.
  // We trade our shared hold on the mutator lock for an exclusive one, so no
  // other thread can run (and mutate lock state) while MonitorInfo reads it.
  Thread* self = Thread::Current();
  Locks::mutator_lock_->SharedUnlock(self);
  Locks::mutator_lock_->ExclusiveLock(self);

  MonitorInfo monitor_info(o);

  // Restore the shared hold our caller expects (per SHARED_LOCKS_REQUIRED).
  Locks::mutator_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedLock(self);

  if (monitor_info.owner != NULL) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.owner->GetPeer()));
  } else {
    expandBufAddObjectId(reply, gRegistry->Add(NULL));  // No owner: report a null object id.
  }
  expandBufAdd4BE(reply, monitor_info.entry_count);
  expandBufAdd4BE(reply, monitor_info.waiters.size());
  for (size_t i = 0; i < monitor_info.waiters.size(); ++i) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.waiters[i]->GetPeer()));
  }
  return JDWP::ERR_NONE;
}
689
// Collects, for a debugger-suspended thread, every monitor it owns together
// with the stack depth of the frame that acquired it.
JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id,
                                      std::vector<JDWP::ObjectId>& monitors,
                                      std::vector<uint32_t>& stack_depths)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
  Thread* thread;
  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  // Walking another thread's stack is only safe if the debugger suspended it.
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }

  // Visits each frame, recording the monitors locked by that frame along with
  // the frame's depth (runtime frames are not counted).
  struct OwnedMonitorVisitor : public StackVisitor {
    OwnedMonitorVisitor(Thread* thread, Context* context)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, context), current_stack_depth(0) {}

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      if (!GetMethod()->IsRuntimeMethod()) {
        Monitor::VisitLocks(this, AppendOwnedMonitors, this);
        ++current_stack_depth;
      }
      return true;
    }

    // Callback for Monitor::VisitLocks: records one owned monitor at the current depth.
    static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg) {
      OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg);
      visitor->monitors.push_back(owned_monitor);
      visitor->stack_depths.push_back(visitor->current_stack_depth);
    }

    size_t current_stack_depth;
    std::vector<mirror::Object*> monitors;
    std::vector<uint32_t> stack_depths;
  };
  UniquePtr<Context> context(Context::Create());
  OwnedMonitorVisitor visitor(thread, context.get());
  visitor.WalkStack();

  // Convert raw objects to registry ids for the reply.
  for (size_t i = 0; i < visitor.monitors.size(); ++i) {
    monitors.push_back(gRegistry->Add(visitor.monitors[i]));
    stack_depths.push_back(visitor.stack_depths[i]);
  }

  return JDWP::ERR_NONE;
}
741
GetContendedMonitor(JDWP::ObjectId thread_id,JDWP::ObjectId & contended_monitor)742 JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id, JDWP::ObjectId& contended_monitor)
743 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
744 ScopedObjectAccessUnchecked soa(Thread::Current());
745 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
746 Thread* thread;
747 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
748 if (error != JDWP::ERR_NONE) {
749 return error;
750 }
751 if (!IsSuspendedForDebugger(soa, thread)) {
752 return JDWP::ERR_THREAD_NOT_SUSPENDED;
753 }
754
755 contended_monitor = gRegistry->Add(Monitor::GetContendedMonitor(thread));
756
757 return JDWP::ERR_NONE;
758 }
759
GetInstanceCounts(const std::vector<JDWP::RefTypeId> & class_ids,std::vector<uint64_t> & counts)760 JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
761 std::vector<uint64_t>& counts)
762 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
763 std::vector<mirror::Class*> classes;
764 counts.clear();
765 for (size_t i = 0; i < class_ids.size(); ++i) {
766 JDWP::JdwpError status;
767 mirror::Class* c = DecodeClass(class_ids[i], status);
768 if (c == NULL) {
769 return status;
770 }
771 classes.push_back(c);
772 counts.push_back(0);
773 }
774
775 Runtime::Current()->GetHeap()->CountInstances(classes, false, &counts[0]);
776 return JDWP::ERR_NONE;
777 }
778
GetInstances(JDWP::RefTypeId class_id,int32_t max_count,std::vector<JDWP::ObjectId> & instances)779 JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count, std::vector<JDWP::ObjectId>& instances)
780 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
781 JDWP::JdwpError status;
782 mirror::Class* c = DecodeClass(class_id, status);
783 if (c == NULL) {
784 return status;
785 }
786
787 std::vector<mirror::Object*> raw_instances;
788 Runtime::Current()->GetHeap()->GetInstances(c, max_count, raw_instances);
789 for (size_t i = 0; i < raw_instances.size(); ++i) {
790 instances.push_back(gRegistry->Add(raw_instances[i]));
791 }
792 return JDWP::ERR_NONE;
793 }
794
GetReferringObjects(JDWP::ObjectId object_id,int32_t max_count,std::vector<JDWP::ObjectId> & referring_objects)795 JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
796 std::vector<JDWP::ObjectId>& referring_objects)
797 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
798 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
799 if (o == NULL || o == ObjectRegistry::kInvalidObject) {
800 return JDWP::ERR_INVALID_OBJECT;
801 }
802
803 std::vector<mirror::Object*> raw_instances;
804 Runtime::Current()->GetHeap()->GetReferringObjects(o, max_count, raw_instances);
805 for (size_t i = 0; i < raw_instances.size(); ++i) {
806 referring_objects.push_back(gRegistry->Add(raw_instances[i]));
807 }
808 return JDWP::ERR_NONE;
809 }
810
// Asks the registry to prevent GC of |object_id| while the debugger holds it.
JDWP::JdwpError Dbg::DisableCollection(JDWP::ObjectId object_id)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  gRegistry->DisableCollection(object_id);
  return JDWP::ERR_NONE;
}

// Re-allows GC of |object_id| (undoes DisableCollection).
JDWP::JdwpError Dbg::EnableCollection(JDWP::ObjectId object_id)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  gRegistry->EnableCollection(object_id);
  return JDWP::ERR_NONE;
}

// Reports whether the object behind |object_id| has been garbage collected.
JDWP::JdwpError Dbg::IsCollected(JDWP::ObjectId object_id, bool& is_collected)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  is_collected = gRegistry->IsCollected(object_id);
  return JDWP::ERR_NONE;
}

// Releases |reference_count| debugger references to |object_id| (ObjectReference.DisposeObjects).
void Dbg::DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  gRegistry->DisposeObject(object_id, reference_count);
}
833
GetReflectedType(JDWP::RefTypeId class_id,JDWP::ExpandBuf * pReply)834 JDWP::JdwpError Dbg::GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
835 JDWP::JdwpError status;
836 mirror::Class* c = DecodeClass(class_id, status);
837 if (c == NULL) {
838 return status;
839 }
840
841 expandBufAdd1(pReply, c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS);
842 expandBufAddRefTypeId(pReply, class_id);
843 return JDWP::ERR_NONE;
844 }
845
// Fills |classes| with the registry ids of every loaded reference type.
void Dbg::GetClassList(std::vector<JDWP::RefTypeId>& classes) {
  // Get the complete list of reference classes (i.e. all classes except
  // the primitive types).
  // Returns a newly-allocated buffer full of RefTypeId values.
  struct ClassListCreator {
    explicit ClassListCreator(std::vector<JDWP::RefTypeId>& classes) : classes(classes) {
    }

    // C-style trampoline matching the visitor signature VisitClasses expects;
    // |arg| is the ClassListCreator instance.
    static bool Visit(mirror::Class* c, void* arg) {
      return reinterpret_cast<ClassListCreator*>(arg)->Visit(c);
    }

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool Visit(mirror::Class* c) NO_THREAD_SAFETY_ANALYSIS {
      // Primitive types are not reference types, so they are skipped.
      if (!c->IsPrimitive()) {
        classes.push_back(gRegistry->AddRefType(c));
      }
      return true;  // true == keep visiting.
    }

    std::vector<JDWP::RefTypeId>& classes;
  };

  ClassListCreator clc(classes);
  Runtime::Current()->GetClassLinker()->VisitClasses(ClassListCreator::Visit, &clc);
}
873
GetClassInfo(JDWP::RefTypeId class_id,JDWP::JdwpTypeTag * pTypeTag,uint32_t * pStatus,std::string * pDescriptor)874 JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag, uint32_t* pStatus, std::string* pDescriptor) {
875 JDWP::JdwpError status;
876 mirror::Class* c = DecodeClass(class_id, status);
877 if (c == NULL) {
878 return status;
879 }
880
881 if (c->IsArrayClass()) {
882 *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
883 *pTypeTag = JDWP::TT_ARRAY;
884 } else {
885 if (c->IsErroneous()) {
886 *pStatus = JDWP::CS_ERROR;
887 } else {
888 *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED | JDWP::CS_INITIALIZED;
889 }
890 *pTypeTag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
891 }
892
893 if (pDescriptor != NULL) {
894 *pDescriptor = ClassHelper(c).GetDescriptor();
895 }
896 return JDWP::ERR_NONE;
897 }
898
FindLoadedClassBySignature(const char * descriptor,std::vector<JDWP::RefTypeId> & ids)899 void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>& ids) {
900 std::vector<mirror::Class*> classes;
901 Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, classes);
902 ids.clear();
903 for (size_t i = 0; i < classes.size(); ++i) {
904 ids.push_back(gRegistry->Add(classes[i]));
905 }
906 }
907
GetReferenceType(JDWP::ObjectId object_id,JDWP::ExpandBuf * pReply)908 JDWP::JdwpError Dbg::GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply)
909 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
910 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
911 if (o == NULL || o == ObjectRegistry::kInvalidObject) {
912 return JDWP::ERR_INVALID_OBJECT;
913 }
914
915 JDWP::JdwpTypeTag type_tag;
916 if (o->GetClass()->IsArrayClass()) {
917 type_tag = JDWP::TT_ARRAY;
918 } else if (o->GetClass()->IsInterface()) {
919 type_tag = JDWP::TT_INTERFACE;
920 } else {
921 type_tag = JDWP::TT_CLASS;
922 }
923 JDWP::RefTypeId type_id = gRegistry->AddRefType(o->GetClass());
924
925 expandBufAdd1(pReply, type_tag);
926 expandBufAddRefTypeId(pReply, type_id);
927
928 return JDWP::ERR_NONE;
929 }
930
// Returns the JNI-style type descriptor (e.g. "Ljava/lang/String;") for
// |class_id| in |signature|.
JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string& signature) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == NULL) {
    return status;
  }
  signature = ClassHelper(c).GetDescriptor();
  return JDWP::ERR_NONE;
}
940
// Returns the source file name recorded in the dex metadata for |class_id|.
JDWP::JdwpError Dbg::GetSourceFile(JDWP::RefTypeId class_id, std::string& result) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == NULL) {
    return status;
  }
  result = ClassHelper(c).GetSourceFile();
  return JDWP::ERR_NONE;
}
950
// Returns the JDWP tag byte describing the object named by |object_id|.
// Note: a NULL referent is passed through to TagFromObject (only an invalid
// registry entry is rejected here).
JDWP::JdwpError Dbg::GetObjectTag(JDWP::ObjectId object_id, uint8_t& tag) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  tag = TagFromObject(o);
  return JDWP::ERR_NONE;
}
959
GetTagWidth(JDWP::JdwpTag tag)960 size_t Dbg::GetTagWidth(JDWP::JdwpTag tag) {
961 switch (tag) {
962 case JDWP::JT_VOID:
963 return 0;
964 case JDWP::JT_BYTE:
965 case JDWP::JT_BOOLEAN:
966 return 1;
967 case JDWP::JT_CHAR:
968 case JDWP::JT_SHORT:
969 return 2;
970 case JDWP::JT_FLOAT:
971 case JDWP::JT_INT:
972 return 4;
973 case JDWP::JT_ARRAY:
974 case JDWP::JT_OBJECT:
975 case JDWP::JT_STRING:
976 case JDWP::JT_THREAD:
977 case JDWP::JT_THREAD_GROUP:
978 case JDWP::JT_CLASS_LOADER:
979 case JDWP::JT_CLASS_OBJECT:
980 return sizeof(JDWP::ObjectId);
981 case JDWP::JT_DOUBLE:
982 case JDWP::JT_LONG:
983 return 8;
984 default:
985 LOG(FATAL) << "Unknown tag " << tag;
986 return -1;
987 }
988 }
989
// ArrayReference.Length: returns the element count of the array |array_id|.
JDWP::JdwpError Dbg::GetArrayLength(JDWP::ObjectId array_id, int& length) {
  JDWP::JdwpError status;
  mirror::Array* a = DecodeArray(array_id, status);
  if (a == NULL) {
    return status;
  }
  length = a->GetLength();
  return JDWP::ERR_NONE;
}
999
// ArrayReference.GetValues: writes |count| elements of array |array_id|,
// starting at |offset|, into |pReply|. Primitive elements are emitted as a
// packed big-endian run; reference elements are emitted as (tag, ObjectId)
// pairs, tagged per-element with the value's precise runtime type.
JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId array_id, int offset, int count, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError status;
  mirror::Array* a = DecodeArray(array_id, status);
  if (a == NULL) {
    return status;
  }

  // Reject negative values and ranges that run off the end of the array.
  if (offset < 0 || count < 0 || offset > a->GetLength() || a->GetLength() - offset < count) {
    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
    return JDWP::ERR_INVALID_LENGTH;
  }
  // The element tag comes from the array descriptor with the leading '[' stripped.
  std::string descriptor(ClassHelper(a->GetClass()).GetDescriptor());
  JDWP::JdwpTag tag = BasicTagFromDescriptor(descriptor.c_str() + 1);

  expandBufAdd1(pReply, tag);
  expandBufAdd4BE(pReply, count);

  if (IsPrimitiveTag(tag)) {
    // Copy raw element storage, byte-swapping per element width as needed.
    size_t width = GetTagWidth(tag);
    uint8_t* dst = expandBufAddSpace(pReply, count * width);
    if (width == 8) {
      const uint64_t* src8 = reinterpret_cast<uint64_t*>(a->GetRawData(sizeof(uint64_t)));
      for (int i = 0; i < count; ++i) JDWP::Write8BE(&dst, src8[offset + i]);
    } else if (width == 4) {
      const uint32_t* src4 = reinterpret_cast<uint32_t*>(a->GetRawData(sizeof(uint32_t)));
      for (int i = 0; i < count; ++i) JDWP::Write4BE(&dst, src4[offset + i]);
    } else if (width == 2) {
      const uint16_t* src2 = reinterpret_cast<uint16_t*>(a->GetRawData(sizeof(uint16_t)));
      for (int i = 0; i < count; ++i) JDWP::Write2BE(&dst, src2[offset + i]);
    } else {
      // Single-byte elements need no swapping; bulk copy.
      const uint8_t* src = reinterpret_cast<uint8_t*>(a->GetRawData(sizeof(uint8_t)));
      memcpy(dst, &src[offset * width], count * width);
    }
  } else {
    mirror::ObjectArray<mirror::Object>* oa = a->AsObjectArray<mirror::Object>();
    for (int i = 0; i < count; ++i) {
      mirror::Object* element = oa->Get(offset + i);
      // NULL elements fall back to the array's declared component tag.
      JDWP::JdwpTag specific_tag = (element != NULL) ? TagFromObject(element) : tag;
      expandBufAdd1(pReply, specific_tag);
      expandBufAddObjectId(pReply, gRegistry->Add(element));
    }
  }

  return JDWP::ERR_NONE;
}
1045
CopyArrayData(mirror::Array * a,JDWP::Request & src,int offset,int count)1046 template <typename T> void CopyArrayData(mirror::Array* a, JDWP::Request& src, int offset, int count) {
1047 DCHECK(a->GetClass()->IsPrimitiveArray());
1048
1049 T* dst = &(reinterpret_cast<T*>(a->GetRawData(sizeof(T)))[offset * sizeof(T)]);
1050 for (int i = 0; i < count; ++i) {
1051 *dst++ = src.ReadValue(sizeof(T));
1052 }
1053 }
1054
// ArrayReference.SetValues: overwrites |count| elements of array |array_id|,
// starting at |offset|, with values read from |request|. Primitive values are
// bulk-copied per component width; reference values are decoded one ObjectId
// at a time and type-checked by the registry lookup.
JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int count,
                                      JDWP::Request& request)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  JDWP::JdwpError status;
  mirror::Array* dst = DecodeArray(array_id, status);
  if (dst == NULL) {
    return status;
  }

  // Reject negative values and ranges that run off the end of the array.
  if (offset < 0 || count < 0 || offset > dst->GetLength() || dst->GetLength() - offset < count) {
    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
    return JDWP::ERR_INVALID_LENGTH;
  }
  // Component tag comes from the array descriptor with the leading '[' stripped.
  std::string descriptor(ClassHelper(dst->GetClass()).GetDescriptor());
  JDWP::JdwpTag tag = BasicTagFromDescriptor(descriptor.c_str() + 1);

  if (IsPrimitiveTag(tag)) {
    // Dispatch on element width; CopyArrayData reads raw values from the request.
    size_t width = GetTagWidth(tag);
    if (width == 8) {
      CopyArrayData<uint64_t>(dst, request, offset, count);
    } else if (width == 4) {
      CopyArrayData<uint32_t>(dst, request, offset, count);
    } else if (width == 2) {
      CopyArrayData<uint16_t>(dst, request, offset, count);
    } else {
      CopyArrayData<uint8_t>(dst, request, offset, count);
    }
  } else {
    mirror::ObjectArray<mirror::Object>* oa = dst->AsObjectArray<mirror::Object>();
    for (int i = 0; i < count; ++i) {
      JDWP::ObjectId id = request.ReadObjectId();
      mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
      if (o == ObjectRegistry::kInvalidObject) {
        return JDWP::ERR_INVALID_OBJECT;
      }
      oa->Set(offset + i, o);
    }
  }

  return JDWP::ERR_NONE;
}
1096
// StringReference support: allocates a new java.lang.String with the given
// modified-UTF-8 contents and returns its registry id.
JDWP::ObjectId Dbg::CreateString(const std::string& str) {
  return gRegistry->Add(mirror::String::AllocFromModifiedUtf8(Thread::Current(), str.c_str()));
}
1100
// ClassType.NewInstance support: allocates an uninitialized instance of
// |class_id| (no constructor is run here) and returns its id in |new_object|.
JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId& new_object) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == NULL) {
    return status;
  }
  new_object = gRegistry->Add(c->AllocObject(Thread::Current()));
  return JDWP::ERR_NONE;
}
1110
/*
 * Used by Eclipse's "Display" view to evaluate "new byte[5]" to get "(byte[]) [0, 0, 0, 0, 0]".
 */
// Allocates a zero-filled array of |length| elements of the array class
// |array_class_id| and returns its registry id in |new_array|.
JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length,
                                       JDWP::ObjectId& new_array) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(array_class_id, status);
  if (c == NULL) {
    return status;
  }
  new_array = gRegistry->Add(mirror::Array::Alloc(Thread::Current(), c, length));
  return JDWP::ERR_NONE;
}
1124
// Event-filter helper: tests whether an instance of |instance_class_id|
// matches the class filter |class_id|. Both ids must decode (CHECK-fatal
// otherwise).
// NOTE(review): c1->IsAssignableFrom(c2) tests that c2 is assignable *to* c1
// (instance class as the supertype); for an instanceof-style filter one would
// expect c2->IsAssignableFrom(c1). The direction may be deliberate for the
// caller's argument order — confirm against the JDWP event-matching call site.
bool Dbg::MatchType(JDWP::RefTypeId instance_class_id, JDWP::RefTypeId class_id) {
  JDWP::JdwpError status;
  mirror::Class* c1 = DecodeClass(instance_class_id, status);
  CHECK(c1 != NULL);
  mirror::Class* c2 = DecodeClass(class_id, status);
  CHECK(c2 != NULL);
  return c1->IsAssignableFrom(c2);
}
1133
// Converts an ArtField pointer to a JDWP FieldId by value-punning the
// address. Only valid while the GC never moves objects; the
// MOVING_GARBAGE_COLLECTOR guard makes that assumption explicit.
static JDWP::FieldId ToFieldId(const mirror::ArtField* f)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#ifdef MOVING_GARBAGE_COLLECTOR
  UNIMPLEMENTED(FATAL);
#else
  return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f));
#endif
}
1142
// Converts an ArtMethod pointer to a JDWP MethodId by value-punning the
// address; see ToFieldId for the non-moving-GC assumption.
static JDWP::MethodId ToMethodId(const mirror::ArtMethod* m)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#ifdef MOVING_GARBAGE_COLLECTOR
  UNIMPLEMENTED(FATAL);
#else
  return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(m));
#endif
}
1151
// Inverse of ToFieldId: reinterprets the id bits as an ArtField pointer.
// No validation is performed here; a bogus id yields a bogus pointer.
static mirror::ArtField* FromFieldId(JDWP::FieldId fid)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#ifdef MOVING_GARBAGE_COLLECTOR
  UNIMPLEMENTED(FATAL);
#else
  return reinterpret_cast<mirror::ArtField*>(static_cast<uintptr_t>(fid));
#endif
}
1160
// Inverse of ToMethodId: reinterprets the id bits as an ArtMethod pointer.
// No validation is performed here; a bogus id yields a bogus pointer.
static mirror::ArtMethod* FromMethodId(JDWP::MethodId mid)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#ifdef MOVING_GARBAGE_COLLECTOR
  UNIMPLEMENTED(FATAL);
#else
  return reinterpret_cast<mirror::ArtMethod*>(static_cast<uintptr_t>(mid));
#endif
}
1169
SetLocation(JDWP::JdwpLocation & location,mirror::ArtMethod * m,uint32_t dex_pc)1170 static void SetLocation(JDWP::JdwpLocation& location, mirror::ArtMethod* m, uint32_t dex_pc)
1171 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1172 if (m == NULL) {
1173 memset(&location, 0, sizeof(location));
1174 } else {
1175 mirror::Class* c = m->GetDeclaringClass();
1176 location.type_tag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
1177 location.class_id = gRegistry->Add(c);
1178 location.method_id = ToMethodId(m);
1179 location.dex_pc = dex_pc;
1180 }
1181 }
1182
// Returns the simple (unqualified) name of the method named by |method_id|.
std::string Dbg::GetMethodName(JDWP::MethodId method_id)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::ArtMethod* m = FromMethodId(method_id);
  return MethodHelper(m).GetName();
}
1188
// Returns the simple (unqualified) name of the field named by |field_id|.
std::string Dbg::GetFieldName(JDWP::FieldId field_id)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::ArtField* f = FromFieldId(field_id);
  return FieldHelper(f).GetName();
}
1194
1195 /*
1196 * Augment the access flags for synthetic methods and fields by setting
1197 * the (as described by the spec) "0xf0000000 bit". Also, strip out any
1198 * flags not specified by the Java programming language.
1199 */
MangleAccessFlags(uint32_t accessFlags)1200 static uint32_t MangleAccessFlags(uint32_t accessFlags) {
1201 accessFlags &= kAccJavaFlagsMask;
1202 if ((accessFlags & kAccSynthetic) != 0) {
1203 accessFlags |= 0xf0000000;
1204 }
1205 return accessFlags;
1206 }
1207
static const uint16_t kEclipseWorkaroundSlot = 1000;

/*
 * Eclipse appears to expect that the "this" reference is in slot zero.
 * If it's not, the "variables" display will show two copies of "this",
 * possibly because it gets "this" from SF.ThisObject and then displays
 * all locals with nonzero slot numbers.
 *
 * So, we remap the item in slot 0 to 1000, and remap "this" to zero. On
 * SF.GetValues / SF.SetValues we map them back.
 *
 * TODO: jdb uses the value to determine whether a variable is a local or an argument,
 * by checking whether it's less than the number of arguments. To make that work, we'd
 * have to "mangle" all the arguments to come first, not just the implicit argument 'this'.
 */
static uint16_t MangleSlot(uint16_t slot, const char* name) {
  // "this" always moves to slot 0, whatever register it actually lives in.
  if (strcmp(name, "this") == 0) {
    return 0;
  }
  // Whatever previously occupied slot 0 is evicted to the workaround slot.
  return (slot == 0) ? kEclipseWorkaroundSlot : slot;
}
1232
// Inverse of MangleSlot: maps a debugger-visible slot back to the dex
// register number for method |m|.
static uint16_t DemangleSlot(uint16_t slot, mirror::ArtMethod* m)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  if (slot == kEclipseWorkaroundSlot) {
    // The evicted occupant of slot 0 (see MangleSlot).
    return 0;
  } else if (slot == 0) {
    // Slot 0 is "this": the first in-register, i.e. registers_size - ins_size.
    const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem();
    CHECK(code_item != NULL) << PrettyMethod(m);  // Native/abstract methods have no code item.
    return code_item->registers_size_ - code_item->ins_size_;
  }
  return slot;
}
1244
// ReferenceType.Fields[WithGeneric]: writes the count followed by
// (id, name, descriptor, [generic signature,] mangled access flags) for every
// instance field then every static field declared by |class_id|.
JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == NULL) {
    return status;
  }

  size_t instance_field_count = c->NumInstanceFields();
  size_t static_field_count = c->NumStaticFields();

  expandBufAdd4BE(pReply, instance_field_count + static_field_count);

  for (size_t i = 0; i < instance_field_count + static_field_count; ++i) {
    // Instance fields first, then statics (index rebased past the instance count).
    mirror::ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) : c->GetStaticField(i - instance_field_count);
    FieldHelper fh(f);
    expandBufAddFieldId(pReply, ToFieldId(f));
    expandBufAddUtf8String(pReply, fh.GetName());
    expandBufAddUtf8String(pReply, fh.GetTypeDescriptor());
    if (with_generic) {
      // Generic signatures are not tracked; send the empty string.
      static const char genericSignature[1] = "";
      expandBufAddUtf8String(pReply, genericSignature);
    }
    expandBufAdd4BE(pReply, MangleAccessFlags(f->GetAccessFlags()));
  }
  return JDWP::ERR_NONE;
}
1271
// ReferenceType.Methods[WithGeneric]: writes the count followed by
// (id, name, signature, [generic signature,] mangled access flags) for every
// direct method then every virtual method declared by |class_id|.
JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_generic,
                                           JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == NULL) {
    return status;
  }

  size_t direct_method_count = c->NumDirectMethods();
  size_t virtual_method_count = c->NumVirtualMethods();

  expandBufAdd4BE(pReply, direct_method_count + virtual_method_count);

  for (size_t i = 0; i < direct_method_count + virtual_method_count; ++i) {
    // Direct methods first, then virtuals (index rebased past the direct count).
    mirror::ArtMethod* m = (i < direct_method_count) ? c->GetDirectMethod(i) : c->GetVirtualMethod(i - direct_method_count);
    MethodHelper mh(m);
    expandBufAddMethodId(pReply, ToMethodId(m));
    expandBufAddUtf8String(pReply, mh.GetName());
    expandBufAddUtf8String(pReply, mh.GetSignature());
    if (with_generic) {
      // Generic signatures are not tracked; send the empty string.
      static const char genericSignature[1] = "";
      expandBufAddUtf8String(pReply, genericSignature);
    }
    expandBufAdd4BE(pReply, MangleAccessFlags(m->GetAccessFlags()));
  }
  return JDWP::ERR_NONE;
}
1299
OutputDeclaredInterfaces(JDWP::RefTypeId class_id,JDWP::ExpandBuf * pReply)1300 JDWP::JdwpError Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
1301 JDWP::JdwpError status;
1302 mirror::Class* c = DecodeClass(class_id, status);
1303 if (c == NULL) {
1304 return status;
1305 }
1306
1307 ClassHelper kh(c);
1308 size_t interface_count = kh.NumDirectInterfaces();
1309 expandBufAdd4BE(pReply, interface_count);
1310 for (size_t i = 0; i < interface_count; ++i) {
1311 expandBufAddRefTypeId(pReply, gRegistry->AddRefType(kh.GetDirectInterface(i)));
1312 }
1313 return JDWP::ERR_NONE;
1314 }
1315
// Method.LineTable: writes the pc range of the method followed by
// (address, line) pairs decoded from the dex debug info. The entry count is
// written as a placeholder first and patched in once decoding finishes.
void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::ExpandBuf* pReply)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  struct DebugCallbackContext {
    int numItems;            // Number of (address, line) pairs emitted so far.
    JDWP::ExpandBuf* pReply;

    // Invoked once per line-table entry by DecodeDebugInfo.
    static bool Callback(void* context, uint32_t address, uint32_t line_number) {
      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
      expandBufAdd8BE(pContext->pReply, address);
      expandBufAdd4BE(pContext->pReply, line_number);
      pContext->numItems++;
      return false;  // false == keep decoding.
    }
  };
  mirror::ArtMethod* m = FromMethodId(method_id);
  MethodHelper mh(m);
  uint64_t start, end;
  if (m->IsNative()) {
    // Native methods have no code item; -1 wraps to the JDWP "absent" value.
    start = -1;
    end = -1;
  } else {
    start = 0;
    // Return the index of the last instruction
    end = mh.GetCodeItem()->insns_size_in_code_units_ - 1;
  }

  expandBufAdd8BE(pReply, start);
  expandBufAdd8BE(pReply, end);

  // Add numLines later
  size_t numLinesOffset = expandBufGetLength(pReply);
  expandBufAdd4BE(pReply, 0);

  DebugCallbackContext context;
  context.numItems = 0;
  context.pReply = pReply;

  mh.GetDexFile().DecodeDebugInfo(mh.GetCodeItem(), m->IsStatic(), m->GetDexMethodIndex(),
                                  DebugCallbackContext::Callback, NULL, &context);

  // Patch the placeholder with the real entry count.
  JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, context.numItems);
}
1358
// Method.VariableTable[WithGeneric]: writes the argument register count, then
// one (start pc, name, descriptor, [signature,] length, mangled slot) record
// per local variable decoded from the dex debug info. The variable count is
// written as a placeholder and patched afterwards.
void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool with_generic, JDWP::ExpandBuf* pReply) {
  struct DebugCallbackContext {
    JDWP::ExpandBuf* pReply;
    size_t variable_count;   // Records emitted so far.
    bool with_generic;       // Whether to include the generic signature field.

    // Invoked once per local-variable entry by DecodeDebugInfo.
    static void Callback(void* context, uint16_t slot, uint32_t startAddress, uint32_t endAddress, const char* name, const char* descriptor, const char* signature) {
      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);

      VLOG(jdwp) << StringPrintf("    %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d", pContext->variable_count, startAddress, endAddress - startAddress, name, descriptor, signature, slot, MangleSlot(slot, name));

      // Remap to the slot numbering Eclipse expects (see MangleSlot).
      slot = MangleSlot(slot, name);

      expandBufAdd8BE(pContext->pReply, startAddress);
      expandBufAddUtf8String(pContext->pReply, name);
      expandBufAddUtf8String(pContext->pReply, descriptor);
      if (pContext->with_generic) {
        expandBufAddUtf8String(pContext->pReply, signature);
      }
      expandBufAdd4BE(pContext->pReply, endAddress - startAddress);
      expandBufAdd4BE(pContext->pReply, slot);

      ++pContext->variable_count;
    }
  };
  mirror::ArtMethod* m = FromMethodId(method_id);
  MethodHelper mh(m);
  const DexFile::CodeItem* code_item = mh.GetCodeItem();

  // arg_count considers doubles and longs to take 2 units.
  // variable_count considers everything to take 1 unit.
  std::string shorty(mh.GetShorty());
  expandBufAdd4BE(pReply, mirror::ArtMethod::NumArgRegisters(shorty));

  // We don't know the total number of variables yet, so leave a blank and update it later.
  size_t variable_count_offset = expandBufGetLength(pReply);
  expandBufAdd4BE(pReply, 0);

  DebugCallbackContext context;
  context.pReply = pReply;
  context.variable_count = 0;
  context.with_generic = with_generic;

  mh.GetDexFile().DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(), NULL,
                                  DebugCallbackContext::Callback, &context);

  // Patch the placeholder with the real variable count.
  JDWP::Set4BE(expandBufGetBuffer(pReply) + variable_count_offset, context.variable_count);
}
1407
GetBytecodes(JDWP::RefTypeId,JDWP::MethodId method_id,std::vector<uint8_t> & bytecodes)1408 JDWP::JdwpError Dbg::GetBytecodes(JDWP::RefTypeId, JDWP::MethodId method_id,
1409 std::vector<uint8_t>& bytecodes)
1410 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1411 mirror::ArtMethod* m = FromMethodId(method_id);
1412 if (m == NULL) {
1413 return JDWP::ERR_INVALID_METHODID;
1414 }
1415 MethodHelper mh(m);
1416 const DexFile::CodeItem* code_item = mh.GetCodeItem();
1417 size_t byte_count = code_item->insns_size_in_code_units_ * 2;
1418 const uint8_t* begin = reinterpret_cast<const uint8_t*>(code_item->insns_);
1419 const uint8_t* end = begin + byte_count;
1420 for (const uint8_t* p = begin; p != end; ++p) {
1421 bytecodes.push_back(*p);
1422 }
1423 return JDWP::ERR_NONE;
1424 }
1425
// Returns the basic (declared-type) JDWP tag for an instance field.
JDWP::JdwpTag Dbg::GetFieldBasicTag(JDWP::FieldId field_id) {
  return BasicTagFromDescriptor(FieldHelper(FromFieldId(field_id)).GetTypeDescriptor());
}
1429
// Returns the basic (declared-type) JDWP tag for a static field; identical to
// GetFieldBasicTag since ids encode the field either way.
JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId field_id) {
  return BasicTagFromDescriptor(FieldHelper(FromFieldId(field_id)).GetTypeDescriptor());
}
1433
GetFieldValueImpl(JDWP::RefTypeId ref_type_id,JDWP::ObjectId object_id,JDWP::FieldId field_id,JDWP::ExpandBuf * pReply,bool is_static)1434 static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id,
1435 JDWP::FieldId field_id, JDWP::ExpandBuf* pReply,
1436 bool is_static)
1437 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1438 JDWP::JdwpError status;
1439 mirror::Class* c = DecodeClass(ref_type_id, status);
1440 if (ref_type_id != 0 && c == NULL) {
1441 return status;
1442 }
1443
1444 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1445 if ((!is_static && o == NULL) || o == ObjectRegistry::kInvalidObject) {
1446 return JDWP::ERR_INVALID_OBJECT;
1447 }
1448 mirror::ArtField* f = FromFieldId(field_id);
1449
1450 mirror::Class* receiver_class = c;
1451 if (receiver_class == NULL && o != NULL) {
1452 receiver_class = o->GetClass();
1453 }
1454 // TODO: should we give up now if receiver_class is NULL?
1455 if (receiver_class != NULL && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) {
1456 LOG(INFO) << "ERR_INVALID_FIELDID: " << PrettyField(f) << " " << PrettyClass(receiver_class);
1457 return JDWP::ERR_INVALID_FIELDID;
1458 }
1459
1460 // The RI only enforces the static/non-static mismatch in one direction.
1461 // TODO: should we change the tests and check both?
1462 if (is_static) {
1463 if (!f->IsStatic()) {
1464 return JDWP::ERR_INVALID_FIELDID;
1465 }
1466 } else {
1467 if (f->IsStatic()) {
1468 LOG(WARNING) << "Ignoring non-NULL receiver for ObjectReference.SetValues on static field " << PrettyField(f);
1469 }
1470 }
1471 if (f->IsStatic()) {
1472 o = f->GetDeclaringClass();
1473 }
1474
1475 JDWP::JdwpTag tag = BasicTagFromDescriptor(FieldHelper(f).GetTypeDescriptor());
1476
1477 if (IsPrimitiveTag(tag)) {
1478 expandBufAdd1(pReply, tag);
1479 if (tag == JDWP::JT_BOOLEAN || tag == JDWP::JT_BYTE) {
1480 expandBufAdd1(pReply, f->Get32(o));
1481 } else if (tag == JDWP::JT_CHAR || tag == JDWP::JT_SHORT) {
1482 expandBufAdd2BE(pReply, f->Get32(o));
1483 } else if (tag == JDWP::JT_FLOAT || tag == JDWP::JT_INT) {
1484 expandBufAdd4BE(pReply, f->Get32(o));
1485 } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1486 expandBufAdd8BE(pReply, f->Get64(o));
1487 } else {
1488 LOG(FATAL) << "Unknown tag: " << tag;
1489 }
1490 } else {
1491 mirror::Object* value = f->GetObject(o);
1492 expandBufAdd1(pReply, TagFromObject(value));
1493 expandBufAddObjectId(pReply, gRegistry->Add(value));
1494 }
1495 return JDWP::ERR_NONE;
1496 }
1497
// ObjectReference.GetValues: instance-field read (no declaring-class check id).
JDWP::JdwpError Dbg::GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
                                   JDWP::ExpandBuf* pReply) {
  return GetFieldValueImpl(0, object_id, field_id, pReply, false);
}
1502
// ReferenceType.GetValues: static-field read (no receiver object).
JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id, JDWP::ExpandBuf* pReply) {
  return GetFieldValueImpl(ref_type_id, 0, field_id, pReply, true);
}
1506
// Shared implementation of ObjectReference.SetValues and ClassType.SetValues:
// stores |value| (either a raw primitive of |width| bytes or an ObjectId)
// into one field. For static writes |object_id| may be 0.
static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
                                         uint64_t value, int width, bool is_static)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if ((!is_static && o == NULL) || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  mirror::ArtField* f = FromFieldId(field_id);

  // The RI only enforces the static/non-static mismatch in one direction.
  // TODO: should we change the tests and check both?
  if (is_static) {
    if (!f->IsStatic()) {
      return JDWP::ERR_INVALID_FIELDID;
    }
  } else {
    if (f->IsStatic()) {
      LOG(WARNING) << "Ignoring non-NULL receiver for ObjectReference.SetValues on static field " << PrettyField(f);
    }
  }
  if (f->IsStatic()) {
    // Statics are written on the declaring class object, not the receiver.
    o = f->GetDeclaringClass();
  }

  JDWP::JdwpTag tag = BasicTagFromDescriptor(FieldHelper(f).GetTypeDescriptor());

  if (IsPrimitiveTag(tag)) {
    // Primitive payload width must match the field's storage width.
    if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
      CHECK_EQ(width, 8);
      f->Set64(o, value);
    } else {
      CHECK_LE(width, 4);
      f->Set32(o, value);
    }
  } else {
    // Reference write: |value| is an ObjectId; type-check before storing.
    mirror::Object* v = gRegistry->Get<mirror::Object*>(value);
    if (v == ObjectRegistry::kInvalidObject) {
      return JDWP::ERR_INVALID_OBJECT;
    }
    if (v != NULL) {
      mirror::Class* field_type = FieldHelper(f).GetType();
      if (!field_type->IsAssignableFrom(v->GetClass())) {
        return JDWP::ERR_INVALID_OBJECT;
      }
    }
    f->SetObject(o, v);
  }

  return JDWP::ERR_NONE;
}
1557
// ObjectReference.SetValues: instance-field write.
JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value,
                                   int width) {
  return SetFieldValueImpl(object_id, field_id, value, width, false);
}
1562
// ClassType.SetValues: static-field write (no receiver object).
JDWP::JdwpError Dbg::SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width) {
  return SetFieldValueImpl(0, field_id, value, width, true);
}
1566
// StringReference.Value: returns the modified-UTF-8 contents of the string
// named by |string_id|.
// NOTE(review): no invalid-id/NULL check here; presumably the caller has
// already validated the id — a bogus id would crash on the NULL deref.
std::string Dbg::StringToUtf8(JDWP::ObjectId string_id) {
  mirror::String* s = gRegistry->Get<mirror::String*>(string_id);
  return s->ToModifiedUtf8();
}
1571
// ThreadReference.Name: returns the thread's name in |name|. Works even for
// threads that have already exited (zombies), by reading the name field off
// the java.lang.Thread object directly.
JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string& name) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
  Thread* thread;
  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
  // THREAD_NOT_ALIVE is tolerated: zombie threads still have a name.
  if (error != JDWP::ERR_NONE && error != JDWP::ERR_THREAD_NOT_ALIVE) {
    return error;
  }

  // We still need to report the zombie threads' names, so we can't just call Thread::GetThreadName.
  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id);
  mirror::ArtField* java_lang_Thread_name_field =
      soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
  mirror::String* s =
      reinterpret_cast<mirror::String*>(java_lang_Thread_name_field->GetObject(thread_object));
  if (s != NULL) {
    name = s->ToModifiedUtf8();
  }
  return JDWP::ERR_NONE;
}
1592
// ThreadReference.ThreadGroup: writes the ObjectId of the thread's group to
// |pReply|. Zombie (exited) threads are reported as belonging to the null
// group rather than as an error.
JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
  ScopedObjectAccess soa(Thread::Current());
  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id);
  if (thread_object == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }

  // Okay, so it's an object, but is it actually a thread?
  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
  Thread* thread;
  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
  if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
    // Zombie threads are in the null group.
    expandBufAddObjectId(pReply, JDWP::ObjectId(0));
    return JDWP::ERR_NONE;
  }
  if (error != JDWP::ERR_NONE) {
    return error;
  }

  // Read the "group" field reflectively off the java.lang.Thread object.
  mirror::Class* c = Runtime::Current()->GetClassLinker()->FindSystemClass("Ljava/lang/Thread;");
  CHECK(c != NULL);
  mirror::ArtField* f = c->FindInstanceField("group", "Ljava/lang/ThreadGroup;");
  CHECK(f != NULL);
  mirror::Object* group = f->GetObject(thread_object);
  CHECK(group != NULL);
  JDWP::ObjectId thread_group_id = gRegistry->Add(group);

  expandBufAddObjectId(pReply, thread_group_id);
  return JDWP::ERR_NONE;
}
1624
GetThreadGroupName(JDWP::ObjectId thread_group_id)1625 std::string Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id) {
1626 ScopedObjectAccess soa(Thread::Current());
1627 mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
1628 CHECK(thread_group != NULL);
1629
1630 mirror::Class* c = Runtime::Current()->GetClassLinker()->FindSystemClass("Ljava/lang/ThreadGroup;");
1631 CHECK(c != NULL);
1632 mirror::ArtField* f = c->FindInstanceField("name", "Ljava/lang/String;");
1633 CHECK(f != NULL);
1634 mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group));
1635 return s->ToModifiedUtf8();
1636 }
1637
GetThreadGroupParent(JDWP::ObjectId thread_group_id)1638 JDWP::ObjectId Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id) {
1639 mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
1640 CHECK(thread_group != NULL);
1641
1642 mirror::Class* c = Runtime::Current()->GetClassLinker()->FindSystemClass("Ljava/lang/ThreadGroup;");
1643 CHECK(c != NULL);
1644 mirror::ArtField* f = c->FindInstanceField("parent", "Ljava/lang/ThreadGroup;");
1645 CHECK(f != NULL);
1646 mirror::Object* parent = f->GetObject(thread_group);
1647 return gRegistry->Add(parent);
1648 }
1649
GetSystemThreadGroupId()1650 JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
1651 ScopedObjectAccessUnchecked soa(Thread::Current());
1652 mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
1653 mirror::Object* group = f->GetObject(f->GetDeclaringClass());
1654 return gRegistry->Add(group);
1655 }
1656
GetMainThreadGroupId()1657 JDWP::ObjectId Dbg::GetMainThreadGroupId() {
1658 ScopedObjectAccess soa(Thread::Current());
1659 mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup);
1660 mirror::Object* group = f->GetObject(f->GetDeclaringClass());
1661 return gRegistry->Add(group);
1662 }
1663
JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) {
  // Collapse ART's fine-grained ThreadState onto the five coarse JDWP thread
  // status codes the wire protocol defines.
  switch (state) {
    case kBlocked:
      return JDWP::TS_MONITOR;
    case kNative:
    case kRunnable:
    case kSuspended:
      return JDWP::TS_RUNNING;
    case kSleeping:
      return JDWP::TS_SLEEPING;
    case kStarting:
    case kTerminated:
      return JDWP::TS_ZOMBIE;
    case kTimedWaiting:
    case kWaitingForDebuggerSend:
    case kWaitingForDebuggerSuspension:
    case kWaitingForDebuggerToAttach:
    case kWaitingForGcToComplete:
    case kWaitingForCheckPointsToRun:
    case kWaitingForJniOnLoad:
    case kWaitingForSignalCatcherOutput:
    case kWaitingInMainDebuggerLoop:
    case kWaitingInMainSignalCatcherLoop:
    case kWaitingPerformingGc:
    case kWaiting:
      return JDWP::TS_WAIT;
    // Don't add a 'default' here so the compiler can spot incompatible enum changes.
  }
  // Every known state returns above; reaching here means an enum value the
  // switch doesn't cover, which is a programming error.
  LOG(FATAL) << "Unknown thread state: " << state;
  return JDWP::TS_ZOMBIE;
}
1695
GetThreadStatus(JDWP::ObjectId thread_id,JDWP::JdwpThreadStatus * pThreadStatus,JDWP::JdwpSuspendStatus * pSuspendStatus)1696 JDWP::JdwpError Dbg::GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus, JDWP::JdwpSuspendStatus* pSuspendStatus) {
1697 ScopedObjectAccess soa(Thread::Current());
1698
1699 *pSuspendStatus = JDWP::SUSPEND_STATUS_NOT_SUSPENDED;
1700
1701 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
1702 Thread* thread;
1703 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
1704 if (error != JDWP::ERR_NONE) {
1705 if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
1706 *pThreadStatus = JDWP::TS_ZOMBIE;
1707 return JDWP::ERR_NONE;
1708 }
1709 return error;
1710 }
1711
1712 if (IsSuspendedForDebugger(soa, thread)) {
1713 *pSuspendStatus = JDWP::SUSPEND_STATUS_SUSPENDED;
1714 }
1715
1716 *pThreadStatus = ToJdwpThreadStatus(thread->GetState());
1717 return JDWP::ERR_NONE;
1718 }
1719
GetThreadDebugSuspendCount(JDWP::ObjectId thread_id,JDWP::ExpandBuf * pReply)1720 JDWP::JdwpError Dbg::GetThreadDebugSuspendCount(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
1721 ScopedObjectAccess soa(Thread::Current());
1722 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
1723 Thread* thread;
1724 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
1725 if (error != JDWP::ERR_NONE) {
1726 return error;
1727 }
1728 MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
1729 expandBufAdd4BE(pReply, thread->GetDebugSuspendCount());
1730 return JDWP::ERR_NONE;
1731 }
1732
Interrupt(JDWP::ObjectId thread_id)1733 JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) {
1734 ScopedObjectAccess soa(Thread::Current());
1735 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
1736 Thread* thread;
1737 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
1738 if (error != JDWP::ERR_NONE) {
1739 return error;
1740 }
1741 thread->Interrupt();
1742 return JDWP::ERR_NONE;
1743 }
1744
void Dbg::GetThreads(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>& thread_ids) {
  // Collects the registry ids of the peers of every thread belonging to the
  // given thread group; a group id that decodes to NULL matches all groups.
  class ThreadListVisitor {
   public:
    ThreadListVisitor(const ScopedObjectAccessUnchecked& soa, mirror::Object* desired_thread_group,
                      std::vector<JDWP::ObjectId>& thread_ids)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        : soa_(soa), desired_thread_group_(desired_thread_group), thread_ids_(thread_ids) {}

    // C-style trampoline so this visitor can be passed to ThreadList::ForEach.
    static void Visit(Thread* t, void* arg) {
      reinterpret_cast<ThreadListVisitor*>(arg)->Visit(t);
    }

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    void Visit(Thread* t) NO_THREAD_SAFETY_ANALYSIS {
      if (t == Dbg::GetDebugThread()) {
        // Skip the JDWP thread. Some debuggers get bent out of shape when they can't suspend and
        // query all threads, so it's easier if we just don't tell them about this thread.
        return;
      }
      mirror::Object* peer = t->GetPeer();
      if (IsInDesiredThreadGroup(peer)) {
        thread_ids_.push_back(gRegistry->Add(peer));
      }
    }

   private:
    // Returns true if 'peer' should be reported, i.e. it is non-NULL and either
    // all groups were requested or its Thread.group matches the desired group.
    bool IsInDesiredThreadGroup(mirror::Object* peer)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      // peer might be NULL if the thread is still starting up.
      if (peer == NULL) {
        // We can't tell the debugger about this thread yet.
        // TODO: if we identified threads to the debugger by their Thread*
        // rather than their peer's mirror::Object*, we could fix this.
        // Doing so might help us report ZOMBIE threads too.
        return false;
      }
      // Do we want threads from all thread groups?
      if (desired_thread_group_ == NULL) {
        return true;
      }
      mirror::Object* group = soa_.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(peer);
      return (group == desired_thread_group_);
    }

    const ScopedObjectAccessUnchecked& soa_;
    mirror::Object* const desired_thread_group_;
    std::vector<JDWP::ObjectId>& thread_ids_;
  };

  ScopedObjectAccessUnchecked soa(Thread::Current());
  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
  ThreadListVisitor tlv(soa, thread_group, thread_ids);
  // Hold the thread list lock so threads can't come or go while we iterate.
  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
  Runtime::Current()->GetThreadList()->ForEach(ThreadListVisitor::Visit, &tlv);
}
1801
GetChildThreadGroups(JDWP::ObjectId thread_group_id,std::vector<JDWP::ObjectId> & child_thread_group_ids)1802 void Dbg::GetChildThreadGroups(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>& child_thread_group_ids) {
1803 ScopedObjectAccess soa(Thread::Current());
1804 mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
1805
1806 // Get the ArrayList<ThreadGroup> "groups" out of this thread group...
1807 mirror::ArtField* groups_field = thread_group->GetClass()->FindInstanceField("groups", "Ljava/util/List;");
1808 mirror::Object* groups_array_list = groups_field->GetObject(thread_group);
1809
1810 // Get the array and size out of the ArrayList<ThreadGroup>...
1811 mirror::ArtField* array_field = groups_array_list->GetClass()->FindInstanceField("array", "[Ljava/lang/Object;");
1812 mirror::ArtField* size_field = groups_array_list->GetClass()->FindInstanceField("size", "I");
1813 mirror::ObjectArray<mirror::Object>* groups_array =
1814 array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>();
1815 const int32_t size = size_field->GetInt(groups_array_list);
1816
1817 // Copy the first 'size' elements out of the array into the result.
1818 for (int32_t i = 0; i < size; ++i) {
1819 child_thread_group_ids.push_back(gRegistry->Add(groups_array->Get(i)));
1820 }
1821 }
1822
GetStackDepth(Thread * thread)1823 static int GetStackDepth(Thread* thread)
1824 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1825 struct CountStackDepthVisitor : public StackVisitor {
1826 explicit CountStackDepthVisitor(Thread* thread)
1827 : StackVisitor(thread, NULL), depth(0) {}
1828
1829 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
1830 // annotalysis.
1831 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
1832 if (!GetMethod()->IsRuntimeMethod()) {
1833 ++depth;
1834 }
1835 return true;
1836 }
1837 size_t depth;
1838 };
1839
1840 CountStackDepthVisitor visitor(thread);
1841 visitor.WalkStack();
1842 return visitor.depth;
1843 }
1844
GetThreadFrameCount(JDWP::ObjectId thread_id,size_t & result)1845 JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t& result) {
1846 ScopedObjectAccess soa(Thread::Current());
1847 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
1848 Thread* thread;
1849 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
1850 if (error != JDWP::ERR_NONE) {
1851 return error;
1852 }
1853 if (!IsSuspendedForDebugger(soa, thread)) {
1854 return JDWP::ERR_THREAD_NOT_SUSPENDED;
1855 }
1856 result = GetStackDepth(thread);
1857 return JDWP::ERR_NONE;
1858 }
1859
JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
                                     size_t frame_count, JDWP::ExpandBuf* buf) {
  // Writes (frame id, location) pairs for 'frame_count' frames of the suspended
  // thread's stack, starting at depth 'start_frame', into the JDWP reply buffer.
  class GetFrameVisitor : public StackVisitor {
   public:
    GetFrameVisitor(Thread* thread, size_t start_frame, size_t frame_count, JDWP::ExpandBuf* buf)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        : StackVisitor(thread, NULL), depth_(0),
          start_frame_(start_frame), frame_count_(frame_count), buf_(buf) {
      // The reply begins with the number of frames that will follow.
      expandBufAdd4BE(buf_, frame_count_);
    }

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      if (GetMethod()->IsRuntimeMethod()) {
        return true;  // The debugger can't do anything useful with a frame that has no Method*.
      }
      // Stop once the requested window [start_frame_, start_frame_ + frame_count_) is done.
      if (depth_ >= start_frame_ + frame_count_) {
        return false;
      }
      if (depth_ >= start_frame_) {
        JDWP::FrameId frame_id(GetFrameId());
        JDWP::JdwpLocation location;
        SetLocation(location, GetMethod(), GetDexPc());
        VLOG(jdwp) << StringPrintf(" Frame %3zd: id=%3lld ", depth_, frame_id) << location;
        expandBufAdd8BE(buf_, frame_id);
        expandBufAddLocation(buf_, location);
      }
      ++depth_;
      return true;
    }

   private:
    size_t depth_;               // Current depth, counting only non-runtime frames.
    const size_t start_frame_;   // First frame to report.
    const size_t frame_count_;   // Number of frames to report.
    JDWP::ExpandBuf* buf_;       // JDWP reply buffer being filled.
  };

  ScopedObjectAccessUnchecked soa(Thread::Current());
  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
  Thread* thread;
  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  GetFrameVisitor visitor(thread, start_frame, frame_count, buf);
  visitor.WalkStack();
  return JDWP::ERR_NONE;
}
1913
GetThreadSelfId()1914 JDWP::ObjectId Dbg::GetThreadSelfId() {
1915 ScopedObjectAccessUnchecked soa(Thread::Current());
1916 return gRegistry->Add(soa.Self()->GetPeer());
1917 }
1918
SuspendVM()1919 void Dbg::SuspendVM() {
1920 Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
1921 }
1922
ResumeVM()1923 void Dbg::ResumeVM() {
1924 Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
1925 }
1926
SuspendThread(JDWP::ObjectId thread_id,bool request_suspension)1927 JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) {
1928 ScopedLocalRef<jobject> peer(Thread::Current()->GetJniEnv(), NULL);
1929 {
1930 ScopedObjectAccess soa(Thread::Current());
1931 peer.reset(soa.AddLocalReference<jobject>(gRegistry->Get<mirror::Object*>(thread_id)));
1932 }
1933 if (peer.get() == NULL) {
1934 return JDWP::ERR_THREAD_NOT_ALIVE;
1935 }
1936 // Suspend thread to build stack trace.
1937 bool timed_out;
1938 Thread* thread = Thread::SuspendForDebugger(peer.get(), request_suspension, &timed_out);
1939 if (thread != NULL) {
1940 return JDWP::ERR_NONE;
1941 } else if (timed_out) {
1942 return JDWP::ERR_INTERNAL;
1943 } else {
1944 return JDWP::ERR_THREAD_NOT_ALIVE;
1945 }
1946 }
1947
ResumeThread(JDWP::ObjectId thread_id)1948 void Dbg::ResumeThread(JDWP::ObjectId thread_id) {
1949 ScopedObjectAccessUnchecked soa(Thread::Current());
1950 mirror::Object* peer = gRegistry->Get<mirror::Object*>(thread_id);
1951 Thread* thread;
1952 {
1953 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
1954 thread = Thread::FromManagedThread(soa, peer);
1955 }
1956 if (thread == NULL) {
1957 LOG(WARNING) << "No such thread for resume: " << peer;
1958 return;
1959 }
1960 bool needs_resume;
1961 {
1962 MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
1963 needs_resume = thread->GetSuspendCount() > 0;
1964 }
1965 if (needs_resume) {
1966 Runtime::Current()->GetThreadList()->Resume(thread, true);
1967 }
1968 }
1969
SuspendSelf()1970 void Dbg::SuspendSelf() {
1971 Runtime::Current()->GetThreadList()->SuspendSelfForDebugger();
1972 }
1973
1974 struct GetThisVisitor : public StackVisitor {
GetThisVisitorart::GetThisVisitor1975 GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
1976 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1977 : StackVisitor(thread, context), this_object(NULL), frame_id(frame_id) {}
1978
1979 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
1980 // annotalysis.
VisitFrameart::GetThisVisitor1981 virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
1982 if (frame_id != GetFrameId()) {
1983 return true; // continue
1984 } else {
1985 this_object = GetThisObject();
1986 return false;
1987 }
1988 }
1989
1990 mirror::Object* this_object;
1991 JDWP::FrameId frame_id;
1992 };
1993
GetThisObject(JDWP::ObjectId thread_id,JDWP::FrameId frame_id,JDWP::ObjectId * result)1994 JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
1995 JDWP::ObjectId* result) {
1996 ScopedObjectAccessUnchecked soa(Thread::Current());
1997 Thread* thread;
1998 {
1999 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2000 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2001 if (error != JDWP::ERR_NONE) {
2002 return error;
2003 }
2004 if (!IsSuspendedForDebugger(soa, thread)) {
2005 return JDWP::ERR_THREAD_NOT_SUSPENDED;
2006 }
2007 }
2008 UniquePtr<Context> context(Context::Create());
2009 GetThisVisitor visitor(thread, context.get(), frame_id);
2010 visitor.WalkStack();
2011 *result = gRegistry->Add(visitor.this_object);
2012 return JDWP::ERR_NONE;
2013 }
2014
void Dbg::GetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag,
                        uint8_t* buf, size_t width) {
  // Reads local variable 'slot' from the given frame and encodes it into 'buf'
  // as a JDWP tagged value: buf[0] receives the tag (refined for object types),
  // buf[1..] the 'width'-byte big-endian payload.
  struct GetLocalVisitor : public StackVisitor {
    GetLocalVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id, int slot,
                    JDWP::JdwpTag tag, uint8_t* buf, size_t width)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        : StackVisitor(thread, context), frame_id_(frame_id), slot_(slot), tag_(tag),
          buf_(buf), width_(width) {}

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      if (GetFrameId() != frame_id_) {
        return true;  // Not our frame, carry on.
      }
      // TODO: check that the tag is compatible with the actual type of the slot!
      mirror::ArtMethod* m = GetMethod();
      // Map the JDWP slot number onto a dex virtual register for this method.
      uint16_t reg = DemangleSlot(slot_, m);

      switch (tag_) {
      case JDWP::JT_BOOLEAN:
        {
          CHECK_EQ(width_, 1U);
          uint32_t intVal = GetVReg(m, reg, kIntVReg);
          VLOG(jdwp) << "get boolean local " << reg << " = " << intVal;
          JDWP::Set1(buf_+1, intVal != 0);
        }
        break;
      case JDWP::JT_BYTE:
        {
          CHECK_EQ(width_, 1U);
          uint32_t intVal = GetVReg(m, reg, kIntVReg);
          VLOG(jdwp) << "get byte local " << reg << " = " << intVal;
          JDWP::Set1(buf_+1, intVal);
        }
        break;
      case JDWP::JT_SHORT:
      case JDWP::JT_CHAR:
        {
          CHECK_EQ(width_, 2U);
          uint32_t intVal = GetVReg(m, reg, kIntVReg);
          VLOG(jdwp) << "get short/char local " << reg << " = " << intVal;
          JDWP::Set2BE(buf_+1, intVal);
        }
        break;
      case JDWP::JT_INT:
        {
          CHECK_EQ(width_, 4U);
          uint32_t intVal = GetVReg(m, reg, kIntVReg);
          VLOG(jdwp) << "get int local " << reg << " = " << intVal;
          JDWP::Set4BE(buf_+1, intVal);
        }
        break;
      case JDWP::JT_FLOAT:
        {
          CHECK_EQ(width_, 4U);
          // Floats are shipped as their raw 32-bit pattern.
          uint32_t intVal = GetVReg(m, reg, kFloatVReg);
          VLOG(jdwp) << "get int/float local " << reg << " = " << intVal;
          JDWP::Set4BE(buf_+1, intVal);
        }
        break;
      case JDWP::JT_ARRAY:
        {
          CHECK_EQ(width_, sizeof(JDWP::ObjectId));
          mirror::Object* o = reinterpret_cast<mirror::Object*>(GetVReg(m, reg, kReferenceVReg));
          VLOG(jdwp) << "get array local " << reg << " = " << o;
          // Sanity-check the register really holds a heap reference.
          if (!Runtime::Current()->GetHeap()->IsHeapAddress(o)) {
            LOG(FATAL) << "Register " << reg << " expected to hold array: " << o;
          }
          JDWP::SetObjectId(buf_+1, gRegistry->Add(o));
        }
        break;
      case JDWP::JT_CLASS_LOADER:
      case JDWP::JT_CLASS_OBJECT:
      case JDWP::JT_OBJECT:
      case JDWP::JT_STRING:
      case JDWP::JT_THREAD:
      case JDWP::JT_THREAD_GROUP:
        {
          CHECK_EQ(width_, sizeof(JDWP::ObjectId));
          mirror::Object* o = reinterpret_cast<mirror::Object*>(GetVReg(m, reg, kReferenceVReg));
          VLOG(jdwp) << "get object local " << reg << " = " << o;
          if (!Runtime::Current()->GetHeap()->IsHeapAddress(o)) {
            LOG(FATAL) << "Register " << reg << " expected to hold object: " << o;
          }
          // Refine the generic tag to the precise one for this object's type.
          tag_ = TagFromObject(o);
          JDWP::SetObjectId(buf_+1, gRegistry->Add(o));
        }
        break;
      case JDWP::JT_DOUBLE:
        {
          CHECK_EQ(width_, 8U);
          // 64-bit values occupy a pair of adjacent virtual registers.
          uint32_t lo = GetVReg(m, reg, kDoubleLoVReg);
          uint64_t hi = GetVReg(m, reg + 1, kDoubleHiVReg);
          uint64_t longVal = (hi << 32) | lo;
          VLOG(jdwp) << "get double/long local " << hi << ":" << lo << " = " << longVal;
          JDWP::Set8BE(buf_+1, longVal);
        }
        break;
      case JDWP::JT_LONG:
        {
          CHECK_EQ(width_, 8U);
          uint32_t lo = GetVReg(m, reg, kLongLoVReg);
          uint64_t hi = GetVReg(m, reg + 1, kLongHiVReg);
          uint64_t longVal = (hi << 32) | lo;
          VLOG(jdwp) << "get double/long local " << hi << ":" << lo << " = " << longVal;
          JDWP::Set8BE(buf_+1, longVal);
        }
        break;
      default:
        LOG(FATAL) << "Unknown tag " << tag_;
        break;
      }

      // Prepend tag, which may have been updated.
      JDWP::Set1(buf_, tag_);
      return false;
    }

    const JDWP::FrameId frame_id_;
    const int slot_;
    JDWP::JdwpTag tag_;  // Not const: refined to the precise object tag above.
    uint8_t* const buf_;
    const size_t width_;
  };

  ScopedObjectAccessUnchecked soa(Thread::Current());
  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
  Thread* thread;
  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
  if (error != JDWP::ERR_NONE) {
    // No way to report the error from here; the buffer is simply left untouched.
    return;
  }
  UniquePtr<Context> context(Context::Create());
  GetLocalVisitor visitor(thread, context.get(), frame_id, slot, tag, buf, width);
  visitor.WalkStack();
}
2152
void Dbg::SetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag,
                        uint64_t value, size_t width) {
  // Writes 'value' (a 'width'-byte value of JDWP type 'tag') into local
  // variable 'slot' of the given frame on the given thread.
  struct SetLocalVisitor : public StackVisitor {
    SetLocalVisitor(Thread* thread, Context* context,
                    JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint64_t value,
                    size_t width)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        : StackVisitor(thread, context),
          frame_id_(frame_id), slot_(slot), tag_(tag), value_(value), width_(width) {}

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      if (GetFrameId() != frame_id_) {
        return true;  // Not our frame, carry on.
      }
      // TODO: check that the tag is compatible with the actual type of the slot!
      mirror::ArtMethod* m = GetMethod();
      // Map the JDWP slot number onto a dex virtual register for this method.
      uint16_t reg = DemangleSlot(slot_, m);

      switch (tag_) {
        case JDWP::JT_BOOLEAN:
        case JDWP::JT_BYTE:
          CHECK_EQ(width_, 1U);
          SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg);
          break;
        case JDWP::JT_SHORT:
        case JDWP::JT_CHAR:
          CHECK_EQ(width_, 2U);
          SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg);
          break;
        case JDWP::JT_INT:
          CHECK_EQ(width_, 4U);
          SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg);
          break;
        case JDWP::JT_FLOAT:
          CHECK_EQ(width_, 4U);
          SetVReg(m, reg, static_cast<uint32_t>(value_), kFloatVReg);
          break;
        case JDWP::JT_ARRAY:
        case JDWP::JT_OBJECT:
        case JDWP::JT_STRING:
        {
          CHECK_EQ(width_, sizeof(JDWP::ObjectId));
          // The incoming value is a registry id; convert it back to a reference.
          mirror::Object* o = gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value_));
          if (o == ObjectRegistry::kInvalidObject) {
            UNIMPLEMENTED(FATAL) << "return an error code when given an invalid object to store";
          }
          SetVReg(m, reg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)), kReferenceVReg);
        }
        break;
        case JDWP::JT_DOUBLE:
          // 64-bit values are split across a pair of adjacent virtual registers.
          CHECK_EQ(width_, 8U);
          SetVReg(m, reg, static_cast<uint32_t>(value_), kDoubleLoVReg);
          SetVReg(m, reg + 1, static_cast<uint32_t>(value_ >> 32), kDoubleHiVReg);
          break;
        case JDWP::JT_LONG:
          CHECK_EQ(width_, 8U);
          SetVReg(m, reg, static_cast<uint32_t>(value_), kLongLoVReg);
          SetVReg(m, reg + 1, static_cast<uint32_t>(value_ >> 32), kLongHiVReg);
          break;
        default:
          LOG(FATAL) << "Unknown tag " << tag_;
          break;
      }
      return false;
    }

    const JDWP::FrameId frame_id_;
    const int slot_;
    const JDWP::JdwpTag tag_;
    const uint64_t value_;
    const size_t width_;
  };

  ScopedObjectAccessUnchecked soa(Thread::Current());
  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
  Thread* thread;
  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
  if (error != JDWP::ERR_NONE) {
    // No way to report the error from here; the write is silently skipped.
    return;
  }
  UniquePtr<Context> context(Context::Create());
  SetLocalVisitor visitor(thread, context.get(), frame_id, slot, tag, value, width);
  visitor.WalkStack();
}
2239
PostLocationEvent(const mirror::ArtMethod * m,int dex_pc,mirror::Object * this_object,int event_flags)2240 void Dbg::PostLocationEvent(const mirror::ArtMethod* m, int dex_pc,
2241 mirror::Object* this_object, int event_flags) {
2242 mirror::Class* c = m->GetDeclaringClass();
2243
2244 JDWP::JdwpLocation location;
2245 location.type_tag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
2246 location.class_id = gRegistry->AddRefType(c);
2247 location.method_id = ToMethodId(m);
2248 location.dex_pc = m->IsNative() ? -1 : dex_pc;
2249
2250 // If 'this_object' isn't already in the registry, we know that we're not looking for it,
2251 // so there's no point adding it to the registry and burning through ids.
2252 JDWP::ObjectId this_id = 0;
2253 if (gRegistry->Contains(this_object)) {
2254 this_id = gRegistry->Add(this_object);
2255 }
2256 gJdwpState->PostLocationEvent(&location, this_id, event_flags);
2257 }
2258
PostException(Thread * thread,const ThrowLocation & throw_location,mirror::ArtMethod * catch_method,uint32_t catch_dex_pc,mirror::Throwable * exception_object)2259 void Dbg::PostException(Thread* thread, const ThrowLocation& throw_location,
2260 mirror::ArtMethod* catch_method,
2261 uint32_t catch_dex_pc, mirror::Throwable* exception_object) {
2262 if (!IsDebuggerActive()) {
2263 return;
2264 }
2265
2266 JDWP::JdwpLocation jdwp_throw_location;
2267 SetLocation(jdwp_throw_location, throw_location.GetMethod(), throw_location.GetDexPc());
2268 JDWP::JdwpLocation catch_location;
2269 SetLocation(catch_location, catch_method, catch_dex_pc);
2270
2271 // We need 'this' for InstanceOnly filters.
2272 JDWP::ObjectId this_id = gRegistry->Add(throw_location.GetThis());
2273 JDWP::ObjectId exception_id = gRegistry->Add(exception_object);
2274 JDWP::RefTypeId exception_class_id = gRegistry->AddRefType(exception_object->GetClass());
2275
2276 gJdwpState->PostException(&jdwp_throw_location, exception_id, exception_class_id, &catch_location,
2277 this_id);
2278 }
2279
PostClassPrepare(mirror::Class * c)2280 void Dbg::PostClassPrepare(mirror::Class* c) {
2281 if (!IsDebuggerActive()) {
2282 return;
2283 }
2284
2285 // OLD-TODO - we currently always send both "verified" and "prepared" since
2286 // debuggers seem to like that. There might be some advantage to honesty,
2287 // since the class may not yet be verified.
2288 int state = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
2289 JDWP::JdwpTypeTag tag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
2290 gJdwpState->PostClassPrepare(tag, gRegistry->Add(c), ClassHelper(c).GetDescriptor(), state);
2291 }
2292
void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object,
                         const mirror::ArtMethod* m, uint32_t dex_pc) {
  // Decides whether the current execution point triggers a breakpoint and/or
  // single-step event and, if so, posts the matching location event.
  if (!IsDebuggerActive() || dex_pc == static_cast<uint32_t>(-2) /* fake method exit */) {
    return;
  }

  int event_flags = 0;

  if (IsBreakpoint(m, dex_pc)) {
    event_flags |= kBreakpoint;
  }

  {
    // If the debugger is single-stepping one of our threads, check to
    // see if we're that thread and we've reached a step point.
    MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
    if (gSingleStepControl.is_active && gSingleStepControl.thread == thread) {
      CHECK(!m->IsNative());
      if (gSingleStepControl.step_depth == JDWP::SD_INTO) {
        // Step into method calls. We break when the line number
        // or method pointer changes. If we're in SS_MIN mode, we
        // always stop.
        if (gSingleStepControl.method != m) {
          event_flags |= kSingleStep;
          VLOG(jdwp) << "SS new method";
        } else if (gSingleStepControl.step_size == JDWP::SS_MIN) {
          event_flags |= kSingleStep;
          VLOG(jdwp) << "SS new instruction";
        } else if (gSingleStepControl.dex_pcs.find(dex_pc) == gSingleStepControl.dex_pcs.end()) {
          // dex_pcs holds the pcs of the line we started on; leaving that set
          // means we reached a new source line.
          event_flags |= kSingleStep;
          VLOG(jdwp) << "SS new line";
        }
      } else if (gSingleStepControl.step_depth == JDWP::SD_OVER) {
        // Step over method calls. We break when the line number is
        // different and the frame depth is <= the original frame
        // depth. (We can't just compare on the method, because we
        // might get unrolled past it by an exception, and it's tricky
        // to identify recursion.)

        int stack_depth = GetStackDepth(thread);

        if (stack_depth < gSingleStepControl.stack_depth) {
          // popped up one or more frames, always trigger
          event_flags |= kSingleStep;
          VLOG(jdwp) << "SS method pop";
        } else if (stack_depth == gSingleStepControl.stack_depth) {
          // same depth, see if we moved
          if (gSingleStepControl.step_size == JDWP::SS_MIN) {
            event_flags |= kSingleStep;
            VLOG(jdwp) << "SS new instruction";
          } else if (gSingleStepControl.dex_pcs.find(dex_pc) == gSingleStepControl.dex_pcs.end()) {
            event_flags |= kSingleStep;
            VLOG(jdwp) << "SS new line";
          }
        }
      } else {
        CHECK_EQ(gSingleStepControl.step_depth, JDWP::SD_OUT);
        // Return from the current method. We break when the frame
        // depth pops up.

        // This differs from the "method exit" break in that it stops
        // with the PC at the next instruction in the returned-to
        // function, rather than the end of the returning function.

        int stack_depth = GetStackDepth(thread);
        if (stack_depth < gSingleStepControl.stack_depth) {
          event_flags |= kSingleStep;
          VLOG(jdwp) << "SS method pop";
        }
      }
    }
  }

  // If there's something interesting going on, see if it matches one
  // of the debugger filters.
  if (event_flags != 0) {
    Dbg::PostLocationEvent(m, dex_pc, this_object, event_flags);
  }
}
2372
WatchLocation(const JDWP::JdwpLocation * location)2373 void Dbg::WatchLocation(const JDWP::JdwpLocation* location) {
2374 MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
2375 mirror::ArtMethod* m = FromMethodId(location->method_id);
2376 gBreakpoints.push_back(Breakpoint(m, location->dex_pc));
2377 VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": " << gBreakpoints[gBreakpoints.size() - 1];
2378 }
2379
UnwatchLocation(const JDWP::JdwpLocation * location)2380 void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location) {
2381 MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
2382 mirror::ArtMethod* m = FromMethodId(location->method_id);
2383 for (size_t i = 0; i < gBreakpoints.size(); ++i) {
2384 if (gBreakpoints[i].method == m && gBreakpoints[i].dex_pc == location->dex_pc) {
2385 VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
2386 gBreakpoints.erase(gBreakpoints.begin() + i);
2387 return;
2388 }
2389 }
2390 }
2391
// Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't
// cause suspension if the thread is the current thread.
class ScopedThreadSuspension {
 public:
  // Decodes 'thread_id' and, if it names a thread other than 'self', suspends
  // it for the debugger. Check GetError() before using GetThread().
  ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
      thread_(NULL),
      error_(JDWP::ERR_NONE),
      self_suspend_(false),
      other_suspend_(false) {
    ScopedObjectAccessUnchecked soa(self);
    {
      MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
      error_ = DecodeThread(soa, thread_id, thread_);
    }
    if (error_ == JDWP::ERR_NONE) {
      if (thread_ == soa.Self()) {
        // The current thread needs no actual suspension.
        self_suspend_ = true;
      } else {
        // Leave the runnable state before asking for the target's suspension.
        soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
        jobject thread_peer = gRegistry->GetJObject(thread_id);
        bool timed_out;
        Thread* suspended_thread = Thread::SuspendForDebugger(thread_peer, true, &timed_out);
        CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kWaitingForDebuggerSuspension);
        if (suspended_thread == NULL) {
          // Thread terminated from under us while suspending.
          error_ = JDWP::ERR_INVALID_THREAD;
        } else {
          CHECK_EQ(suspended_thread, thread_);
          other_suspend_ = true;
        }
      }
    }
  }

  // The decoded thread, valid only when GetError() is ERR_NONE.
  Thread* GetThread() const {
    return thread_;
  }

  JDWP::JdwpError GetError() const {
    return error_;
  }

  // Resumes the target only if this object was the one that suspended it.
  ~ScopedThreadSuspension() {
    if (other_suspend_) {
      Runtime::Current()->GetThreadList()->Resume(thread_, true);
    }
  }

 private:
  Thread* thread_;
  JDWP::JdwpError error_;
  bool self_suspend_;
  bool other_suspend_;
};
2447
// Configures single-stepping for the given thread: records the thread's current method, source
// line and managed-stack depth, and computes the set of dex pcs that belong to the current
// source line. All state is stored in the global gSingleStepControl (breakpoint_lock_ held).
JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize step_size,
                                   JDWP::JdwpStepDepth step_depth) {
  Thread* self = Thread::Current();
  // Suspend the target thread (if it isn't us) so that we can safely walk its stack.
  ScopedThreadSuspension sts(self, thread_id);
  if (sts.GetError() != JDWP::ERR_NONE) {
    return sts.GetError();
  }

  MutexLock mu2(self, *Locks::breakpoint_lock_);
  // TODO: there's no theoretical reason why we couldn't support single-stepping
  // of multiple threads at once, but we never did so historically.
  if (gSingleStepControl.thread != NULL && sts.GetThread() != gSingleStepControl.thread) {
    LOG(WARNING) << "single-step already active for " << *gSingleStepControl.thread
                 << "; switching to " << *sts.GetThread();
  }

  //
  // Work out what Method* we're in, the current line number, and how deep the stack currently
  // is for step-out.
  //

  struct SingleStepStackVisitor : public StackVisitor {
    explicit SingleStepStackVisitor(Thread* thread)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::breakpoint_lock_)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        : StackVisitor(thread, NULL) {
      gSingleStepControl.method = NULL;
      gSingleStepControl.stack_depth = 0;
    }

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      Locks::breakpoint_lock_->AssertHeld(Thread::Current());
      const mirror::ArtMethod* m = GetMethod();
      if (!m->IsRuntimeMethod()) {
        // Count only non-runtime frames; the first one seen is the current method.
        ++gSingleStepControl.stack_depth;
        if (gSingleStepControl.method == NULL) {
          const mirror::DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache();
          gSingleStepControl.method = m;
          // -1 means "line number unknown" (no dex cache / no line info).
          gSingleStepControl.line_number = -1;
          if (dex_cache != NULL) {
            const DexFile& dex_file = *dex_cache->GetDexFile();
            gSingleStepControl.line_number = dex_file.GetLineNumFromPC(m, GetDexPc());
          }
        }
      }
      return true;
    }
  };

  SingleStepStackVisitor visitor(sts.GetThread());
  visitor.WalkStack();

  //
  // Find the dex_pc values that correspond to the current line, for line-based single-stepping.
  //

  struct DebugCallbackContext {
    DebugCallbackContext() EXCLUSIVE_LOCKS_REQUIRED(Locks::breakpoint_lock_) {
      last_pc_valid = false;
      last_pc = 0;
    }

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    // Invoked for each (address, line) entry in the method's debug position table.
    static bool Callback(void* raw_context, uint32_t address, uint32_t line_number) NO_THREAD_SAFETY_ANALYSIS {
      Locks::breakpoint_lock_->AssertHeld(Thread::Current());
      DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context);
      if (static_cast<int32_t>(line_number) == gSingleStepControl.line_number) {
        if (!context->last_pc_valid) {
          // Everything from this address until the next line change is ours.
          context->last_pc = address;
          context->last_pc_valid = true;
        }
        // Otherwise, if we're already in a valid range for this line,
        // just keep going (shouldn't really happen)...
      } else if (context->last_pc_valid) {  // and the line number is new
        // Add everything from the last entry up until here to the set
        for (uint32_t dex_pc = context->last_pc; dex_pc < address; ++dex_pc) {
          gSingleStepControl.dex_pcs.insert(dex_pc);
        }
        context->last_pc_valid = false;
      }
      return false;  // There may be multiple entries for any given line.
    }

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    ~DebugCallbackContext() NO_THREAD_SAFETY_ANALYSIS {
      Locks::breakpoint_lock_->AssertHeld(Thread::Current());
      // If the line number was the last in the position table...
      if (last_pc_valid) {
        // The open range runs to the end of the method's code item.
        size_t end = MethodHelper(gSingleStepControl.method).GetCodeItem()->insns_size_in_code_units_;
        for (uint32_t dex_pc = last_pc; dex_pc < end; ++dex_pc) {
          gSingleStepControl.dex_pcs.insert(dex_pc);
        }
      }
    }

    bool last_pc_valid;  // True while we're inside a pc range for the current line.
    uint32_t last_pc;    // Start of the current open pc range.
  };
  gSingleStepControl.dex_pcs.clear();
  const mirror::ArtMethod* m = gSingleStepControl.method;
  if (m->IsNative()) {
    // Native methods have no dex line table; leave the pc set empty.
    gSingleStepControl.line_number = -1;
  } else {
    DebugCallbackContext context;
    MethodHelper mh(m);
    mh.GetDexFile().DecodeDebugInfo(mh.GetCodeItem(), m->IsStatic(), m->GetDexMethodIndex(),
                                    DebugCallbackContext::Callback, NULL, &context);
  }

  //
  // Everything else...
  //

  gSingleStepControl.thread = sts.GetThread();
  gSingleStepControl.step_size = step_size;
  gSingleStepControl.step_depth = step_depth;
  gSingleStepControl.is_active = true;

  if (VLOG_IS_ON(jdwp)) {
    VLOG(jdwp) << "Single-step thread: " << *gSingleStepControl.thread;
    VLOG(jdwp) << "Single-step step size: " << gSingleStepControl.step_size;
    VLOG(jdwp) << "Single-step step depth: " << gSingleStepControl.step_depth;
    VLOG(jdwp) << "Single-step current method: " << PrettyMethod(gSingleStepControl.method);
    VLOG(jdwp) << "Single-step current line: " << gSingleStepControl.line_number;
    VLOG(jdwp) << "Single-step current stack depth: " << gSingleStepControl.stack_depth;
    VLOG(jdwp) << "Single-step dex_pc values:";
    for (std::set<uint32_t>::iterator it = gSingleStepControl.dex_pcs.begin() ; it != gSingleStepControl.dex_pcs.end(); ++it) {
      VLOG(jdwp) << StringPrintf(" %#x", *it);
    }
  }

  return JDWP::ERR_NONE;
}
2586
UnconfigureStep(JDWP::ObjectId)2587 void Dbg::UnconfigureStep(JDWP::ObjectId /*thread_id*/) {
2588 MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
2589
2590 gSingleStepControl.is_active = false;
2591 gSingleStepControl.thread = NULL;
2592 gSingleStepControl.dex_pcs.clear();
2593 }
2594
JdwpTagToShortyChar(JDWP::JdwpTag tag)2595 static char JdwpTagToShortyChar(JDWP::JdwpTag tag) {
2596 switch (tag) {
2597 default:
2598 LOG(FATAL) << "unknown JDWP tag: " << PrintableChar(tag);
2599
2600 // Primitives.
2601 case JDWP::JT_BYTE: return 'B';
2602 case JDWP::JT_CHAR: return 'C';
2603 case JDWP::JT_FLOAT: return 'F';
2604 case JDWP::JT_DOUBLE: return 'D';
2605 case JDWP::JT_INT: return 'I';
2606 case JDWP::JT_LONG: return 'J';
2607 case JDWP::JT_SHORT: return 'S';
2608 case JDWP::JT_VOID: return 'V';
2609 case JDWP::JT_BOOLEAN: return 'Z';
2610
2611 // Reference types.
2612 case JDWP::JT_ARRAY:
2613 case JDWP::JT_OBJECT:
2614 case JDWP::JT_STRING:
2615 case JDWP::JT_THREAD:
2616 case JDWP::JT_THREAD_GROUP:
2617 case JDWP::JT_CLASS_LOADER:
2618 case JDWP::JT_CLASS_OBJECT:
2619 return 'L';
2620 }
2621 }
2622
// Arranges for 'method_id' to be invoked on the (suspended) target thread 'thread_id'. This
// JDWP thread validates the request, fills in the target thread's DebugInvokeReq, resumes the
// target, waits for the invoke to complete (the call itself runs in Dbg::ExecuteMethod on the
// event thread), re-suspends as required, and copies out the result/exception.
JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId object_id,
                                  JDWP::RefTypeId class_id, JDWP::MethodId method_id,
                                  uint32_t arg_count, uint64_t* arg_values,
                                  JDWP::JdwpTag* arg_types, uint32_t options,
                                  JDWP::JdwpTag* pResultTag, uint64_t* pResultValue,
                                  JDWP::ObjectId* pExceptionId) {
  ThreadList* thread_list = Runtime::Current()->GetThreadList();

  Thread* targetThread = NULL;
  DebugInvokeReq* req = NULL;
  Thread* self = Thread::Current();
  {
    // Validate the request and fill in the target thread's DebugInvokeReq while holding the
    // thread list lock (so the target can't disappear under us).
    ScopedObjectAccessUnchecked soa(self);
    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
    JDWP::JdwpError error = DecodeThread(soa, thread_id, targetThread);
    if (error != JDWP::ERR_NONE) {
      LOG(ERROR) << "InvokeMethod request for invalid thread id " << thread_id;
      return error;
    }
    req = targetThread->GetInvokeReq();
    if (!req->ready) {
      LOG(ERROR) << "InvokeMethod request for thread not stopped by event: " << *targetThread;
      return JDWP::ERR_INVALID_THREAD;
    }

    /*
     * We currently have a bug where we don't successfully resume the
     * target thread if the suspend count is too deep. We're expected to
     * require one "resume" for each "suspend", but when asked to execute
     * a method we have to resume fully and then re-suspend it back to the
     * same level. (The easiest way to cause this is to type "suspend"
     * multiple times in jdb.)
     *
     * It's unclear what this means when the event specifies "resume all"
     * and some threads are suspended more deeply than others. This is
     * a rare problem, so for now we just prevent it from hanging forever
     * by rejecting the method invocation request. Without this, we will
     * be stuck waiting on a suspended thread.
     */
    int suspend_count;
    {
      MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
      suspend_count = targetThread->GetSuspendCount();
    }
    if (suspend_count > 1) {
      LOG(ERROR) << *targetThread << " suspend count too deep for method invocation: " << suspend_count;
      return JDWP::ERR_THREAD_SUSPENDED;  // Probably not expected here.
    }

    JDWP::JdwpError status;
    mirror::Object* receiver = gRegistry->Get<mirror::Object*>(object_id);
    if (receiver == ObjectRegistry::kInvalidObject) {
      return JDWP::ERR_INVALID_OBJECT;
    }

    mirror::Object* thread = gRegistry->Get<mirror::Object*>(thread_id);
    if (thread == ObjectRegistry::kInvalidObject) {
      return JDWP::ERR_INVALID_OBJECT;
    }
    // TODO: check that 'thread' is actually a java.lang.Thread!

    mirror::Class* c = DecodeClass(class_id, status);
    if (c == NULL) {
      return status;
    }

    // A static method must have no receiver and be declared exactly on 'c'; an instance
    // method must have a receiver and be declared on 'c' or a superclass/interface of it.
    mirror::ArtMethod* m = FromMethodId(method_id);
    if (m->IsStatic() != (receiver == NULL)) {
      return JDWP::ERR_INVALID_METHODID;
    }
    if (m->IsStatic()) {
      if (m->GetDeclaringClass() != c) {
        return JDWP::ERR_INVALID_METHODID;
      }
    } else {
      if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
        return JDWP::ERR_INVALID_METHODID;
      }
    }

    // Check the argument list matches the method.
    MethodHelper mh(m);
    if (mh.GetShortyLength() - 1 != arg_count) {
      return JDWP::ERR_ILLEGAL_ARGUMENT;
    }
    const char* shorty = mh.GetShorty();
    const DexFile::TypeList* types = mh.GetParameterTypeList();
    for (size_t i = 0; i < arg_count; ++i) {
      if (shorty[i + 1] != JdwpTagToShortyChar(arg_types[i])) {
        return JDWP::ERR_ILLEGAL_ARGUMENT;
      }

      if (shorty[i + 1] == 'L') {
        // Did we really get an argument of an appropriate reference type?
        mirror::Class* parameter_type = mh.GetClassFromTypeIdx(types->GetTypeItem(i).type_idx_);
        mirror::Object* argument = gRegistry->Get<mirror::Object*>(arg_values[i]);
        if (argument == ObjectRegistry::kInvalidObject) {
          return JDWP::ERR_INVALID_OBJECT;
        }
        if (!argument->InstanceOf(parameter_type)) {
          return JDWP::ERR_ILLEGAL_ARGUMENT;
        }

        // Turn the on-the-wire ObjectId into a jobject.
        jvalue& v = reinterpret_cast<jvalue&>(arg_values[i]);
        v.l = gRegistry->GetJObject(arg_values[i]);
      }
    }

    // Hand the validated request to the event thread via its DebugInvokeReq.
    req->receiver_ = receiver;
    req->thread_ = thread;
    req->class_ = c;
    req->method_ = m;
    req->arg_count_ = arg_count;
    req->arg_values_ = arg_values;
    req->options_ = options;
    req->invoke_needed_ = true;
  }

  // The fact that we've released the thread list lock is a bit risky --- if the thread goes
  // away we're sitting high and dry -- but we must release this before the ResumeAllThreads
  // call, and it's unwise to hold it during WaitForSuspend.

  {
    /*
     * We change our (JDWP thread) status, which should be THREAD_RUNNING,
     * so we can suspend for a GC if the invoke request causes us to
     * run out of memory. It's also a good idea to change it before locking
     * the invokeReq mutex, although that should never be held for long.
     */
    self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend);

    VLOG(jdwp) << "    Transferring control to event thread";
    {
      MutexLock mu(self, req->lock_);

      if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
        VLOG(jdwp) << "      Resuming all threads";
        thread_list->UndoDebuggerSuspensions();
      } else {
        VLOG(jdwp) << "      Resuming event thread only";
        thread_list->Resume(targetThread, true);
      }

      // Wait for the request to finish executing.
      while (req->invoke_needed_) {
        req->cond_.Wait(self);
      }
    }
    VLOG(jdwp) << "    Control has returned from event thread";

    /* wait for thread to re-suspend itself */
    SuspendThread(thread_id, false /* request_suspension */);
    self->TransitionFromSuspendedToRunnable();
  }

  /*
   * Suspend the threads. We waited for the target thread to suspend
   * itself, so all we need to do is suspend the others.
   *
   * The suspendAllThreads() call will double-suspend the event thread,
   * so we want to resume the target thread once to keep the books straight.
   */
  if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
    self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
    VLOG(jdwp) << "      Suspending all threads";
    thread_list->SuspendAllForDebugger();
    self->TransitionFromSuspendedToRunnable();
    VLOG(jdwp) << "      Resuming event thread to balance the count";
    thread_list->Resume(targetThread, true);
  }

  // Copy the result.
  *pResultTag = req->result_tag;
  if (IsPrimitiveTag(req->result_tag)) {
    *pResultValue = req->result_value.GetJ();
  } else {
    // Reference results are returned as registry ids.
    *pResultValue = gRegistry->Add(req->result_value.GetL());
  }
  *pExceptionId = req->exception;
  return req->error;
}
2805
// Runs the invocation described by 'pReq' on the current (event) thread, on behalf of
// Dbg::InvokeMethod. Any pending exception is saved before the call and restored afterwards;
// the result (or the exception thrown by the invoke) is recorded back into 'pReq'.
void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
  ScopedObjectAccess soa(Thread::Current());

  // We can be called while an exception is pending. We need
  // to preserve that across the method invocation.
  SirtRef<mirror::Object> old_throw_this_object(soa.Self(), NULL);
  SirtRef<mirror::ArtMethod> old_throw_method(soa.Self(), NULL);
  SirtRef<mirror::Throwable> old_exception(soa.Self(), NULL);
  uint32_t old_throw_dex_pc;
  {
    // Save the pending exception and its throw location, then clear it for the invoke.
    ThrowLocation old_throw_location;
    mirror::Throwable* old_exception_obj = soa.Self()->GetException(&old_throw_location);
    old_throw_this_object.reset(old_throw_location.GetThis());
    old_throw_method.reset(old_throw_location.GetMethod());
    old_exception.reset(old_exception_obj);
    old_throw_dex_pc = old_throw_location.GetDexPc();
    soa.Self()->ClearException();
  }

  // Translate the method through the vtable, unless the debugger wants to suppress it.
  mirror::ArtMethod* m = pReq->method_;
  if ((pReq->options_ & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver_ != NULL) {
    mirror::ArtMethod* actual_method = pReq->class_->FindVirtualMethodForVirtualOrInterface(pReq->method_);
    if (actual_method != m) {
      VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m) << " to " << PrettyMethod(actual_method);
      m = actual_method;
    }
  }
  VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m)
             << " receiver=" << pReq->receiver_
             << " arg_count=" << pReq->arg_count_;
  CHECK(m != NULL);

  // InvokeMethod stored jvalues into the uint64_t argument slots; this must hold for that.
  CHECK_EQ(sizeof(jvalue), sizeof(uint64_t));

  MethodHelper mh(m);
  ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
  arg_array.BuildArgArray(soa, pReq->receiver_, reinterpret_cast<jvalue*>(pReq->arg_values_));
  InvokeWithArgArray(soa, m, &arg_array, &pReq->result_value, mh.GetShorty()[0]);

  // Capture (and clear) whatever the invoke threw, and record the result for InvokeMethod.
  mirror::Throwable* exception = soa.Self()->GetException(NULL);
  soa.Self()->ClearException();
  pReq->exception = gRegistry->Add(exception);
  pReq->result_tag = BasicTagFromDescriptor(MethodHelper(m).GetShorty());
  if (pReq->exception != 0) {
    VLOG(jdwp) << "  JDWP invocation returning with exception=" << exception
               << " " << exception->Dump();
    pReq->result_value.SetJ(0);
  } else if (pReq->result_tag == JDWP::JT_OBJECT) {
    /* if no exception thrown, examine object result more closely */
    JDWP::JdwpTag new_tag = TagFromObject(pReq->result_value.GetL());
    if (new_tag != pReq->result_tag) {
      VLOG(jdwp) << "  JDWP promoted result from " << pReq->result_tag << " to " << new_tag;
      pReq->result_tag = new_tag;
    }

    /*
     * Register the object. We don't actually need an ObjectId yet,
     * but we do need to be sure that the GC won't move or discard the
     * object when we switch out of RUNNING. The ObjectId conversion
     * will add the object to the "do not touch" list.
     *
     * We can't use the "tracked allocation" mechanism here because
     * the object is going to be handed off to a different thread.
     */
    gRegistry->Add(pReq->result_value.GetL());
  }

  // Restore the exception that was pending before the invoke, if any.
  if (old_exception.get() != NULL) {
    ThrowLocation gc_safe_throw_location(old_throw_this_object.get(), old_throw_method.get(),
                                         old_throw_dex_pc);
    soa.Self()->SetException(gc_safe_throw_location, old_exception.get());
  }
}
2880
2881 /*
2882 * "request" contains a full JDWP packet, possibly with multiple chunks. We
2883 * need to process each, accumulate the replies, and ship the whole thing
2884 * back.
2885 *
2886 * Returns "true" if we have a reply. The reply buffer is newly allocated,
2887 * and includes the chunk type/length, followed by the data.
2888 *
2889 * OLD-TODO: we currently assume that the request and reply include a single
2890 * chunk. If this becomes inconvenient we will need to adapt.
2891 */
DdmHandlePacket(JDWP::Request & request,uint8_t ** pReplyBuf,int * pReplyLen)2892 bool Dbg::DdmHandlePacket(JDWP::Request& request, uint8_t** pReplyBuf, int* pReplyLen) {
2893 Thread* self = Thread::Current();
2894 JNIEnv* env = self->GetJniEnv();
2895
2896 uint32_t type = request.ReadUnsigned32("type");
2897 uint32_t length = request.ReadUnsigned32("length");
2898
2899 // Create a byte[] corresponding to 'request'.
2900 size_t request_length = request.size();
2901 ScopedLocalRef<jbyteArray> dataArray(env, env->NewByteArray(request_length));
2902 if (dataArray.get() == NULL) {
2903 LOG(WARNING) << "byte[] allocation failed: " << request_length;
2904 env->ExceptionClear();
2905 return false;
2906 }
2907 env->SetByteArrayRegion(dataArray.get(), 0, request_length, reinterpret_cast<const jbyte*>(request.data()));
2908 request.Skip(request_length);
2909
2910 // Run through and find all chunks. [Currently just find the first.]
2911 ScopedByteArrayRO contents(env, dataArray.get());
2912 if (length != request_length) {
2913 LOG(WARNING) << StringPrintf("bad chunk found (len=%u pktLen=%d)", length, request_length);
2914 return false;
2915 }
2916
2917 // Call "private static Chunk dispatch(int type, byte[] data, int offset, int length)".
2918 ScopedLocalRef<jobject> chunk(env, env->CallStaticObjectMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
2919 WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_dispatch,
2920 type, dataArray.get(), 0, length));
2921 if (env->ExceptionCheck()) {
2922 LOG(INFO) << StringPrintf("Exception thrown by dispatcher for 0x%08x", type);
2923 env->ExceptionDescribe();
2924 env->ExceptionClear();
2925 return false;
2926 }
2927
2928 if (chunk.get() == NULL) {
2929 return false;
2930 }
2931
2932 /*
2933 * Pull the pieces out of the chunk. We copy the results into a
2934 * newly-allocated buffer that the caller can free. We don't want to
2935 * continue using the Chunk object because nothing has a reference to it.
2936 *
2937 * We could avoid this by returning type/data/offset/length and having
2938 * the caller be aware of the object lifetime issues, but that
2939 * integrates the JDWP code more tightly into the rest of the runtime, and doesn't work
2940 * if we have responses for multiple chunks.
2941 *
2942 * So we're pretty much stuck with copying data around multiple times.
2943 */
2944 ScopedLocalRef<jbyteArray> replyData(env, reinterpret_cast<jbyteArray>(env->GetObjectField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data)));
2945 jint offset = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_offset);
2946 length = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_length);
2947 type = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type);
2948
2949 VLOG(jdwp) << StringPrintf("DDM reply: type=0x%08x data=%p offset=%d length=%d", type, replyData.get(), offset, length);
2950 if (length == 0 || replyData.get() == NULL) {
2951 return false;
2952 }
2953
2954 const int kChunkHdrLen = 8;
2955 uint8_t* reply = new uint8_t[length + kChunkHdrLen];
2956 if (reply == NULL) {
2957 LOG(WARNING) << "malloc failed: " << (length + kChunkHdrLen);
2958 return false;
2959 }
2960 JDWP::Set4BE(reply + 0, type);
2961 JDWP::Set4BE(reply + 4, length);
2962 env->GetByteArrayRegion(replyData.get(), offset, length, reinterpret_cast<jbyte*>(reply + kChunkHdrLen));
2963
2964 *pReplyBuf = reply;
2965 *pReplyLen = length + kChunkHdrLen;
2966
2967 VLOG(jdwp) << StringPrintf("dvmHandleDdm returning type=%.4s %p len=%d", reinterpret_cast<char*>(reply), reply, length);
2968 return true;
2969 }
2970
DdmBroadcast(bool connect)2971 void Dbg::DdmBroadcast(bool connect) {
2972 VLOG(jdwp) << "Broadcasting DDM " << (connect ? "connect" : "disconnect") << "...";
2973
2974 Thread* self = Thread::Current();
2975 if (self->GetState() != kRunnable) {
2976 LOG(ERROR) << "DDM broadcast in thread state " << self->GetState();
2977 /* try anyway? */
2978 }
2979
2980 JNIEnv* env = self->GetJniEnv();
2981 jint event = connect ? 1 /*DdmServer.CONNECTED*/ : 2 /*DdmServer.DISCONNECTED*/;
2982 env->CallStaticVoidMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
2983 WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_broadcast,
2984 event);
2985 if (env->ExceptionCheck()) {
2986 LOG(ERROR) << "DdmServer.broadcast " << event << " failed";
2987 env->ExceptionDescribe();
2988 env->ExceptionClear();
2989 }
2990 }
2991
// Broadcasts a DDM "connected" event to DdmServer.
void Dbg::DdmConnected() {
  Dbg::DdmBroadcast(true);
}
2995
// Broadcasts a DDM "disconnected" event and turns off thread start/stop notifications.
void Dbg::DdmDisconnected() {
  Dbg::DdmBroadcast(false);
  gDdmThreadNotification = false;
}
3000
3001 /*
3002 * Send a notification when a thread starts, stops, or changes its name.
3003 *
3004 * Because we broadcast the full set of threads when the notifications are
3005 * first enabled, it's possible for "thread" to be actively executing.
3006 */
void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) {
  // Notifications are only sent when the debugger has enabled them.
  if (!gDdmThreadNotification) {
    return;
  }

  if (type == CHUNK_TYPE("THDE")) {
    // Thread death: payload is just the 4-byte thread id.
    uint8_t buf[4];
    JDWP::Set4BE(&buf[0], t->GetThinLockId());
    Dbg::DdmSendChunk(CHUNK_TYPE("THDE"), 4, buf);
  } else {
    CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
    ScopedObjectAccessUnchecked soa(Thread::Current());
    // SirtRef keeps the name string visible to the GC while we read it.
    SirtRef<mirror::String> name(soa.Self(), t->GetThreadName(soa));
    size_t char_count = (name.get() != NULL) ? name->GetLength() : 0;
    const jchar* chars = (name.get() != NULL) ? name->GetCharArray()->GetData() : NULL;

    // Thread creation/name change: 4-byte thread id followed by the UTF-16BE thread name.
    std::vector<uint8_t> bytes;
    JDWP::Append4BE(bytes, t->GetThinLockId());
    JDWP::AppendUtf16BE(bytes, chars, char_count);
    CHECK_EQ(bytes.size(), char_count*2 + sizeof(uint32_t)*2);
    Dbg::DdmSendChunk(type, bytes);
  }
}
3030
void Dbg::DdmSetThreadNotification(bool enable) {
  // Enable/disable thread notifications.
  gDdmThreadNotification = enable;
  if (enable) {
    // Suspend the VM then post thread start notifications for all threads. Threads attaching will
    // see a suspension in progress and block until that ends. They then post their own start
    // notification.
    SuspendVM();
    std::list<Thread*> threads;
    Thread* self = Thread::Current();
    {
      // Snapshot the thread list under the lock; the notifications are sent after releasing it.
      MutexLock mu(self, *Locks::thread_list_lock_);
      threads = Runtime::Current()->GetThreadList()->GetList();
    }
    {
      ScopedObjectAccess soa(self);
      for (Thread* thread : threads) {
        Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR"));
      }
    }
    ResumeVM();
  }
}
3054
// Reports a thread start ("THCR") or death to the JDWP layer (when a debugger is attached) and
// then to the DDM thread-notification machinery.
void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) {
  if (IsDebuggerActive()) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    JDWP::ObjectId id = gRegistry->Add(t->GetPeer());
    gJdwpState->PostThreadChange(id, type == CHUNK_TYPE("THCR"));
  }
  Dbg::DdmSendThreadNotification(t, type);
}
3063
// Convenience wrapper: report that thread 't' has started.
void Dbg::PostThreadStart(Thread* t) {
  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THCR"));
}
3067
// Convenience wrapper: report that thread 't' has died.
void Dbg::PostThreadDeath(Thread* t) {
  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE"));
}
3071
DdmSendChunk(uint32_t type,size_t byte_count,const uint8_t * buf)3072 void Dbg::DdmSendChunk(uint32_t type, size_t byte_count, const uint8_t* buf) {
3073 CHECK(buf != NULL);
3074 iovec vec[1];
3075 vec[0].iov_base = reinterpret_cast<void*>(const_cast<uint8_t*>(buf));
3076 vec[0].iov_len = byte_count;
3077 Dbg::DdmSendChunkV(type, vec, 1);
3078 }
3079
DdmSendChunk(uint32_t type,const std::vector<uint8_t> & bytes)3080 void Dbg::DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes) {
3081 DdmSendChunk(type, bytes.size(), &bytes[0]);
3082 }
3083
DdmSendChunkV(uint32_t type,const iovec * iov,int iov_count)3084 void Dbg::DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) {
3085 if (gJdwpState == NULL) {
3086 VLOG(jdwp) << "Debugger thread not active, ignoring DDM send: " << type;
3087 } else {
3088 gJdwpState->DdmSendChunkV(type, iov, iov_count);
3089 }
3090 }
3091
DdmHandleHpifChunk(HpifWhen when)3092 int Dbg::DdmHandleHpifChunk(HpifWhen when) {
3093 if (when == HPIF_WHEN_NOW) {
3094 DdmSendHeapInfo(when);
3095 return true;
3096 }
3097
3098 if (when != HPIF_WHEN_NEVER && when != HPIF_WHEN_NEXT_GC && when != HPIF_WHEN_EVERY_GC) {
3099 LOG(ERROR) << "invalid HpifWhen value: " << static_cast<int>(when);
3100 return false;
3101 }
3102
3103 gDdmHpifWhen = when;
3104 return true;
3105 }
3106
DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when,Dbg::HpsgWhat what,bool native)3107 bool Dbg::DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when, Dbg::HpsgWhat what, bool native) {
3108 if (when != HPSG_WHEN_NEVER && when != HPSG_WHEN_EVERY_GC) {
3109 LOG(ERROR) << "invalid HpsgWhen value: " << static_cast<int>(when);
3110 return false;
3111 }
3112
3113 if (what != HPSG_WHAT_MERGED_OBJECTS && what != HPSG_WHAT_DISTINCT_OBJECTS) {
3114 LOG(ERROR) << "invalid HpsgWhat value: " << static_cast<int>(what);
3115 return false;
3116 }
3117
3118 if (native) {
3119 gDdmNhsgWhen = when;
3120 gDdmNhsgWhat = what;
3121 } else {
3122 gDdmHpsgWhen = when;
3123 gDdmHpsgWhat = what;
3124 }
3125 return true;
3126 }
3127
// Builds and sends an HPIF chunk summarizing the heap. 'reason' is echoed back to the server
// as the capture reason.
void Dbg::DdmSendHeapInfo(HpifWhen reason) {
  // If there's a one-shot 'when', reset it.
  if (reason == gDdmHpifWhen) {
    if (gDdmHpifWhen == HPIF_WHEN_NEXT_GC) {
      gDdmHpifWhen = HPIF_WHEN_NEVER;
    }
  }

  /*
   * Chunk HPIF (client --> server)
   *
   * Heap Info. General information about the heap,
   * suitable for a summary display.
   *
   * [u4]: number of heaps
   *
   * For each heap:
   * [u4]: heap ID
   * [u8]: timestamp in ms since Unix epoch
   * [u1]: capture reason (same as 'when' value from server)
   * [u4]: max heap size in bytes (-Xmx)
   * [u4]: current heap size in bytes
   * [u4]: current number of bytes allocated
   * [u4]: current number of objects allocated
   */
  // The append order below must match the wire format documented above.
  uint8_t heap_count = 1;
  gc::Heap* heap = Runtime::Current()->GetHeap();
  std::vector<uint8_t> bytes;
  JDWP::Append4BE(bytes, heap_count);
  JDWP::Append4BE(bytes, 1);  // Heap id (bogus; we only have one heap).
  JDWP::Append8BE(bytes, MilliTime());
  JDWP::Append1BE(bytes, reason);
  JDWP::Append4BE(bytes, heap->GetMaxMemory());  // Max allowed heap size in bytes.
  JDWP::Append4BE(bytes, heap->GetTotalMemory());  // Current heap size in bytes.
  JDWP::Append4BE(bytes, heap->GetBytesAllocated());
  JDWP::Append4BE(bytes, heap->GetObjectsAllocated());
  CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4)));
  Dbg::DdmSendChunk(CHUNK_TYPE("HPIF"), bytes);
}
3167
// Per-object "solidity" values reported in HPSG/NHSG heap segment chunks (encoded into the
// low 3 bits of the state byte by HPSG_STATE below).
enum HpsgSolidity {
  SOLIDITY_FREE = 0,
  SOLIDITY_HARD = 1,
  SOLIDITY_SOFT = 2,
  SOLIDITY_WEAK = 3,
  SOLIDITY_PHANTOM = 4,
  SOLIDITY_FINALIZABLE = 5,
  SOLIDITY_SWEEP = 6,
};
3177
// Per-object "kind" values reported in HPSG/NHSG heap segment chunks (encoded into bits 3-5
// of the state byte by HPSG_STATE below).
enum HpsgKind {
  KIND_OBJECT = 0,
  KIND_CLASS_OBJECT = 1,
  KIND_ARRAY_1 = 2,
  KIND_ARRAY_2 = 3,
  KIND_ARRAY_4 = 4,
  KIND_ARRAY_8 = 5,
  KIND_UNKNOWN = 6,
  KIND_NATIVE = 7,
};
3188
// HPSG state byte layout: bits 0-2 hold the solidity, bits 3-5 the kind; the top bit
// (HPSG_PARTIAL) marks a partial/continued chunk.
#define HPSG_PARTIAL (1<<7)
#define HPSG_STATE(solidity, kind) ((uint8_t)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))
3191
3192 class HeapChunkContext {
3193 public:
3194 // Maximum chunk size. Obtain this from the formula:
3195 // (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2
HeapChunkContext(bool merge,bool native)3196 HeapChunkContext(bool merge, bool native)
3197 : buf_(16384 - 16),
3198 type_(0),
3199 merge_(merge) {
3200 Reset();
3201 if (native) {
3202 type_ = CHUNK_TYPE("NHSG");
3203 } else {
3204 type_ = merge ? CHUNK_TYPE("HPSG") : CHUNK_TYPE("HPSO");
3205 }
3206 }
3207
~HeapChunkContext()3208 ~HeapChunkContext() {
3209 if (p_ > &buf_[0]) {
3210 Flush();
3211 }
3212 }
3213
EnsureHeader(const void * chunk_ptr)3214 void EnsureHeader(const void* chunk_ptr) {
3215 if (!needHeader_) {
3216 return;
3217 }
3218
3219 // Start a new HPSx chunk.
3220 JDWP::Write4BE(&p_, 1); // Heap id (bogus; we only have one heap).
3221 JDWP::Write1BE(&p_, 8); // Size of allocation unit, in bytes.
3222
3223 JDWP::Write4BE(&p_, reinterpret_cast<uintptr_t>(chunk_ptr)); // virtual address of segment start.
3224 JDWP::Write4BE(&p_, 0); // offset of this piece (relative to the virtual address).
3225 // [u4]: length of piece, in allocation units
3226 // We won't know this until we're done, so save the offset and stuff in a dummy value.
3227 pieceLenField_ = p_;
3228 JDWP::Write4BE(&p_, 0x55555555);
3229 needHeader_ = false;
3230 }
3231
Flush()3232 void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
3233 if (pieceLenField_ == NULL) {
3234 // Flush immediately post Reset (maybe back-to-back Flush). Ignore.
3235 CHECK(needHeader_);
3236 return;
3237 }
3238 // Patch the "length of piece" field.
3239 CHECK_LE(&buf_[0], pieceLenField_);
3240 CHECK_LE(pieceLenField_, p_);
3241 JDWP::Set4BE(pieceLenField_, totalAllocationUnits_);
3242
3243 Dbg::DdmSendChunk(type_, p_ - &buf_[0], &buf_[0]);
3244 Reset();
3245 }
3246
HeapChunkCallback(void * start,void * end,size_t used_bytes,void * arg)3247 static void HeapChunkCallback(void* start, void* end, size_t used_bytes, void* arg)
3248 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
3249 Locks::mutator_lock_) {
3250 reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkCallback(start, end, used_bytes);
3251 }
3252
3253 private:
3254 enum { ALLOCATION_UNIT_SIZE = 8 };
3255
Reset()3256 void Reset() {
3257 p_ = &buf_[0];
3258 startOfNextMemoryChunk_ = NULL;
3259 totalAllocationUnits_ = 0;
3260 needHeader_ = true;
3261 pieceLenField_ = NULL;
3262 }
3263
  // Per-chunk walk callback (instance side). 'start' is the first byte of the
  // chunk; 'used_bytes' is its allocated payload size (0 for free regions, and
  // start == NULL with used_bytes == 0 marks the start of a new heap).
  // NOTE(review): the start > startOfNextMemoryChunk_ comparison below looks
  // like it assumes chunks are visited in increasing address order — confirm
  // against the dlmalloc/large-object walkers.
  void HeapChunkCallback(void* start, void* /*end*/, size_t used_bytes)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
                            Locks::mutator_lock_) {
    // Note: heap call backs cannot manipulate the heap upon which they are crawling, care is taken
    // in the following code not to allocate memory, by ensuring buf_ is of the correct size
    if (used_bytes == 0) {
      if (start == NULL) {
        // Reset for start of new heap.
        startOfNextMemoryChunk_ = NULL;
        Flush();
      }
      // Only process in use memory so that free region information
      // also includes dlmalloc book keeping.
      return;
    }

    /* If we're looking at the native heap, we'll just return
     * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks
     */
    bool native = type_ == CHUNK_TYPE("NHSG");

    if (startOfNextMemoryChunk_ != NULL) {
      // Transmit any pending free memory. Native free memory of
      // over kMaxFreeLen could be because of the use of mmaps, so
      // don't report. If not free memory then start a new segment.
      bool flush = true;
      if (start > startOfNextMemoryChunk_) {
        const size_t kMaxFreeLen = 2 * kPageSize;
        void* freeStart = startOfNextMemoryChunk_;
        void* freeEnd = start;
        size_t freeLen = reinterpret_cast<char*>(freeEnd) - reinterpret_cast<char*>(freeStart);
        if (!native || freeLen < kMaxFreeLen) {
          // Report the gap as a free run rather than starting a new segment.
          AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), freeStart, freeLen);
          flush = false;
        }
      }
      if (flush) {
        startOfNextMemoryChunk_ = NULL;
        Flush();
      }
    }
    const mirror::Object* obj = reinterpret_cast<const mirror::Object*>(start);

    // Determine the type of this chunk.
    // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
    // If it's the same, we should combine them.
    uint8_t state = ExamineObject(obj, native);
    // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
    // allocation then the first sizeof(size_t) may belong to it.
    const size_t dlMallocOverhead = sizeof(size_t);
    AppendChunk(state, start, used_bytes + dlMallocOverhead);
    startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + dlMallocOverhead;
  }
3317
  // Appends one (state, length) run to the buffer, flushing first if there is
  // not enough space. 'length' is in bytes; it is converted to allocation
  // units (rounded down) before encoding.
  void AppendChunk(uint8_t state, void* ptr, size_t length)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Make sure there's enough room left in the buffer.
    // We need to use two bytes for every fractional 256 allocation units used by the chunk plus
    // 17 bytes for any header.
    size_t needed = (((length/ALLOCATION_UNIT_SIZE + 255) / 256) * 2) + 17;
    size_t bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]);
    if (bytesLeft < needed) {
      Flush();
    }

    // Re-check: even an empty buffer may be too small for a huge chunk.
    bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]);
    if (bytesLeft < needed) {
      LOG(WARNING) << "Chunk is too big to transmit (chunk_len=" << length << ", "
          << needed << " bytes)";
      return;
    }
    EnsureHeader(ptr);
    // Write out the chunk description.
    length /= ALLOCATION_UNIT_SIZE;   // Convert to allocation units.
    totalAllocationUnits_ += length;
    // Runs longer than 256 units are encoded as a chain of HPSG_PARTIAL
    // entries; each entry's length byte holds (units - 1), so 255 means 256.
    while (length > 256) {
      *p_++ = state | HPSG_PARTIAL;
      *p_++ = 255;     // length - 1
      length -= 256;
    }
    *p_++ = state;
    *p_++ = length - 1;
  }
3347
ExamineObject(const mirror::Object * o,bool is_native_heap)3348 uint8_t ExamineObject(const mirror::Object* o, bool is_native_heap)
3349 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
3350 if (o == NULL) {
3351 return HPSG_STATE(SOLIDITY_FREE, 0);
3352 }
3353
3354 // It's an allocated chunk. Figure out what it is.
3355
3356 // If we're looking at the native heap, we'll just return
3357 // (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks.
3358 if (is_native_heap) {
3359 return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
3360 }
3361
3362 if (!Runtime::Current()->GetHeap()->IsLiveObjectLocked(o)) {
3363 return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
3364 }
3365
3366 mirror::Class* c = o->GetClass();
3367 if (c == NULL) {
3368 // The object was probably just created but hasn't been initialized yet.
3369 return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
3370 }
3371
3372 if (!Runtime::Current()->GetHeap()->IsHeapAddress(c)) {
3373 LOG(ERROR) << "Invalid class for managed heap object: " << o << " " << c;
3374 return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
3375 }
3376
3377 if (c->IsClassClass()) {
3378 return HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
3379 }
3380
3381 if (c->IsArrayClass()) {
3382 if (o->IsObjectArray()) {
3383 return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
3384 }
3385 switch (c->GetComponentSize()) {
3386 case 1: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
3387 case 2: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
3388 case 4: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
3389 case 8: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
3390 }
3391 }
3392
3393 return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
3394 }
3395
  std::vector<uint8_t> buf_;        // Output buffer for the chunk being built.
  uint8_t* p_;                      // Write cursor into buf_.
  uint8_t* pieceLenField_;          // Location of the "length of piece" field; patched by Flush().
  void* startOfNextMemoryChunk_;    // Expected start of the next chunk; NULL when unknown/reset.
  size_t totalAllocationUnits_;     // Units appended to the current piece (backs pieceLenField_).
  uint32_t type_;                   // DDMS chunk type: NHSG, HPSG or HPSO.
  bool merge_;                      // True when HPSG_WHAT_MERGED_OBJECTS was requested.
  bool needHeader_;                 // True when the next append must first write a chunk header.

  DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
};
3407
DdmSendHeapSegments(bool native)3408 void Dbg::DdmSendHeapSegments(bool native) {
3409 Dbg::HpsgWhen when;
3410 Dbg::HpsgWhat what;
3411 if (!native) {
3412 when = gDdmHpsgWhen;
3413 what = gDdmHpsgWhat;
3414 } else {
3415 when = gDdmNhsgWhen;
3416 what = gDdmNhsgWhat;
3417 }
3418 if (when == HPSG_WHEN_NEVER) {
3419 return;
3420 }
3421
3422 // Figure out what kind of chunks we'll be sending.
3423 CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS) << static_cast<int>(what);
3424
3425 // First, send a heap start chunk.
3426 uint8_t heap_id[4];
3427 JDWP::Set4BE(&heap_id[0], 1); // Heap id (bogus; we only have one heap).
3428 Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"), sizeof(heap_id), heap_id);
3429
3430 // Send a series of heap segment chunks.
3431 HeapChunkContext context((what == HPSG_WHAT_MERGED_OBJECTS), native);
3432 if (native) {
3433 dlmalloc_inspect_all(HeapChunkContext::HeapChunkCallback, &context);
3434 } else {
3435 gc::Heap* heap = Runtime::Current()->GetHeap();
3436 const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces();
3437 Thread* self = Thread::Current();
3438 ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
3439 typedef std::vector<gc::space::ContinuousSpace*>::const_iterator It;
3440 for (It cur = spaces.begin(), end = spaces.end(); cur != end; ++cur) {
3441 if ((*cur)->IsDlMallocSpace()) {
3442 (*cur)->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
3443 }
3444 }
3445 // Walk the large objects, these are not in the AllocSpace.
3446 heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
3447 }
3448
3449 // Finally, send a heap end chunk.
3450 Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id);
3451 }
3452
GetAllocTrackerMax()3453 static size_t GetAllocTrackerMax() {
3454 #ifdef HAVE_ANDROID_OS
3455 // Check whether there's a system property overriding the number of records.
3456 const char* propertyName = "dalvik.vm.allocTrackerMax";
3457 char allocRecordMaxString[PROPERTY_VALUE_MAX];
3458 if (property_get(propertyName, allocRecordMaxString, "") > 0) {
3459 char* end;
3460 size_t value = strtoul(allocRecordMaxString, &end, 10);
3461 if (*end != '\0') {
3462 LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString
3463 << "' --- invalid";
3464 return kDefaultNumAllocRecords;
3465 }
3466 if (!IsPowerOfTwo(value)) {
3467 LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString
3468 << "' --- not power of two";
3469 return kDefaultNumAllocRecords;
3470 }
3471 return value;
3472 }
3473 #endif
3474 return kDefaultNumAllocRecords;
3475 }
3476
SetAllocTrackingEnabled(bool enabled)3477 void Dbg::SetAllocTrackingEnabled(bool enabled) {
3478 MutexLock mu(Thread::Current(), gAllocTrackerLock);
3479 if (enabled) {
3480 if (recent_allocation_records_ == NULL) {
3481 gAllocRecordMax = GetAllocTrackerMax();
3482 LOG(INFO) << "Enabling alloc tracker (" << gAllocRecordMax << " entries of "
3483 << kMaxAllocRecordStackDepth << " frames, taking "
3484 << PrettySize(sizeof(AllocRecord) * gAllocRecordMax) << ")";
3485 gAllocRecordHead = gAllocRecordCount = 0;
3486 recent_allocation_records_ = new AllocRecord[gAllocRecordMax];
3487 CHECK(recent_allocation_records_ != NULL);
3488 }
3489 } else {
3490 delete[] recent_allocation_records_;
3491 recent_allocation_records_ = NULL;
3492 }
3493 }
3494
3495 struct AllocRecordStackVisitor : public StackVisitor {
AllocRecordStackVisitorart::AllocRecordStackVisitor3496 AllocRecordStackVisitor(Thread* thread, AllocRecord* record)
3497 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
3498 : StackVisitor(thread, NULL), record(record), depth(0) {}
3499
3500 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
3501 // annotalysis.
VisitFrameart::AllocRecordStackVisitor3502 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
3503 if (depth >= kMaxAllocRecordStackDepth) {
3504 return false;
3505 }
3506 mirror::ArtMethod* m = GetMethod();
3507 if (!m->IsRuntimeMethod()) {
3508 record->stack[depth].method = m;
3509 record->stack[depth].dex_pc = GetDexPc();
3510 ++depth;
3511 }
3512 return true;
3513 }
3514
~AllocRecordStackVisitorart::AllocRecordStackVisitor3515 ~AllocRecordStackVisitor() {
3516 // Clear out any unused stack trace elements.
3517 for (; depth < kMaxAllocRecordStackDepth; ++depth) {
3518 record->stack[depth].method = NULL;
3519 record->stack[depth].dex_pc = 0;
3520 }
3521 }
3522
3523 AllocRecord* record;
3524 size_t depth;
3525 };
3526
RecordAllocation(mirror::Class * type,size_t byte_count)3527 void Dbg::RecordAllocation(mirror::Class* type, size_t byte_count) {
3528 Thread* self = Thread::Current();
3529 CHECK(self != NULL);
3530
3531 MutexLock mu(self, gAllocTrackerLock);
3532 if (recent_allocation_records_ == NULL) {
3533 return;
3534 }
3535
3536 // Advance and clip.
3537 if (++gAllocRecordHead == gAllocRecordMax) {
3538 gAllocRecordHead = 0;
3539 }
3540
3541 // Fill in the basics.
3542 AllocRecord* record = &recent_allocation_records_[gAllocRecordHead];
3543 record->type = type;
3544 record->byte_count = byte_count;
3545 record->thin_lock_id = self->GetThinLockId();
3546
3547 // Fill in the stack trace.
3548 AllocRecordStackVisitor visitor(self, record);
3549 visitor.WalkStack();
3550
3551 if (gAllocRecordCount < gAllocRecordMax) {
3552 ++gAllocRecordCount;
3553 }
3554 }
3555
// Returns the index of the head element.
//
// We point at the most-recently-written record, so if gAllocRecordCount is 1
// we want to use the current element. Take "head+1" and subtract count
// from it.
//
// We need to handle underflow in our circular buffer, so we add
// gAllocRecordMax and then mask it back down.
// (The mask is only valid because gAllocRecordMax is a power of two; see
// GetAllocTrackerMax(), which enforces that.)
static inline int HeadIndex() EXCLUSIVE_LOCKS_REQUIRED(gAllocTrackerLock) {
  return (gAllocRecordHead+1 + gAllocRecordMax - gAllocRecordCount) & (gAllocRecordMax-1);
}
3567
DumpRecentAllocations()3568 void Dbg::DumpRecentAllocations() {
3569 ScopedObjectAccess soa(Thread::Current());
3570 MutexLock mu(soa.Self(), gAllocTrackerLock);
3571 if (recent_allocation_records_ == NULL) {
3572 LOG(INFO) << "Not recording tracked allocations";
3573 return;
3574 }
3575
3576 // "i" is the head of the list. We want to start at the end of the
3577 // list and move forward to the tail.
3578 size_t i = HeadIndex();
3579 size_t count = gAllocRecordCount;
3580
3581 LOG(INFO) << "Tracked allocations, (head=" << gAllocRecordHead << " count=" << count << ")";
3582 while (count--) {
3583 AllocRecord* record = &recent_allocation_records_[i];
3584
3585 LOG(INFO) << StringPrintf(" Thread %-2d %6zd bytes ", record->thin_lock_id, record->byte_count)
3586 << PrettyClass(record->type);
3587
3588 for (size_t stack_frame = 0; stack_frame < kMaxAllocRecordStackDepth; ++stack_frame) {
3589 const mirror::ArtMethod* m = record->stack[stack_frame].method;
3590 if (m == NULL) {
3591 break;
3592 }
3593 LOG(INFO) << " " << PrettyMethod(m) << " line " << record->stack[stack_frame].LineNumber();
3594 }
3595
3596 // pause periodically to help logcat catch up
3597 if ((count % 5) == 0) {
3598 usleep(40000);
3599 }
3600
3601 i = (i + 1) & (gAllocRecordMax-1);
3602 }
3603 }
3604
3605 class StringTable {
3606 public:
StringTable()3607 StringTable() {
3608 }
3609
Add(const char * s)3610 void Add(const char* s) {
3611 table_.insert(s);
3612 }
3613
IndexOf(const char * s) const3614 size_t IndexOf(const char* s) const {
3615 auto it = table_.find(s);
3616 if (it == table_.end()) {
3617 LOG(FATAL) << "IndexOf(\"" << s << "\") failed";
3618 }
3619 return std::distance(table_.begin(), it);
3620 }
3621
Size() const3622 size_t Size() const {
3623 return table_.size();
3624 }
3625
WriteTo(std::vector<uint8_t> & bytes) const3626 void WriteTo(std::vector<uint8_t>& bytes) const {
3627 for (const std::string& str : table_) {
3628 const char* s = str.c_str();
3629 size_t s_len = CountModifiedUtf8Chars(s);
3630 UniquePtr<uint16_t> s_utf16(new uint16_t[s_len]);
3631 ConvertModifiedUtf8ToUtf16(s_utf16.get(), s);
3632 JDWP::AppendUtf16BE(bytes, s_utf16.get(), s_len);
3633 }
3634 }
3635
3636 private:
3637 std::set<std::string> table_;
3638 DISALLOW_COPY_AND_ASSIGN(StringTable);
3639 };
3640
3641 /*
3642 * The data we send to DDMS contains everything we have recorded.
3643 *
3644 * Message header (all values big-endian):
3645 * (1b) message header len (to allow future expansion); includes itself
3646 * (1b) entry header len
3647 * (1b) stack frame len
3648 * (2b) number of entries
3649 * (4b) offset to string table from start of message
3650 * (2b) number of class name strings
3651 * (2b) number of method name strings
3652 * (2b) number of source file name strings
3653 * For each entry:
3654 * (4b) total allocation size
3655 * (2b) thread id
3656 * (2b) allocated object's class name index
3657 * (1b) stack depth
3658 * For each stack frame:
3659 * (2b) method's class name
3660 * (2b) method name
3661 * (2b) method source file
3662 * (2b) line number, clipped to 32767; -2 if native; -1 if no source
3663 * (xb) class name strings
3664 * (xb) method name strings
3665 * (xb) source file strings
3666 *
3667 * As with other DDM traffic, strings are sent as a 4-byte length
3668 * followed by UTF-16 data.
3669 *
3670 * We send up 16-bit unsigned indexes into string tables. In theory there
3671 * can be (kMaxAllocRecordStackDepth * gAllocRecordMax) unique strings in
3672 * each table, but in practice there should be far fewer.
3673 *
3674 * The chief reason for using a string table here is to keep the size of
3675 * the DDMS message to a minimum. This is partly to make the protocol
3676 * efficient, but also because we have to form the whole thing up all at
3677 * once in a memory buffer.
3678 *
3679 * We use separate string tables for class names, method names, and source
3680 * files to keep the indexes small. There will generally be no overlap
3681 * between the contents of these tables.
3682 */
// Serializes all tracked allocation records into the DDMS wire format
// documented in the comment above, returning them as a new Java byte[]
// (or NULL if the JNI allocation fails).
jbyteArray Dbg::GetRecentAllocations() {
  // Debug aid: flip to true to also dump the records to the log.
  if (false) {
    DumpRecentAllocations();
  }

  Thread* self = Thread::Current();
  std::vector<uint8_t> bytes;
  {
    MutexLock mu(self, gAllocTrackerLock);
    //
    // Part 1: generate string tables.
    //
    StringTable class_names;
    StringTable method_names;
    StringTable filenames;

    // Walk the ring buffer oldest-to-newest, interning every class, method
    // and source-file name that part 2 will reference by index.
    int count = gAllocRecordCount;
    int idx = HeadIndex();
    while (count--) {
      AllocRecord* record = &recent_allocation_records_[idx];

      class_names.Add(ClassHelper(record->type).GetDescriptor());

      MethodHelper mh;
      for (size_t i = 0; i < kMaxAllocRecordStackDepth; i++) {
        mirror::ArtMethod* m = record->stack[i].method;
        if (m != NULL) {
          mh.ChangeMethod(m);
          class_names.Add(mh.GetDeclaringClassDescriptor());
          method_names.Add(mh.GetName());
          filenames.Add(mh.GetDeclaringClassSourceFile());
        }
      }

      idx = (idx + 1) & (gAllocRecordMax-1);
    }

    LOG(INFO) << "allocation records: " << gAllocRecordCount;

    //
    // Part 2: Generate the output and store it in the buffer.
    //

    // (1b) message header len (to allow future expansion); includes itself
    // (1b) entry header len
    // (1b) stack frame len
    const int kMessageHeaderLen = 15;
    const int kEntryHeaderLen = 9;
    const int kStackFrameLen = 8;
    JDWP::Append1BE(bytes, kMessageHeaderLen);
    JDWP::Append1BE(bytes, kEntryHeaderLen);
    JDWP::Append1BE(bytes, kStackFrameLen);

    // (2b) number of entries
    // (4b) offset to string table from start of message
    // (2b) number of class name strings
    // (2b) number of method name strings
    // (2b) number of source file name strings
    JDWP::Append2BE(bytes, gAllocRecordCount);
    size_t string_table_offset = bytes.size();
    JDWP::Append4BE(bytes, 0);  // We'll patch this later...
    JDWP::Append2BE(bytes, class_names.Size());
    JDWP::Append2BE(bytes, method_names.Size());
    JDWP::Append2BE(bytes, filenames.Size());

    // Second pass over the same records, emitting one entry per record with
    // string-table indices in place of the actual strings.
    count = gAllocRecordCount;
    idx = HeadIndex();
    ClassHelper kh;
    while (count--) {
      // For each entry:
      // (4b) total allocation size
      // (2b) thread id
      // (2b) allocated object's class name index
      // (1b) stack depth
      AllocRecord* record = &recent_allocation_records_[idx];
      size_t stack_depth = record->GetDepth();
      kh.ChangeClass(record->type);
      size_t allocated_object_class_name_index = class_names.IndexOf(kh.GetDescriptor());
      JDWP::Append4BE(bytes, record->byte_count);
      JDWP::Append2BE(bytes, record->thin_lock_id);
      JDWP::Append2BE(bytes, allocated_object_class_name_index);
      JDWP::Append1BE(bytes, stack_depth);

      MethodHelper mh;
      for (size_t stack_frame = 0; stack_frame < stack_depth; ++stack_frame) {
        // For each stack frame:
        // (2b) method's class name
        // (2b) method name
        // (2b) method source file
        // (2b) line number, clipped to 32767; -2 if native; -1 if no source
        mh.ChangeMethod(record->stack[stack_frame].method);
        size_t class_name_index = class_names.IndexOf(mh.GetDeclaringClassDescriptor());
        size_t method_name_index = method_names.IndexOf(mh.GetName());
        size_t file_name_index = filenames.IndexOf(mh.GetDeclaringClassSourceFile());
        JDWP::Append2BE(bytes, class_name_index);
        JDWP::Append2BE(bytes, method_name_index);
        JDWP::Append2BE(bytes, file_name_index);
        JDWP::Append2BE(bytes, record->stack[stack_frame].LineNumber());
      }

      idx = (idx + 1) & (gAllocRecordMax-1);
    }

    // (xb) class name strings
    // (xb) method name strings
    // (xb) source file strings
    // Patch the string-table offset recorded earlier, then append the tables.
    JDWP::Set4BE(&bytes[string_table_offset], bytes.size());
    class_names.WriteTo(bytes);
    method_names.WriteTo(bytes);
    filenames.WriteTo(bytes);
  }
  // Copy the serialized message into a Java byte[] for the caller.
  JNIEnv* env = self->GetJniEnv();
  jbyteArray result = env->NewByteArray(bytes.size());
  if (result != NULL) {
    env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0]));
  }
  return result;
}
3801
3802 } // namespace art
3803