/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "debugger.h"

#include <sys/uio.h>

#include <set>

#include "arch/context.h"
#include "class_linker.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
#include "dex_instruction.h"
#include "field_helper.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "handle_scope.h"
#include "jdwp/object_registry.h"
#include "method_helper.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
#include "quick/inline_method_analyser.h"
#include "reflection.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "ScopedPrimitiveArray.h"
#include "handle_scope-inl.h"
#include "thread_list.h"
#include "throw_location.h"
#include "utf.h"
#include "verifier/method_verifier-inl.h"
#include "well_known_classes.h"

#ifdef HAVE_ANDROID_OS
#include "cutils/properties.h"
#endif

namespace art {

static const size_t kMaxAllocRecordStackDepth = 16;  // Max 255.
static const size_t kDefaultNumAllocRecords = 64*1024;  // Must be a power of 2. 2BE can hold 64k-1.

// Limit alloc_record_count to the 2BE value that is the limit of the current protocol.
static uint16_t CappedAllocRecordCount(size_t alloc_record_count) {
  if (alloc_record_count > 0xffff) {
    return 0xffff;
  }
  return alloc_record_count;
}

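// One frame of an allocation's recorded stack trace: the method (encoded as a jmethodID) and the
// dex pc within it.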
class AllocRecordStackTraceElement {
 public:
  AllocRecordStackTraceElement() : method_(nullptr), dex_pc_(0) {
  }

  int32_t LineNumber() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    mirror::ArtMethod* method = Method();
    DCHECK(method != nullptr);
    return method->GetLineNumFromDexPC(DexPc());
  }

  mirror::ArtMethod* Method() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    return soa.DecodeMethod(method_);
  }

  void SetMethod(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    method_ = soa.EncodeMethod(m);
  }

  uint32_t DexPc() const {
    return dex_pc_;
  }

  void SetDexPc(uint32_t pc) {
    dex_pc_ = pc;
  }

 private:
  jmethodID method_;
  uint32_t dex_pc_;
};

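// Returns a weak global reference to the given class, reusing a previously created reference if
// the same class (matched by identity hash code) is already in the cache.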
jobject Dbg::TypeCache::Add(mirror::Class* t) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  int32_t hash_code = t->IdentityHashCode();
  auto range = objects_.equal_range(hash_code);
  for (auto it = range.first; it != range.second; ++it) {
    if (soa.Decode<mirror::Class*>(it->second) == t) {
      // Found a matching weak global, return it.
      return it->second;
    }
  }
  JNIEnv* env = soa.Env();
  const jobject local_ref = soa.AddLocalReference<jobject>(t);
  const jobject weak_global = env->NewWeakGlobalRef(local_ref);
  env->DeleteLocalRef(local_ref);
  objects_.insert(std::make_pair(hash_code, weak_global));
  return weak_global;
}

void Dbg::TypeCache::Clear() {
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  Thread* self = Thread::Current();
  for (const auto& p : objects_) {
    vm->DeleteWeakGlobalRef(self, p.second);
  }
  objects_.clear();
}

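// A single tracked allocation: the allocated type (held as a weak global through
// Dbg::type_cache_), the allocation size in bytes, the allocating thread's thin lock id, and up
// to kMaxAllocRecordStackDepth stack frames recorded at the allocation site.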
class AllocRecord {
 public:
  AllocRecord() : type_(nullptr), byte_count_(0), thin_lock_id_(0) {}

  mirror::Class* Type() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return down_cast<mirror::Class*>(Thread::Current()->DecodeJObject(type_));
  }

  void SetType(mirror::Class* t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
                                                       Locks::alloc_tracker_lock_) {
    type_ = Dbg::type_cache_.Add(t);
  }

  size_t GetDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    size_t depth = 0;
    while (depth < kMaxAllocRecordStackDepth && stack_[depth].Method() != NULL) {
      ++depth;
    }
    return depth;
  }

  size_t ByteCount() const {
    return byte_count_;
  }

  void SetByteCount(size_t count) {
    byte_count_ = count;
  }

  uint16_t ThinLockId() const {
    return thin_lock_id_;
  }

  void SetThinLockId(uint16_t id) {
    thin_lock_id_ = id;
  }

  AllocRecordStackTraceElement* StackElement(size_t index) {
    DCHECK_LT(index, kMaxAllocRecordStackDepth);
    return &stack_[index];
  }

 private:
  jobject type_;  // This is a weak global.
  size_t byte_count_;
  uint16_t thin_lock_id_;
  AllocRecordStackTraceElement stack_[kMaxAllocRecordStackDepth];  // Unused entries have NULL method.
};

class Breakpoint {
 public:
  Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc, bool need_full_deoptimization)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
    : method_(nullptr), dex_pc_(dex_pc), need_full_deoptimization_(need_full_deoptimization) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    method_ = soa.EncodeMethod(method);
  }

  Breakpoint(const Breakpoint& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
    : method_(nullptr), dex_pc_(other.dex_pc_),
      need_full_deoptimization_(other.need_full_deoptimization_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    method_ = soa.EncodeMethod(other.Method());
  }

  mirror::ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    return soa.DecodeMethod(method_);
  }

  uint32_t DexPc() const {
    return dex_pc_;
  }

  bool NeedFullDeoptimization() const {
    return need_full_deoptimization_;
  }

 private:
  // The location of this breakpoint.
  jmethodID method_;
  uint32_t dex_pc_;

  // Indicates whether breakpoint needs full deoptimization or selective deoptimization.
  bool need_full_deoptimization_;
};

static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc());
  return os;
}

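// Instrumentation listener the debugger registers with the runtime's instrumentation. It forwards
// method entry/exit, dex pc changes, field accesses and caught exceptions to Dbg so they can be
// reported as JDWP events; method unwind events are not requested, so receiving one is an error.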
class DebugInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
 public:
  DebugInstrumentationListener() {}
  virtual ~DebugInstrumentationListener() {}

  void MethodEntered(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                     uint32_t dex_pc)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (method->IsNative()) {
      // TODO: post location events is a suspension point and native method entry stubs aren't.
      return;
    }
    Dbg::UpdateDebugger(thread, this_object, method, 0, Dbg::kMethodEntry, nullptr);
  }

  void MethodExited(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                    uint32_t dex_pc, const JValue& return_value)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (method->IsNative()) {
      // TODO: post location events is a suspension point and native method entry stubs aren't.
      return;
    }
    Dbg::UpdateDebugger(thread, this_object, method, dex_pc, Dbg::kMethodExit, &return_value);
  }

  void MethodUnwind(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                    uint32_t dex_pc)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // We're not recorded to listen to this kind of event, so complain.
    LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
               << " " << dex_pc;
  }

  void DexPcMoved(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                  uint32_t new_dex_pc)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::UpdateDebugger(thread, this_object, method, new_dex_pc, 0, nullptr);
  }

  void FieldRead(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                 uint32_t dex_pc, mirror::ArtField* field)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
  }

  void FieldWritten(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                    uint32_t dex_pc, mirror::ArtField* field, const JValue& field_value)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
  }

  void ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
                       mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
                       mirror::Throwable* exception_object)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::PostException(throw_location, catch_method, catch_dex_pc, exception_object);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(DebugInstrumentationListener);
} gDebugInstrumentationListener;

// JDWP is allowed unless the Zygote forbids it.
static bool gJdwpAllowed = true;

// Was there a -Xrunjdwp or -agentlib:jdwp= argument on the command line?
static bool gJdwpConfigured = false;

// Broken-down JDWP options. (Only valid if IsJdwpConfigured() is true.)
static JDWP::JdwpOptions gJdwpOptions;

// Runtime JDWP state.
static JDWP::JdwpState* gJdwpState = NULL;
static bool gDebuggerConnected;  // debugger or DDMS is connected.
static bool gDebuggerActive;     // debugger is making requests.
static bool gDisposed;           // debugger called VirtualMachine.Dispose, so we should drop the connection.

static bool gDdmThreadNotification = false;

// DDMS GC-related settings.
static Dbg::HpifWhen gDdmHpifWhen = Dbg::HPIF_WHEN_NEVER;
static Dbg::HpsgWhen gDdmHpsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmHpsgWhat;
static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmNhsgWhat;

ObjectRegistry* Dbg::gRegistry = nullptr;

// Recent allocation tracking.
AllocRecord* Dbg::recent_allocation_records_ = nullptr;  // TODO: CircularBuffer<AllocRecord>
size_t Dbg::alloc_record_max_ = 0;
size_t Dbg::alloc_record_head_ = 0;
size_t Dbg::alloc_record_count_ = 0;
Dbg::TypeCache Dbg::type_cache_;

// Deoptimization support.
std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_;
size_t Dbg::full_deoptimization_event_count_ = 0;
size_t Dbg::delayed_full_undeoptimization_count_ = 0;

// Instrumentation event reference counters.
size_t Dbg::dex_pc_change_event_ref_count_ = 0;
size_t Dbg::method_enter_event_ref_count_ = 0;
size_t Dbg::method_exit_event_ref_count_ = 0;
size_t Dbg::field_read_event_ref_count_ = 0;
size_t Dbg::field_write_event_ref_count_ = 0;
size_t Dbg::exception_catch_event_ref_count_ = 0;
uint32_t Dbg::instrumentation_events_ = 0;

// Breakpoints.
static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);

void DebugInvokeReq::VisitRoots(RootCallback* callback, void* arg, uint32_t tid,
                                RootType root_type) {
  if (receiver != nullptr) {
    callback(&receiver, arg, tid, root_type);
  }
  if (thread != nullptr) {
    callback(&thread, arg, tid, root_type);
  }
  if (klass != nullptr) {
    callback(reinterpret_cast<mirror::Object**>(&klass), arg, tid, root_type);
  }
  if (method != nullptr) {
    callback(reinterpret_cast<mirror::Object**>(&method), arg, tid, root_type);
  }
}

void DebugInvokeReq::Clear() {
  invoke_needed = false;
  receiver = nullptr;
  thread = nullptr;
  klass = nullptr;
  method = nullptr;
}

void SingleStepControl::VisitRoots(RootCallback* callback, void* arg, uint32_t tid,
                                   RootType root_type) {
  if (method != nullptr) {
    callback(reinterpret_cast<mirror::Object**>(&method), arg, tid, root_type);
  }
}

bool SingleStepControl::ContainsDexPc(uint32_t dex_pc) const {
  return dex_pcs.find(dex_pc) == dex_pcs.end();
}

void SingleStepControl::Clear() {
  is_active = false;
  method = nullptr;
  dex_pcs.clear();
}

static bool IsBreakpoint(const mirror::ArtMethod* m, uint32_t dex_pc)
    LOCKS_EXCLUDED(Locks::breakpoint_lock_)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
    if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].Method() == m) {
      VLOG(jdwp) << "Hit breakpoint #" << i << ": " << gBreakpoints[i];
      return true;
    }
  }
  return false;
}

static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread)
    LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) {
  MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
  // A thread may be suspended for GC; in this code, we really want to know whether
  // there's a debugger suspension active.
  return thread->IsSuspended() && thread->GetDebugSuspendCount() > 0;
}


static mirror::Array* DecodeArray(JDWP::RefTypeId id, JDWP::JdwpError& status)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    status = JDWP::ERR_INVALID_OBJECT;
    return NULL;
  }
  if (!o->IsArrayInstance()) {
    status = JDWP::ERR_INVALID_ARRAY;
    return NULL;
  }
  status = JDWP::ERR_NONE;
  return o->AsArray();
}

static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError& status)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    status = JDWP::ERR_INVALID_OBJECT;
    return NULL;
  }
  if (!o->IsClass()) {
    status = JDWP::ERR_INVALID_CLASS;
    return NULL;
  }
  status = JDWP::ERR_NONE;
  return o->AsClass();
}

static JDWP::JdwpError DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id, Thread*& thread)
    EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
    LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* thread_peer = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_id);
  if (thread_peer == NULL || thread_peer == ObjectRegistry::kInvalidObject) {
    // This isn't even an object.
    return JDWP::ERR_INVALID_OBJECT;
  }

  mirror::Class* java_lang_Thread = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
  if (!java_lang_Thread->IsAssignableFrom(thread_peer->GetClass())) {
    // This isn't a thread.
    return JDWP::ERR_INVALID_THREAD;
  }

  thread = Thread::FromManagedThread(soa, thread_peer);
  if (thread == NULL) {
    // This is a java.lang.Thread without a Thread*. Must be a zombie.
    return JDWP::ERR_THREAD_NOT_ALIVE;
  }
  return JDWP::ERR_NONE;
}

static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) {
  // JDWP deliberately uses the descriptor characters' ASCII values for its enum.
  // Note that by "basic" we mean that we don't get more specific than JT_OBJECT.
  return static_cast<JDWP::JdwpTag>(descriptor[0]);
}
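// Note (illustrative): with the mapping above, "I" yields JT_INT, "[I" yields JT_ARRAY and
// "Ljava/lang/String;" yields JT_OBJECT; TagFromClass()/TagFromObject() below refine object
// descriptors to more specific tags such as JT_STRING or JT_THREAD.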

static JDWP::JdwpTag BasicTagFromClass(mirror::Class* klass)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  std::string temp;
  const char* descriptor = klass->GetDescriptor(&temp);
  return BasicTagFromDescriptor(descriptor);
}

static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  CHECK(c != NULL);
  if (c->IsArrayClass()) {
    return JDWP::JT_ARRAY;
  }
  if (c->IsStringClass()) {
    return JDWP::JT_STRING;
  }
  if (c->IsClassClass()) {
    return JDWP::JT_CLASS_OBJECT;
  }
  {
    mirror::Class* thread_class = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
    if (thread_class->IsAssignableFrom(c)) {
      return JDWP::JT_THREAD;
    }
  }
  {
    mirror::Class* thread_group_class =
        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
    if (thread_group_class->IsAssignableFrom(c)) {
      return JDWP::JT_THREAD_GROUP;
    }
  }
  {
    mirror::Class* class_loader_class =
        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader);
    if (class_loader_class->IsAssignableFrom(c)) {
      return JDWP::JT_CLASS_LOADER;
    }
  }
  return JDWP::JT_OBJECT;
}

/*
 * Objects declared to hold Object might actually hold a more specific
 * type. The debugger may take a special interest in these (e.g. it
 * wants to display the contents of Strings), so we want to return an
 * appropriate tag.
 *
 * Null objects are tagged JT_OBJECT.
 */
JDWP::JdwpTag Dbg::TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o) {
  return (o == NULL) ? JDWP::JT_OBJECT : TagFromClass(soa, o->GetClass());
}

static bool IsPrimitiveTag(JDWP::JdwpTag tag) {
  switch (tag) {
  case JDWP::JT_BOOLEAN:
  case JDWP::JT_BYTE:
  case JDWP::JT_CHAR:
  case JDWP::JT_FLOAT:
  case JDWP::JT_DOUBLE:
  case JDWP::JT_INT:
  case JDWP::JT_LONG:
  case JDWP::JT_SHORT:
  case JDWP::JT_VOID:
    return true;
  default:
    return false;
  }
}

/*
 * Handle one of the JDWP name/value pairs.
 *
 * JDWP options are:
 *  help: if specified, show help message and bail
 *  transport: may be dt_socket or dt_shmem
 *  address: for dt_socket, "host:port", or just "port" when listening
 *  server: if "y", wait for debugger to attach; if "n", attach to debugger
 *  timeout: how long to wait for debugger to connect / listen
 *
 * Useful with server=n (these aren't supported yet):
 *  onthrow=<exception-name>: connect to debugger when exception thrown
 *  onuncaught=y|n: connect to debugger when uncaught exception thrown
 *  launch=<command-line>: launch the debugger itself
 *
 * The "transport" option is required, as is "address" if server=n.
 */
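// For example, the pair ("transport", "dt_socket") selects the socket transport, and the pair
// ("address", "8000") sets port 8000 with no host, i.e. listen on the port when server=y.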
static bool ParseJdwpOption(const std::string& name, const std::string& value) {
  if (name == "transport") {
    if (value == "dt_socket") {
      gJdwpOptions.transport = JDWP::kJdwpTransportSocket;
    } else if (value == "dt_android_adb") {
      gJdwpOptions.transport = JDWP::kJdwpTransportAndroidAdb;
    } else {
      LOG(ERROR) << "JDWP transport not supported: " << value;
      return false;
    }
  } else if (name == "server") {
    if (value == "n") {
      gJdwpOptions.server = false;
    } else if (value == "y") {
      gJdwpOptions.server = true;
    } else {
      LOG(ERROR) << "JDWP option 'server' must be 'y' or 'n'";
      return false;
    }
  } else if (name == "suspend") {
    if (value == "n") {
      gJdwpOptions.suspend = false;
    } else if (value == "y") {
      gJdwpOptions.suspend = true;
    } else {
      LOG(ERROR) << "JDWP option 'suspend' must be 'y' or 'n'";
      return false;
    }
  } else if (name == "address") {
    /* this is either <port> or <host>:<port> */
    std::string port_string;
    gJdwpOptions.host.clear();
    std::string::size_type colon = value.find(':');
    if (colon != std::string::npos) {
      gJdwpOptions.host = value.substr(0, colon);
      port_string = value.substr(colon + 1);
    } else {
      port_string = value;
    }
    if (port_string.empty()) {
      LOG(ERROR) << "JDWP address missing port: " << value;
      return false;
    }
    char* end;
    uint64_t port = strtoul(port_string.c_str(), &end, 10);
    if (*end != '\0' || port > 0xffff) {
      LOG(ERROR) << "JDWP address has junk in port field: " << value;
      return false;
    }
    gJdwpOptions.port = port;
  } else if (name == "launch" || name == "onthrow" || name == "oncaught" || name == "timeout") {
    /* valid but unsupported */
    LOG(INFO) << "Ignoring JDWP option '" << name << "'='" << value << "'";
  } else {
    LOG(INFO) << "Ignoring unrecognized JDWP option '" << name << "'='" << value << "'";
  }

  return true;
}

/*
 * Parse the latter half of a -Xrunjdwp/-agentlib:jdwp= string, e.g.:
 * "transport=dt_socket,address=8000,server=y,suspend=n"
 */
bool Dbg::ParseJdwpOptions(const std::string& options) {
  VLOG(jdwp) << "ParseJdwpOptions: " << options;

  std::vector<std::string> pairs;
  Split(options, ',', pairs);

  for (size_t i = 0; i < pairs.size(); ++i) {
    std::string::size_type equals = pairs[i].find('=');
    if (equals == std::string::npos) {
      LOG(ERROR) << "Can't parse JDWP option '" << pairs[i] << "' in '" << options << "'";
      return false;
    }
    ParseJdwpOption(pairs[i].substr(0, equals), pairs[i].substr(equals + 1));
  }

  if (gJdwpOptions.transport == JDWP::kJdwpTransportUnknown) {
    LOG(ERROR) << "Must specify JDWP transport: " << options;
  }
  if (!gJdwpOptions.server && (gJdwpOptions.host.empty() || gJdwpOptions.port == 0)) {
    LOG(ERROR) << "Must specify JDWP host and port when server=n: " << options;
    return false;
  }

  gJdwpConfigured = true;
  return true;
}

void Dbg::StartJdwp() {
  if (!gJdwpAllowed || !IsJdwpConfigured()) {
    // No JDWP for you!
    return;
  }

  CHECK(gRegistry == nullptr);
  gRegistry = new ObjectRegistry;

  // Init JDWP if the debugger is enabled. This may connect out to a
  // debugger, passively listen for a debugger, or block waiting for a
  // debugger.
  gJdwpState = JDWP::JdwpState::Create(&gJdwpOptions);
  if (gJdwpState == NULL) {
    // We probably failed because some other process has the port already, which means that
    // if we don't abort the user is likely to think they're talking to us when they're actually
    // talking to that other process.
    LOG(FATAL) << "Debugger thread failed to initialize";
  }

  // If a debugger has already attached, send the "welcome" message.
  // This may cause us to suspend all threads.
  if (gJdwpState->IsActive()) {
    ScopedObjectAccess soa(Thread::Current());
    if (!gJdwpState->PostVMStart()) {
      LOG(WARNING) << "Failed to post 'start' message to debugger";
    }
  }
}

void Dbg::StopJdwp() {
  // Post VM_DEATH event before the JDWP connection is closed (either by the JDWP thread or the
  // destruction of gJdwpState).
  if (gJdwpState != nullptr && gJdwpState->IsActive()) {
    gJdwpState->PostVMDeath();
  }
  // Prevent the JDWP thread from processing JDWP incoming packets after we close the connection.
  Disposed();
  delete gJdwpState;
  gJdwpState = nullptr;
  delete gRegistry;
  gRegistry = nullptr;
}

void Dbg::GcDidFinish() {
  if (gDdmHpifWhen != HPIF_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Sending heap info to DDM";
    DdmSendHeapInfo(gDdmHpifWhen);
  }
  if (gDdmHpsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping heap to DDM";
    DdmSendHeapSegments(false);
  }
  if (gDdmNhsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping native heap to DDM";
    DdmSendHeapSegments(true);
  }
}

void Dbg::SetJdwpAllowed(bool allowed) {
  gJdwpAllowed = allowed;
}

DebugInvokeReq* Dbg::GetInvokeReq() {
  return Thread::Current()->GetInvokeReq();
}

Thread* Dbg::GetDebugThread() {
  return (gJdwpState != NULL) ? gJdwpState->GetDebugThread() : NULL;
}

void Dbg::ClearWaitForEventThread() {
  gJdwpState->ClearWaitForEventThread();
}

void Dbg::Connected() {
  CHECK(!gDebuggerConnected);
  VLOG(jdwp) << "JDWP has attached";
  gDebuggerConnected = true;
  gDisposed = false;
}

void Dbg::Disposed() {
  gDisposed = true;
}

bool Dbg::IsDisposed() {
  return gDisposed;
}

void Dbg::GoActive() {
  // Enable all debugging features, including scans for breakpoints.
  // This is a no-op if we're already active.
  // Only called from the JDWP handler thread.
  if (gDebuggerActive) {
    return;
  }

  {
    // TODO: dalvik only warned if there were breakpoints left over. clear in Dbg::Disconnected?
    ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
    CHECK_EQ(gBreakpoints.size(), 0U);
  }

  {
    MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
    CHECK_EQ(deoptimization_requests_.size(), 0U);
    CHECK_EQ(full_deoptimization_event_count_, 0U);
    CHECK_EQ(delayed_full_undeoptimization_count_, 0U);
    CHECK_EQ(dex_pc_change_event_ref_count_, 0U);
    CHECK_EQ(method_enter_event_ref_count_, 0U);
    CHECK_EQ(method_exit_event_ref_count_, 0U);
    CHECK_EQ(field_read_event_ref_count_, 0U);
    CHECK_EQ(field_write_event_ref_count_, 0U);
    CHECK_EQ(exception_catch_event_ref_count_, 0U);
  }

  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll();
  Thread* self = Thread::Current();
  ThreadState old_state = self->SetStateUnsafe(kRunnable);
  CHECK_NE(old_state, kRunnable);
  runtime->GetInstrumentation()->EnableDeoptimization();
  instrumentation_events_ = 0;
  gDebuggerActive = true;
  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
  runtime->GetThreadList()->ResumeAll();

  LOG(INFO) << "Debugger is active";
}

void Dbg::Disconnected() {
  CHECK(gDebuggerConnected);

  LOG(INFO) << "Debugger is no longer active";

  // Suspend all threads and exclusively acquire the mutator lock. Set the state of the thread
  // to kRunnable to avoid scoped object access transitions. Remove the debugger as a listener
  // and clear the object registry.
  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll();
  Thread* self = Thread::Current();
  ThreadState old_state = self->SetStateUnsafe(kRunnable);

  // Debugger may not be active at this point.
  if (gDebuggerActive) {
    {
      // Since we're going to disable deoptimization, we clear the deoptimization requests queue.
      // This prevents us from having any pending deoptimization request when the debugger attaches
      // to us again while no event has been requested yet.
      MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
      deoptimization_requests_.clear();
      full_deoptimization_event_count_ = 0U;
      delayed_full_undeoptimization_count_ = 0U;
    }
    if (instrumentation_events_ != 0) {
      runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
                                                    instrumentation_events_);
      instrumentation_events_ = 0;
    }
    runtime->GetInstrumentation()->DisableDeoptimization();
    gDebuggerActive = false;
  }
  gRegistry->Clear();
  gDebuggerConnected = false;
  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
  runtime->GetThreadList()->ResumeAll();
}

bool Dbg::IsDebuggerActive() {
  return gDebuggerActive;
}

bool Dbg::IsJdwpConfigured() {
  return gJdwpConfigured;
}

int64_t Dbg::LastDebuggerActivity() {
  return gJdwpState->LastDebuggerActivity();
}

void Dbg::UndoDebuggerSuspensions() {
  Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
}

std::string Dbg::GetClassName(JDWP::RefTypeId class_id) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(class_id);
  if (o == NULL) {
    return "NULL";
  }
  if (o == ObjectRegistry::kInvalidObject) {
    return StringPrintf("invalid object %p", reinterpret_cast<void*>(class_id));
  }
  if (!o->IsClass()) {
    return StringPrintf("non-class %p", o);  // This is only used for debugging output anyway.
  }
  return GetClassName(o->AsClass());
}

std::string Dbg::GetClassName(mirror::Class* klass) {
  if (klass == nullptr) {
    return "NULL";
  }
  std::string temp;
  return DescriptorToName(klass->GetDescriptor(&temp));
}

JDWP::JdwpError Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId& class_object_id) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, status);
  if (c == NULL) {
    return status;
  }
  class_object_id = gRegistry->Add(c);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId& superclass_id) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, status);
  if (c == NULL) {
    return status;
  }
  if (c->IsInterface()) {
    // http://code.google.com/p/android/issues/detail?id=20856
    superclass_id = 0;
  } else {
    superclass_id = gRegistry->Add(c->GetSuperClass());
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  expandBufAddObjectId(pReply, gRegistry->Add(o->GetClass()->GetClassLoader()));
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, status);
  if (c == NULL) {
    return status;
  }

  uint32_t access_flags = c->GetAccessFlags() & kAccJavaFlagsMask;

  // Set ACC_SUPER. Dex files don't contain this flag but only classes are supposed to have it set,
  // not interfaces.
  // Class.getModifiers doesn't return it, but JDWP does, so we set it here.
  if ((access_flags & kAccInterface) == 0) {
    access_flags |= kAccSuper;
  }

  expandBufAdd4BE(pReply, access_flags);

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }

  // Ensure all threads are suspended while we read objects' lock words.
  Thread* self = Thread::Current();
  CHECK_EQ(self->GetState(), kRunnable);
  self->TransitionFromRunnableToSuspended(kSuspended);
  Runtime::Current()->GetThreadList()->SuspendAll();

  MonitorInfo monitor_info(o);

  Runtime::Current()->GetThreadList()->ResumeAll();
  self->TransitionFromSuspendedToRunnable();

  if (monitor_info.owner_ != NULL) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.owner_->GetPeer()));
  } else {
    expandBufAddObjectId(reply, gRegistry->Add(NULL));
  }
  expandBufAdd4BE(reply, monitor_info.entry_count_);
  expandBufAdd4BE(reply, monitor_info.waiters_.size());
  for (size_t i = 0; i < monitor_info.waiters_.size(); ++i) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.waiters_[i]->GetPeer()));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id,
                                      std::vector<JDWP::ObjectId>& monitors,
                                      std::vector<uint32_t>& stack_depths) {
  struct OwnedMonitorVisitor : public StackVisitor {
    OwnedMonitorVisitor(Thread* thread, Context* context,
                        std::vector<JDWP::ObjectId>* monitor_vector,
                        std::vector<uint32_t>* stack_depth_vector)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, context), current_stack_depth(0),
        monitors(monitor_vector), stack_depths(stack_depth_vector) {}

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      if (!GetMethod()->IsRuntimeMethod()) {
        Monitor::VisitLocks(this, AppendOwnedMonitors, this);
        ++current_stack_depth;
      }
      return true;
    }

    static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg);
      visitor->monitors->push_back(gRegistry->Add(owned_monitor));
      visitor->stack_depths->push_back(visitor->current_stack_depth);
    }

    size_t current_stack_depth;
    std::vector<JDWP::ObjectId>* monitors;
    std::vector<uint32_t>* stack_depths;
  };

  ScopedObjectAccessUnchecked soa(Thread::Current());
  Thread* thread;
  {
    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
    JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
    if (error != JDWP::ERR_NONE) {
      return error;
    }
    if (!IsSuspendedForDebugger(soa, thread)) {
      return JDWP::ERR_THREAD_NOT_SUSPENDED;
    }
  }
  std::unique_ptr<Context> context(Context::Create());
  OwnedMonitorVisitor visitor(thread, context.get(), &monitors, &stack_depths);
  visitor.WalkStack();
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id,
                                         JDWP::ObjectId& contended_monitor) {
  mirror::Object* contended_monitor_obj;
  ScopedObjectAccessUnchecked soa(Thread::Current());
  {
    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
    Thread* thread;
    JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
    if (error != JDWP::ERR_NONE) {
      return error;
    }
    if (!IsSuspendedForDebugger(soa, thread)) {
      return JDWP::ERR_THREAD_NOT_SUSPENDED;
    }
    contended_monitor_obj = Monitor::GetContendedMonitor(thread);
  }
  // Add() requires the thread_list_lock_ not held to avoid the lock
  // level violation.
  contended_monitor = gRegistry->Add(contended_monitor_obj);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
                                       std::vector<uint64_t>& counts)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  heap->CollectGarbage(false);
  std::vector<mirror::Class*> classes;
  counts.clear();
  for (size_t i = 0; i < class_ids.size(); ++i) {
    JDWP::JdwpError status;
    mirror::Class* c = DecodeClass(class_ids[i], status);
    if (c == NULL) {
      return status;
    }
    classes.push_back(c);
    counts.push_back(0);
  }
  heap->CountInstances(classes, false, &counts[0]);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count, std::vector<JDWP::ObjectId>& instances)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  // We only want reachable instances, so do a GC.
  heap->CollectGarbage(false);
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == nullptr) {
    return status;
  }
  std::vector<mirror::Object*> raw_instances;
  Runtime::Current()->GetHeap()->GetInstances(c, max_count, raw_instances);
  for (size_t i = 0; i < raw_instances.size(); ++i) {
    instances.push_back(gRegistry->Add(raw_instances[i]));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
                                         std::vector<JDWP::ObjectId>& referring_objects)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  heap->CollectGarbage(false);
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  std::vector<mirror::Object*> raw_instances;
  heap->GetReferringObjects(o, max_count, raw_instances);
  for (size_t i = 0; i < raw_instances.size(); ++i) {
    referring_objects.push_back(gRegistry->Add(raw_instances[i]));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::DisableCollection(JDWP::ObjectId object_id)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  gRegistry->DisableCollection(object_id);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::EnableCollection(JDWP::ObjectId object_id)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  // Unlike DisableCollection, JDWP specs do not state an invalid object causes an error. The RI
  // also ignores these cases and never return an error. However it's not obvious why this command
  // should behave differently from DisableCollection and IsCollected commands. So let's be more
  // strict and return an error if this happens.
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  gRegistry->EnableCollection(object_id);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::IsCollected(JDWP::ObjectId object_id, bool& is_collected)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  if (object_id == 0) {
    // Null object id is invalid.
    return JDWP::ERR_INVALID_OBJECT;
  }
  // JDWP specs state an INVALID_OBJECT error is returned if the object ID is not valid. However
  // the RI seems to ignore this and assume object has been collected.
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    is_collected = true;
  } else {
    is_collected = gRegistry->IsCollected(object_id);
  }
  return JDWP::ERR_NONE;
}

void Dbg::DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  gRegistry->DisposeObject(object_id, reference_count);
}

JDWP::JdwpTypeTag Dbg::GetTypeTag(mirror::Class* klass) {
  DCHECK(klass != nullptr);
  if (klass->IsArrayClass()) {
    return JDWP::TT_ARRAY;
  } else if (klass->IsInterface()) {
    return JDWP::TT_INTERFACE;
  } else {
    return JDWP::TT_CLASS;
  }
}

JDWP::JdwpError Dbg::GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == NULL) {
    return status;
  }

  JDWP::JdwpTypeTag type_tag = GetTypeTag(c);
  expandBufAdd1(pReply, type_tag);
  expandBufAddRefTypeId(pReply, class_id);
  return JDWP::ERR_NONE;
}

void Dbg::GetClassList(std::vector<JDWP::RefTypeId>& classes) {
  // Get the complete list of reference classes (i.e. all classes except
  // the primitive types).
  // Returns a newly-allocated buffer full of RefTypeId values.
  struct ClassListCreator {
    explicit ClassListCreator(std::vector<JDWP::RefTypeId>& classes) : classes(classes) {
    }

    static bool Visit(mirror::Class* c, void* arg) {
      return reinterpret_cast<ClassListCreator*>(arg)->Visit(c);
    }

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool Visit(mirror::Class* c) NO_THREAD_SAFETY_ANALYSIS {
      if (!c->IsPrimitive()) {
        classes.push_back(gRegistry->AddRefType(c));
      }
      return true;
    }

    std::vector<JDWP::RefTypeId>& classes;
  };

  ClassListCreator clc(classes);
  Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(ClassListCreator::Visit,
                                                                       &clc);
}

JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
                                  uint32_t* pStatus, std::string* pDescriptor) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == NULL) {
    return status;
  }

  if (c->IsArrayClass()) {
    *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
    *pTypeTag = JDWP::TT_ARRAY;
  } else {
    if (c->IsErroneous()) {
      *pStatus = JDWP::CS_ERROR;
    } else {
      *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED | JDWP::CS_INITIALIZED;
    }
    *pTypeTag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
  }

  if (pDescriptor != NULL) {
    std::string temp;
    *pDescriptor = c->GetDescriptor(&temp);
  }
  return JDWP::ERR_NONE;
}

void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>& ids) {
  std::vector<mirror::Class*> classes;
  Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, classes);
  ids.clear();
  for (size_t i = 0; i < classes.size(); ++i) {
    ids.push_back(gRegistry->Add(classes[i]));
  }
}

JDWP::JdwpError Dbg::GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }

  JDWP::JdwpTypeTag type_tag = GetTypeTag(o->GetClass());
  JDWP::RefTypeId type_id = gRegistry->AddRefType(o->GetClass());

  expandBufAdd1(pReply, type_tag);
  expandBufAddRefTypeId(pReply, type_id);

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string* signature) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == NULL) {
    return status;
  }
  std::string temp;
  *signature = c->GetDescriptor(&temp);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSourceFile(JDWP::RefTypeId class_id, std::string& result) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == nullptr) {
    return status;
  }
  const char* source_file = c->GetSourceFile();
  if (source_file == nullptr) {
    return JDWP::ERR_ABSENT_INFORMATION;
  }
  result = source_file;
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetObjectTag(JDWP::ObjectId object_id, uint8_t& tag) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  tag = TagFromObject(soa, o);
  return JDWP::ERR_NONE;
}

size_t Dbg::GetTagWidth(JDWP::JdwpTag tag) {
  switch (tag) {
  case JDWP::JT_VOID:
    return 0;
  case JDWP::JT_BYTE:
  case JDWP::JT_BOOLEAN:
    return 1;
  case JDWP::JT_CHAR:
  case JDWP::JT_SHORT:
    return 2;
  case JDWP::JT_FLOAT:
  case JDWP::JT_INT:
    return 4;
  case JDWP::JT_ARRAY:
  case JDWP::JT_OBJECT:
  case JDWP::JT_STRING:
  case JDWP::JT_THREAD:
  case JDWP::JT_THREAD_GROUP:
  case JDWP::JT_CLASS_LOADER:
  case JDWP::JT_CLASS_OBJECT:
    return sizeof(JDWP::ObjectId);
  case JDWP::JT_DOUBLE:
  case JDWP::JT_LONG:
    return 8;
  default:
    LOG(FATAL) << "Unknown tag " << tag;
    return -1;
  }
}

JDWP::JdwpError Dbg::GetArrayLength(JDWP::ObjectId array_id, int& length) {
  JDWP::JdwpError status;
  mirror::Array* a = DecodeArray(array_id, status);
  if (a == NULL) {
    return status;
  }
  length = a->GetLength();
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId array_id, int offset, int count, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError status;
  mirror::Array* a = DecodeArray(array_id, status);
  if (a == nullptr) {
    return status;
  }

  if (offset < 0 || count < 0 || offset > a->GetLength() || a->GetLength() - offset < count) {
    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
    return JDWP::ERR_INVALID_LENGTH;
  }
  JDWP::JdwpTag element_tag = BasicTagFromClass(a->GetClass()->GetComponentType());
  expandBufAdd1(pReply, element_tag);
  expandBufAdd4BE(pReply, count);

  if (IsPrimitiveTag(element_tag)) {
    size_t width = GetTagWidth(element_tag);
    uint8_t* dst = expandBufAddSpace(pReply, count * width);
    if (width == 8) {
      const uint64_t* src8 = reinterpret_cast<uint64_t*>(a->GetRawData(sizeof(uint64_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write8BE(&dst, src8[offset + i]);
    } else if (width == 4) {
      const uint32_t* src4 = reinterpret_cast<uint32_t*>(a->GetRawData(sizeof(uint32_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write4BE(&dst, src4[offset + i]);
    } else if (width == 2) {
      const uint16_t* src2 = reinterpret_cast<uint16_t*>(a->GetRawData(sizeof(uint16_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write2BE(&dst, src2[offset + i]);
    } else {
      const uint8_t* src = reinterpret_cast<uint8_t*>(a->GetRawData(sizeof(uint8_t), 0));
      memcpy(dst, &src[offset * width], count * width);
    }
  } else {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    mirror::ObjectArray<mirror::Object>* oa = a->AsObjectArray<mirror::Object>();
    for (int i = 0; i < count; ++i) {
      mirror::Object* element = oa->Get(offset + i);
      JDWP::JdwpTag specific_tag = (element != nullptr) ? TagFromObject(soa, element)
                                                        : element_tag;
      expandBufAdd1(pReply, specific_tag);
      expandBufAddObjectId(pReply, gRegistry->Add(element));
    }
  }

  return JDWP::ERR_NONE;
}

template <typename T>
static void CopyArrayData(mirror::Array* a, JDWP::Request& src, int offset, int count)
    NO_THREAD_SAFETY_ANALYSIS {
  // TODO: fix when annotalysis correctly handles non-member functions.
  DCHECK(a->GetClass()->IsPrimitiveArray());

  T* dst = reinterpret_cast<T*>(a->GetRawData(sizeof(T), offset));
  for (int i = 0; i < count; ++i) {
    *dst++ = src.ReadValue(sizeof(T));
  }
}

JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int count,
                                      JDWP::Request& request)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  JDWP::JdwpError status;
  mirror::Array* dst = DecodeArray(array_id, status);
  if (dst == NULL) {
    return status;
  }

  if (offset < 0 || count < 0 || offset > dst->GetLength() || dst->GetLength() - offset < count) {
    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
    return JDWP::ERR_INVALID_LENGTH;
  }
  JDWP::JdwpTag element_tag = BasicTagFromClass(dst->GetClass()->GetComponentType());

  if (IsPrimitiveTag(element_tag)) {
    size_t width = GetTagWidth(element_tag);
    if (width == 8) {
      CopyArrayData<uint64_t>(dst, request, offset, count);
    } else if (width == 4) {
      CopyArrayData<uint32_t>(dst, request, offset, count);
    } else if (width == 2) {
      CopyArrayData<uint16_t>(dst, request, offset, count);
    } else {
      CopyArrayData<uint8_t>(dst, request, offset, count);
    }
  } else {
    mirror::ObjectArray<mirror::Object>* oa = dst->AsObjectArray<mirror::Object>();
    for (int i = 0; i < count; ++i) {
      JDWP::ObjectId id = request.ReadObjectId();
      mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
      if (o == ObjectRegistry::kInvalidObject) {
        return JDWP::ERR_INVALID_OBJECT;
      }
      oa->Set<false>(offset + i, o);
    }
  }

  return JDWP::ERR_NONE;
}

JDWP::ObjectId Dbg::CreateString(const std::string& str) {
  return gRegistry->Add(mirror::String::AllocFromModifiedUtf8(Thread::Current(), str.c_str()));
}

JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId& new_object) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == NULL) {
    return status;
  }
  new_object = gRegistry->Add(c->AllocObject(Thread::Current()));
  return JDWP::ERR_NONE;
}

/*
 * Used by Eclipse's "Display" view to evaluate "new byte[5]" to get "(byte[]) [0, 0, 0, 0, 0]".
 */
JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length,
                                       JDWP::ObjectId& new_array) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(array_class_id, status);
  if (c == NULL) {
    return status;
  }
  new_array = gRegistry->Add(mirror::Array::Alloc<true>(Thread::Current(), c, length,
                                                        c->GetComponentSize(),
                                                        Runtime::Current()->GetHeap()->GetCurrentAllocator()));
  return JDWP::ERR_NONE;
}

JDWP::FieldId Dbg::ToFieldId(const mirror::ArtField* f) {
  CHECK(!kMovingFields);
  return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f));
}

static JDWP::MethodId ToMethodId(const mirror::ArtMethod* m)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  CHECK(!kMovingMethods);
  return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(m));
}

static mirror::ArtField* FromFieldId(JDWP::FieldId fid)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  CHECK(!kMovingFields);
  return reinterpret_cast<mirror::ArtField*>(static_cast<uintptr_t>(fid));
}

static mirror::ArtMethod* FromMethodId(JDWP::MethodId mid)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  CHECK(!kMovingMethods);
  return reinterpret_cast<mirror::ArtMethod*>(static_cast<uintptr_t>(mid));
}

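// The Match* helpers below compare a reported event (thread, location, type, field or instance)
// against the corresponding JDWP event-request modifier, so the caller can decide whether the
// event should actually be sent to the debugger.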
MatchThread(JDWP::ObjectId expected_thread_id,Thread * event_thread)1443 bool Dbg::MatchThread(JDWP::ObjectId expected_thread_id, Thread* event_thread) {
1444 CHECK(event_thread != nullptr);
1445 mirror::Object* expected_thread_peer = gRegistry->Get<mirror::Object*>(expected_thread_id);
1446 return expected_thread_peer == event_thread->GetPeer();
1447 }
1448
MatchLocation(const JDWP::JdwpLocation & expected_location,const JDWP::EventLocation & event_location)1449 bool Dbg::MatchLocation(const JDWP::JdwpLocation& expected_location,
1450 const JDWP::EventLocation& event_location) {
1451 if (expected_location.dex_pc != event_location.dex_pc) {
1452 return false;
1453 }
1454 mirror::ArtMethod* m = FromMethodId(expected_location.method_id);
1455 return m == event_location.method;
1456 }
1457
MatchType(mirror::Class * event_class,JDWP::RefTypeId class_id)1458 bool Dbg::MatchType(mirror::Class* event_class, JDWP::RefTypeId class_id) {
1459 if (event_class == nullptr) {
1460 return false;
1461 }
1462 JDWP::JdwpError status;
1463 mirror::Class* expected_class = DecodeClass(class_id, status);
1464 CHECK(expected_class != nullptr);
1465 return expected_class->IsAssignableFrom(event_class);
1466 }
1467
MatchField(JDWP::RefTypeId expected_type_id,JDWP::FieldId expected_field_id,mirror::ArtField * event_field)1468 bool Dbg::MatchField(JDWP::RefTypeId expected_type_id, JDWP::FieldId expected_field_id,
1469 mirror::ArtField* event_field) {
1470 mirror::ArtField* expected_field = FromFieldId(expected_field_id);
1471 if (expected_field != event_field) {
1472 return false;
1473 }
1474 return Dbg::MatchType(event_field->GetDeclaringClass(), expected_type_id);
1475 }
1476
1477 bool Dbg::MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* event_instance) {
1478 mirror::Object* modifier_instance = gRegistry->Get<mirror::Object*>(expected_instance_id);
1479 return modifier_instance == event_instance;
1480 }
1481
1482 void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, mirror::ArtMethod* m, uint32_t dex_pc)
1483 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1484 if (m == nullptr) {
1485 memset(location, 0, sizeof(*location));
1486 } else {
1487 mirror::Class* c = m->GetDeclaringClass();
1488 location->type_tag = GetTypeTag(c);
1489 location->class_id = gRegistry->AddRefType(c);
1490 location->method_id = ToMethodId(m);
1491 location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint64_t>(-1) : dex_pc;
1492 }
1493 }
1494
1495 std::string Dbg::GetMethodName(JDWP::MethodId method_id)
1496 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1497 mirror::ArtMethod* m = FromMethodId(method_id);
1498 if (m == nullptr) {
1499 return "NULL";
1500 }
1501 return m->GetName();
1502 }
1503
1504 std::string Dbg::GetFieldName(JDWP::FieldId field_id)
1505 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1506 mirror::ArtField* f = FromFieldId(field_id);
1507 if (f == nullptr) {
1508 return "NULL";
1509 }
1510 return f->GetName();
1511 }
1512
1513 /*
1514 * Augment the access flags for synthetic methods and fields by setting
1515 * the (as described by the spec) "0xf0000000 bit". Also, strip out any
1516 * flags not specified by the Java programming language.
1517 */
1518 static uint32_t MangleAccessFlags(uint32_t accessFlags) {
1519 accessFlags &= kAccJavaFlagsMask;
1520 if ((accessFlags & kAccSynthetic) != 0) {
1521 accessFlags |= 0xf0000000;
1522 }
1523 return accessFlags;
1524 }
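// Worked example (assuming kAccJavaFlagsMask keeps the low 16 bits): a private synthetic field
// with dex flags 0x1002 (kAccPrivate | kAccSynthetic) is reported to the debugger as 0xf0001002;
// the Java-level bits survive the mask and the spec's 0xf0000000 "synthetic" marker bits are added.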
1525
1526 /*
1527 * Circularly shifts registers so that arguments come first. Debuggers
1528 * expect slots to begin with arguments, but dex code places them at
1529 * the end.
1530 */
1531 static uint16_t MangleSlot(uint16_t slot, mirror::ArtMethod* m)
1532 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1533 const DexFile::CodeItem* code_item = m->GetCodeItem();
1534 if (code_item == nullptr) {
1535 // We should not get here for a method without code (native, proxy or abstract). Log it and
1536 // return the slot as is since all registers are arguments.
1537 LOG(WARNING) << "Trying to mangle slot for method without code " << PrettyMethod(m);
1538 return slot;
1539 }
1540 uint16_t ins_size = code_item->ins_size_;
1541 uint16_t locals_size = code_item->registers_size_ - ins_size;
1542 if (slot >= locals_size) {
1543 return slot - locals_size;
1544 } else {
1545 return slot + ins_size;
1546 }
1547 }
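// Worked example (hypothetical method layout): with registers_size_ = 5 and ins_size_ = 2, the
// locals occupy dex registers v0-v2 and the arguments v3-v4. MangleSlot maps argument v3 to JDWP
// slot 0 and local v0 to JDWP slot 2, giving the arguments-first numbering debuggers expect.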
1548
1549 /*
1550 * Circularly shifts registers so that arguments come last. Reverts
1551 * slots to dex style argument placement.
1552 */
1553 static uint16_t DemangleSlot(uint16_t slot, mirror::ArtMethod* m)
1554 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1555 const DexFile::CodeItem* code_item = m->GetCodeItem();
1556 if (code_item == nullptr) {
1557 // We should not get here for a method without code (native, proxy or abstract). Log it and
1558 // return the slot as is since all registers are arguments.
1559 LOG(WARNING) << "Trying to demangle slot for method without code " << PrettyMethod(m);
1560 return slot;
1561 }
1562 uint16_t ins_size = code_item->ins_size_;
1563 uint16_t locals_size = code_item->registers_size_ - ins_size;
1564 if (slot < ins_size) {
1565 return slot + locals_size;
1566 } else {
1567 return slot - ins_size;
1568 }
1569 }
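// Worked example (same hypothetical layout, registers_size_ = 5 and ins_size_ = 2): DemangleSlot
// maps JDWP slot 0 back to dex register v3 (the first argument) and JDWP slot 2 back to v0 (the
// first local), so DemangleSlot(MangleSlot(v, m), m) == v for any register valid in the method.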
1570
1571 JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, JDWP::ExpandBuf* pReply) {
1572 JDWP::JdwpError status;
1573 mirror::Class* c = DecodeClass(class_id, status);
1574 if (c == NULL) {
1575 return status;
1576 }
1577
1578 size_t instance_field_count = c->NumInstanceFields();
1579 size_t static_field_count = c->NumStaticFields();
1580
1581 expandBufAdd4BE(pReply, instance_field_count + static_field_count);
1582
1583 for (size_t i = 0; i < instance_field_count + static_field_count; ++i) {
1584 mirror::ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) : c->GetStaticField(i - instance_field_count);
1585 expandBufAddFieldId(pReply, ToFieldId(f));
1586 expandBufAddUtf8String(pReply, f->GetName());
1587 expandBufAddUtf8String(pReply, f->GetTypeDescriptor());
1588 if (with_generic) {
1589 static const char genericSignature[1] = "";
1590 expandBufAddUtf8String(pReply, genericSignature);
1591 }
1592 expandBufAdd4BE(pReply, MangleAccessFlags(f->GetAccessFlags()));
1593 }
1594 return JDWP::ERR_NONE;
1595 }
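// The reply built above follows the ReferenceType.Fields(WithGeneric) layout: a 4-byte count, then
// per field a fieldID, the UTF-8 name and type descriptor, the (empty) generic signature when
// requested, and the mangled modifier bits. OutputDeclaredMethods below mirrors the same shape,
// substituting methodIDs and JNI method signatures.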
1596
1597 JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_generic,
1598 JDWP::ExpandBuf* pReply) {
1599 JDWP::JdwpError status;
1600 mirror::Class* c = DecodeClass(class_id, status);
1601 if (c == NULL) {
1602 return status;
1603 }
1604
1605 size_t direct_method_count = c->NumDirectMethods();
1606 size_t virtual_method_count = c->NumVirtualMethods();
1607
1608 expandBufAdd4BE(pReply, direct_method_count + virtual_method_count);
1609
1610 for (size_t i = 0; i < direct_method_count + virtual_method_count; ++i) {
1611 mirror::ArtMethod* m = (i < direct_method_count) ? c->GetDirectMethod(i) : c->GetVirtualMethod(i - direct_method_count);
1612 expandBufAddMethodId(pReply, ToMethodId(m));
1613 expandBufAddUtf8String(pReply, m->GetName());
1614 expandBufAddUtf8String(pReply, m->GetSignature().ToString());
1615 if (with_generic) {
1616 static const char genericSignature[1] = "";
1617 expandBufAddUtf8String(pReply, genericSignature);
1618 }
1619 expandBufAdd4BE(pReply, MangleAccessFlags(m->GetAccessFlags()));
1620 }
1621 return JDWP::ERR_NONE;
1622 }
1623
1624 JDWP::JdwpError Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
1625 JDWP::JdwpError status;
1626 Thread* self = Thread::Current();
1627 StackHandleScope<1> hs(self);
1628 Handle<mirror::Class> c(hs.NewHandle(DecodeClass(class_id, status)));
1629 if (c.Get() == nullptr) {
1630 return status;
1631 }
1632 size_t interface_count = c->NumDirectInterfaces();
1633 expandBufAdd4BE(pReply, interface_count);
1634 for (size_t i = 0; i < interface_count; ++i) {
1635 expandBufAddRefTypeId(pReply,
1636 gRegistry->AddRefType(mirror::Class::GetDirectInterface(self, c, i)));
1637 }
1638 return JDWP::ERR_NONE;
1639 }
1640
1641 void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::ExpandBuf* pReply)
1642 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1643 struct DebugCallbackContext {
1644 int numItems;
1645 JDWP::ExpandBuf* pReply;
1646
1647 static bool Callback(void* context, uint32_t address, uint32_t line_number) {
1648 DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
1649 expandBufAdd8BE(pContext->pReply, address);
1650 expandBufAdd4BE(pContext->pReply, line_number);
1651 pContext->numItems++;
1652 return false;
1653 }
1654 };
1655 mirror::ArtMethod* m = FromMethodId(method_id);
1656 const DexFile::CodeItem* code_item = m->GetCodeItem();
1657 uint64_t start, end;
1658 if (code_item == nullptr) {
1659 DCHECK(m->IsNative() || m->IsProxyMethod());
1660 start = -1;
1661 end = -1;
1662 } else {
1663 start = 0;
1664 // Return the index of the last instruction
1665 end = code_item->insns_size_in_code_units_ - 1;
1666 }
1667
1668 expandBufAdd8BE(pReply, start);
1669 expandBufAdd8BE(pReply, end);
1670
1671 // Add numLines later
1672 size_t numLinesOffset = expandBufGetLength(pReply);
1673 expandBufAdd4BE(pReply, 0);
1674
1675 DebugCallbackContext context;
1676 context.numItems = 0;
1677 context.pReply = pReply;
1678
1679 if (code_item != nullptr) {
1680 m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
1681 DebugCallbackContext::Callback, NULL, &context);
1682 }
1683
1684 JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, context.numItems);
1685 }
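// The reply assembled above matches the Method.LineTable format: 8-byte start and end code indices
// (or -1/-1 for methods without code), a 4-byte line count patched in at numLinesOffset once the
// debug info has been walked, then one {8-byte code index, 4-byte line number} pair per entry.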
1686
1687 void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool with_generic,
1688 JDWP::ExpandBuf* pReply) {
1689 struct DebugCallbackContext {
1690 mirror::ArtMethod* method;
1691 JDWP::ExpandBuf* pReply;
1692 size_t variable_count;
1693 bool with_generic;
1694
1695 static void Callback(void* context, uint16_t slot, uint32_t startAddress, uint32_t endAddress,
1696 const char* name, const char* descriptor, const char* signature)
1697 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1698 DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
1699
1700 VLOG(jdwp) << StringPrintf(" %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d",
1701 pContext->variable_count, startAddress, endAddress - startAddress,
1702 name, descriptor, signature, slot,
1703 MangleSlot(slot, pContext->method));
1704
1705 slot = MangleSlot(slot, pContext->method);
1706
1707 expandBufAdd8BE(pContext->pReply, startAddress);
1708 expandBufAddUtf8String(pContext->pReply, name);
1709 expandBufAddUtf8String(pContext->pReply, descriptor);
1710 if (pContext->with_generic) {
1711 expandBufAddUtf8String(pContext->pReply, signature);
1712 }
1713 expandBufAdd4BE(pContext->pReply, endAddress - startAddress);
1714 expandBufAdd4BE(pContext->pReply, slot);
1715
1716 ++pContext->variable_count;
1717 }
1718 };
1719 mirror::ArtMethod* m = FromMethodId(method_id);
1720
1721 // arg_count considers doubles and longs to take 2 units.
1722 // variable_count considers everything to take 1 unit.
1723 std::string shorty(m->GetShorty());
1724 expandBufAdd4BE(pReply, mirror::ArtMethod::NumArgRegisters(shorty));
1725
1726 // We don't know the total number of variables yet, so leave a blank and update it later.
1727 size_t variable_count_offset = expandBufGetLength(pReply);
1728 expandBufAdd4BE(pReply, 0);
1729
1730 DebugCallbackContext context;
1731 context.method = m;
1732 context.pReply = pReply;
1733 context.variable_count = 0;
1734 context.with_generic = with_generic;
1735
1736 const DexFile::CodeItem* code_item = m->GetCodeItem();
1737 if (code_item != nullptr) {
1738 m->GetDexFile()->DecodeDebugInfo(
1739 code_item, m->IsStatic(), m->GetDexMethodIndex(), NULL, DebugCallbackContext::Callback,
1740 &context);
1741 }
1742
1743 JDWP::Set4BE(expandBufGetBuffer(pReply) + variable_count_offset, context.variable_count);
1744 }
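// The reply assembled above matches the Method.VariableTable(WithGeneric) format: a 4-byte argCnt
// (argument registers, with longs and doubles counted twice), a 4-byte variable count patched in
// once DecodeDebugInfo has run, then per variable an 8-byte start code index, name, descriptor,
// optional generic signature, a 4-byte length in code units, and the mangled (arguments-first) slot.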
1745
1746 void Dbg::OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value,
1747 JDWP::ExpandBuf* pReply) {
1748 mirror::ArtMethod* m = FromMethodId(method_id);
1749 JDWP::JdwpTag tag = BasicTagFromDescriptor(m->GetShorty());
1750 OutputJValue(tag, return_value, pReply);
1751 }
1752
1753 void Dbg::OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value,
1754 JDWP::ExpandBuf* pReply) {
1755 mirror::ArtField* f = FromFieldId(field_id);
1756 JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1757 OutputJValue(tag, field_value, pReply);
1758 }
1759
1760 JDWP::JdwpError Dbg::GetBytecodes(JDWP::RefTypeId, JDWP::MethodId method_id,
1761 std::vector<uint8_t>& bytecodes)
1762 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1763 mirror::ArtMethod* m = FromMethodId(method_id);
1764 if (m == NULL) {
1765 return JDWP::ERR_INVALID_METHODID;
1766 }
1767 const DexFile::CodeItem* code_item = m->GetCodeItem();
1768 size_t byte_count = code_item->insns_size_in_code_units_ * 2;
1769 const uint8_t* begin = reinterpret_cast<const uint8_t*>(code_item->insns_);
1770 const uint8_t* end = begin + byte_count;
1771 for (const uint8_t* p = begin; p != end; ++p) {
1772 bytecodes.push_back(*p);
1773 }
1774 return JDWP::ERR_NONE;
1775 }
1776
1777 JDWP::JdwpTag Dbg::GetFieldBasicTag(JDWP::FieldId field_id) {
1778 return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
1779 }
1780
1781 JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId field_id) {
1782 return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
1783 }
1784
1785 static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id,
1786 JDWP::FieldId field_id, JDWP::ExpandBuf* pReply,
1787 bool is_static)
1788 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1789 JDWP::JdwpError status;
1790 mirror::Class* c = DecodeClass(ref_type_id, status);
1791 if (ref_type_id != 0 && c == NULL) {
1792 return status;
1793 }
1794
1795 mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id);
1796 if ((!is_static && o == NULL) || o == ObjectRegistry::kInvalidObject) {
1797 return JDWP::ERR_INVALID_OBJECT;
1798 }
1799 mirror::ArtField* f = FromFieldId(field_id);
1800
1801 mirror::Class* receiver_class = c;
1802 if (receiver_class == NULL && o != NULL) {
1803 receiver_class = o->GetClass();
1804 }
1805 // TODO: should we give up now if receiver_class is NULL?
1806 if (receiver_class != NULL && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) {
1807 LOG(INFO) << "ERR_INVALID_FIELDID: " << PrettyField(f) << " " << PrettyClass(receiver_class);
1808 return JDWP::ERR_INVALID_FIELDID;
1809 }
1810
1811 // The RI only enforces the static/non-static mismatch in one direction.
1812 // TODO: should we change the tests and check both?
1813 if (is_static) {
1814 if (!f->IsStatic()) {
1815 return JDWP::ERR_INVALID_FIELDID;
1816 }
1817 } else {
1818 if (f->IsStatic()) {
1819 LOG(WARNING) << "Ignoring non-NULL receiver for ObjectReference.GetValues on static field " << PrettyField(f);
1820 }
1821 }
1822 if (f->IsStatic()) {
1823 o = f->GetDeclaringClass();
1824 }
1825
1826 JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1827 JValue field_value;
1828 if (tag == JDWP::JT_VOID) {
1829 LOG(FATAL) << "Unknown tag: " << tag;
1830 } else if (!IsPrimitiveTag(tag)) {
1831 field_value.SetL(f->GetObject(o));
1832 } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1833 field_value.SetJ(f->Get64(o));
1834 } else {
1835 field_value.SetI(f->Get32(o));
1836 }
1837 Dbg::OutputJValue(tag, &field_value, pReply);
1838
1839 return JDWP::ERR_NONE;
1840 }
1841
1842 JDWP::JdwpError Dbg::GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
1843 JDWP::ExpandBuf* pReply) {
1844 return GetFieldValueImpl(0, object_id, field_id, pReply, false);
1845 }
1846
1847 JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id, JDWP::ExpandBuf* pReply) {
1848 return GetFieldValueImpl(ref_type_id, 0, field_id, pReply, true);
1849 }
1850
1851 static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
1852 uint64_t value, int width, bool is_static)
1853 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1854 mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id);
1855 if ((!is_static && o == NULL) || o == ObjectRegistry::kInvalidObject) {
1856 return JDWP::ERR_INVALID_OBJECT;
1857 }
1858 mirror::ArtField* f = FromFieldId(field_id);
1859
1860 // The RI only enforces the static/non-static mismatch in one direction.
1861 // TODO: should we change the tests and check both?
1862 if (is_static) {
1863 if (!f->IsStatic()) {
1864 return JDWP::ERR_INVALID_FIELDID;
1865 }
1866 } else {
1867 if (f->IsStatic()) {
1868 LOG(WARNING) << "Ignoring non-NULL receiver for ObjectReference.SetValues on static field " << PrettyField(f);
1869 }
1870 }
1871 if (f->IsStatic()) {
1872 o = f->GetDeclaringClass();
1873 }
1874
1875 JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1876
1877 if (IsPrimitiveTag(tag)) {
1878 if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1879 CHECK_EQ(width, 8);
1880 // Debugging can't use transactional mode (runtime only).
1881 f->Set64<false>(o, value);
1882 } else {
1883 CHECK_LE(width, 4);
1884 // Debugging can't use transactional mode (runtime only).
1885 f->Set32<false>(o, value);
1886 }
1887 } else {
1888 mirror::Object* v = Dbg::GetObjectRegistry()->Get<mirror::Object*>(value);
1889 if (v == ObjectRegistry::kInvalidObject) {
1890 return JDWP::ERR_INVALID_OBJECT;
1891 }
1892 if (v != NULL) {
1893 mirror::Class* field_type;
1894 {
1895 StackHandleScope<3> hs(Thread::Current());
1896 HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v));
1897 HandleWrapper<mirror::ArtField> h_f(hs.NewHandleWrapper(&f));
1898 HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o));
1899 field_type = FieldHelper(h_f).GetType();
1900 }
1901 if (!field_type->IsAssignableFrom(v->GetClass())) {
1902 return JDWP::ERR_INVALID_OBJECT;
1903 }
1904 }
1905 // Debugging can't use transactional mode (runtime only).
1906 f->SetObject<false>(o, v);
1907 }
1908
1909 return JDWP::ERR_NONE;
1910 }
1911
1912 JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value,
1913 int width) {
1914 return SetFieldValueImpl(object_id, field_id, value, width, false);
1915 }
1916
1917 JDWP::JdwpError Dbg::SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width) {
1918 return SetFieldValueImpl(0, field_id, value, width, true);
1919 }
1920
1921 JDWP::JdwpError Dbg::StringToUtf8(JDWP::ObjectId string_id, std::string* str) {
1922 mirror::Object* obj = gRegistry->Get<mirror::Object*>(string_id);
1923 if (obj == nullptr || obj == ObjectRegistry::kInvalidObject) {
1924 return JDWP::ERR_INVALID_OBJECT;
1925 }
1926 {
1927 ScopedObjectAccessUnchecked soa(Thread::Current());
1928 mirror::Class* java_lang_String = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_String);
1929 if (!java_lang_String->IsAssignableFrom(obj->GetClass())) {
1930 // This isn't a string.
1931 return JDWP::ERR_INVALID_STRING;
1932 }
1933 }
1934 *str = obj->AsString()->ToModifiedUtf8();
1935 return JDWP::ERR_NONE;
1936 }
1937
1938 void Dbg::OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply) {
1939 if (IsPrimitiveTag(tag)) {
1940 expandBufAdd1(pReply, tag);
1941 if (tag == JDWP::JT_BOOLEAN || tag == JDWP::JT_BYTE) {
1942 expandBufAdd1(pReply, return_value->GetI());
1943 } else if (tag == JDWP::JT_CHAR || tag == JDWP::JT_SHORT) {
1944 expandBufAdd2BE(pReply, return_value->GetI());
1945 } else if (tag == JDWP::JT_FLOAT || tag == JDWP::JT_INT) {
1946 expandBufAdd4BE(pReply, return_value->GetI());
1947 } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1948 expandBufAdd8BE(pReply, return_value->GetJ());
1949 } else {
1950 CHECK_EQ(tag, JDWP::JT_VOID);
1951 }
1952 } else {
1953 ScopedObjectAccessUnchecked soa(Thread::Current());
1954 mirror::Object* value = return_value->GetL();
1955 expandBufAdd1(pReply, TagFromObject(soa, value));
1956 expandBufAddObjectId(pReply, gRegistry->Add(value));
1957 }
1958 }
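// OutputJValue therefore always emits a tagged value: one tag byte followed by 1, 2, 4 or 8 bytes
// for primitives (nothing for void), or an ObjectId for references, whose tag is refined via
// TagFromObject so the debugger sees e.g. JT_STRING rather than a plain JT_OBJECT where possible.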
1959
1960 JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string& name) {
1961 ScopedObjectAccessUnchecked soa(Thread::Current());
1962 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
1963 Thread* thread;
1964 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
1965 if (error != JDWP::ERR_NONE && error != JDWP::ERR_THREAD_NOT_ALIVE) {
1966 return error;
1967 }
1968
1969 // We still need to report the zombie threads' names, so we can't just call Thread::GetThreadName.
1970 mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id);
1971 mirror::ArtField* java_lang_Thread_name_field =
1972 soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
1973 mirror::String* s =
1974 reinterpret_cast<mirror::String*>(java_lang_Thread_name_field->GetObject(thread_object));
1975 if (s != NULL) {
1976 name = s->ToModifiedUtf8();
1977 }
1978 return JDWP::ERR_NONE;
1979 }
1980
1981 JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
1982 ScopedObjectAccessUnchecked soa(Thread::Current());
1983 mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id);
1984 if (thread_object == ObjectRegistry::kInvalidObject) {
1985 return JDWP::ERR_INVALID_OBJECT;
1986 }
1987 const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroup");
1988 // Okay, so it's an object, but is it actually a thread?
1989 JDWP::JdwpError error;
1990 {
1991 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
1992 Thread* thread;
1993 error = DecodeThread(soa, thread_id, thread);
1994 }
1995 if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
1996 // Zombie threads are in the null group.
1997 expandBufAddObjectId(pReply, JDWP::ObjectId(0));
1998 error = JDWP::ERR_NONE;
1999 } else if (error == JDWP::ERR_NONE) {
2000 mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
2001 CHECK(c != nullptr);
2002 mirror::ArtField* f = c->FindInstanceField("group", "Ljava/lang/ThreadGroup;");
2003 CHECK(f != nullptr);
2004 mirror::Object* group = f->GetObject(thread_object);
2005 CHECK(group != nullptr);
2006 JDWP::ObjectId thread_group_id = gRegistry->Add(group);
2007 expandBufAddObjectId(pReply, thread_group_id);
2008 }
2009 soa.Self()->EndAssertNoThreadSuspension(old_cause);
2010 return error;
2011 }
2012
2013 static mirror::Object* DecodeThreadGroup(ScopedObjectAccessUnchecked& soa,
2014 JDWP::ObjectId thread_group_id, JDWP::JdwpError* error)
2015 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2016 mirror::Object* thread_group = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_group_id);
2017 if (thread_group == nullptr || thread_group == ObjectRegistry::kInvalidObject) {
2018 *error = JDWP::ERR_INVALID_OBJECT;
2019 return nullptr;
2020 }
2021 mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
2022 CHECK(c != nullptr);
2023 if (!c->IsAssignableFrom(thread_group->GetClass())) {
2024 // This is not a java.lang.ThreadGroup.
2025 *error = JDWP::ERR_INVALID_THREAD_GROUP;
2026 return nullptr;
2027 }
2028 *error = JDWP::ERR_NONE;
2029 return thread_group;
2030 }
2031
2032 JDWP::JdwpError Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) {
2033 ScopedObjectAccessUnchecked soa(Thread::Current());
2034 JDWP::JdwpError error;
2035 mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
2036 if (error != JDWP::ERR_NONE) {
2037 return error;
2038 }
2039 const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupName");
2040 mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
2041 mirror::ArtField* f = c->FindInstanceField("name", "Ljava/lang/String;");
2042 CHECK(f != NULL);
2043 mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group));
2044 soa.Self()->EndAssertNoThreadSuspension(old_cause);
2045
2046 std::string thread_group_name(s->ToModifiedUtf8());
2047 expandBufAddUtf8String(pReply, thread_group_name);
2048 return JDWP::ERR_NONE;
2049 }
2050
2051 JDWP::JdwpError Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) {
2052 ScopedObjectAccessUnchecked soa(Thread::Current());
2053 JDWP::JdwpError error;
2054 mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
2055 if (error != JDWP::ERR_NONE) {
2056 return error;
2057 }
2058 const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupParent");
2059 mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
2060 CHECK(c != nullptr);
2061 mirror::ArtField* f = c->FindInstanceField("parent", "Ljava/lang/ThreadGroup;");
2062 CHECK(f != NULL);
2063 mirror::Object* parent = f->GetObject(thread_group);
2064 soa.Self()->EndAssertNoThreadSuspension(old_cause);
2065
2066 JDWP::ObjectId parent_group_id = gRegistry->Add(parent);
2067 expandBufAddObjectId(pReply, parent_group_id);
2068 return JDWP::ERR_NONE;
2069 }
2070
2071 static void GetChildThreadGroups(ScopedObjectAccessUnchecked& soa, mirror::Object* thread_group,
2072 std::vector<JDWP::ObjectId>* child_thread_group_ids)
2073 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2074 CHECK(thread_group != nullptr);
2075
2076 // Get the ArrayList<ThreadGroup> "groups" out of this thread group...
2077 mirror::ArtField* groups_field = thread_group->GetClass()->FindInstanceField("groups", "Ljava/util/List;");
2078 mirror::Object* groups_array_list = groups_field->GetObject(thread_group);
2079
2080 // Get the array and size out of the ArrayList<ThreadGroup>...
2081 mirror::ArtField* array_field = groups_array_list->GetClass()->FindInstanceField("array", "[Ljava/lang/Object;");
2082 mirror::ArtField* size_field = groups_array_list->GetClass()->FindInstanceField("size", "I");
2083 mirror::ObjectArray<mirror::Object>* groups_array =
2084 array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>();
2085 const int32_t size = size_field->GetInt(groups_array_list);
2086
2087 // Copy the first 'size' elements out of the array into the result.
2088 ObjectRegistry* registry = Dbg::GetObjectRegistry();
2089 for (int32_t i = 0; i < size; ++i) {
2090 child_thread_group_ids->push_back(registry->Add(groups_array->Get(i)));
2091 }
2092 }
2093
2094 JDWP::JdwpError Dbg::GetThreadGroupChildren(JDWP::ObjectId thread_group_id,
2095 JDWP::ExpandBuf* pReply) {
2096 ScopedObjectAccessUnchecked soa(Thread::Current());
2097 JDWP::JdwpError error;
2098 mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
2099 if (error != JDWP::ERR_NONE) {
2100 return error;
2101 }
2102
2103 // Add child threads.
2104 {
2105 std::vector<JDWP::ObjectId> child_thread_ids;
2106 GetThreads(thread_group, &child_thread_ids);
2107 expandBufAdd4BE(pReply, child_thread_ids.size());
2108 for (JDWP::ObjectId child_thread_id : child_thread_ids) {
2109 expandBufAddObjectId(pReply, child_thread_id);
2110 }
2111 }
2112
2113 // Add child thread groups.
2114 {
2115 std::vector<JDWP::ObjectId> child_thread_groups_ids;
2116 GetChildThreadGroups(soa, thread_group, &child_thread_groups_ids);
2117 expandBufAdd4BE(pReply, child_thread_groups_ids.size());
2118 for (JDWP::ObjectId child_thread_group_id : child_thread_groups_ids) {
2119 expandBufAddObjectId(pReply, child_thread_group_id);
2120 }
2121 }
2122
2123 return JDWP::ERR_NONE;
2124 }
2125
2126 JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
2127 ScopedObjectAccessUnchecked soa(Thread::Current());
2128 mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
2129 mirror::Object* group = f->GetObject(f->GetDeclaringClass());
2130 return gRegistry->Add(group);
2131 }
2132
2133 JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) {
2134 switch (state) {
2135 case kBlocked:
2136 return JDWP::TS_MONITOR;
2137 case kNative:
2138 case kRunnable:
2139 case kSuspended:
2140 return JDWP::TS_RUNNING;
2141 case kSleeping:
2142 return JDWP::TS_SLEEPING;
2143 case kStarting:
2144 case kTerminated:
2145 return JDWP::TS_ZOMBIE;
2146 case kTimedWaiting:
2147 case kWaitingForCheckPointsToRun:
2148 case kWaitingForDebuggerSend:
2149 case kWaitingForDebuggerSuspension:
2150 case kWaitingForDebuggerToAttach:
2151 case kWaitingForDeoptimization:
2152 case kWaitingForGcToComplete:
2153 case kWaitingForJniOnLoad:
2154 case kWaitingForMethodTracingStart:
2155 case kWaitingForSignalCatcherOutput:
2156 case kWaitingInMainDebuggerLoop:
2157 case kWaitingInMainSignalCatcherLoop:
2158 case kWaitingPerformingGc:
2159 case kWaiting:
2160 return JDWP::TS_WAIT;
2161 // Don't add a 'default' here so the compiler can spot incompatible enum changes.
2162 }
2163 LOG(FATAL) << "Unknown thread state: " << state;
2164 return JDWP::TS_ZOMBIE;
2165 }
2166
2167 JDWP::JdwpError Dbg::GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus,
2168 JDWP::JdwpSuspendStatus* pSuspendStatus) {
2169 ScopedObjectAccess soa(Thread::Current());
2170
2171 *pSuspendStatus = JDWP::SUSPEND_STATUS_NOT_SUSPENDED;
2172
2173 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2174 Thread* thread;
2175 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2176 if (error != JDWP::ERR_NONE) {
2177 if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
2178 *pThreadStatus = JDWP::TS_ZOMBIE;
2179 return JDWP::ERR_NONE;
2180 }
2181 return error;
2182 }
2183
2184 if (IsSuspendedForDebugger(soa, thread)) {
2185 *pSuspendStatus = JDWP::SUSPEND_STATUS_SUSPENDED;
2186 }
2187
2188 *pThreadStatus = ToJdwpThreadStatus(thread->GetState());
2189 return JDWP::ERR_NONE;
2190 }
2191
2192 JDWP::JdwpError Dbg::GetThreadDebugSuspendCount(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
2193 ScopedObjectAccess soa(Thread::Current());
2194 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2195 Thread* thread;
2196 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2197 if (error != JDWP::ERR_NONE) {
2198 return error;
2199 }
2200 MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
2201 expandBufAdd4BE(pReply, thread->GetDebugSuspendCount());
2202 return JDWP::ERR_NONE;
2203 }
2204
2205 JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) {
2206 ScopedObjectAccess soa(Thread::Current());
2207 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2208 Thread* thread;
2209 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2210 if (error != JDWP::ERR_NONE) {
2211 return error;
2212 }
2213 thread->Interrupt(soa.Self());
2214 return JDWP::ERR_NONE;
2215 }
2216
2217 static bool IsInDesiredThreadGroup(ScopedObjectAccessUnchecked& soa,
2218 mirror::Object* desired_thread_group, mirror::Object* peer)
2219 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2220 // Do we want threads from all thread groups?
2221 if (desired_thread_group == nullptr) {
2222 return true;
2223 }
2224 mirror::ArtField* thread_group_field = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
2225 DCHECK(thread_group_field != nullptr);
2226 mirror::Object* group = thread_group_field->GetObject(peer);
2227 return (group == desired_thread_group);
2228 }
2229
2230 void Dbg::GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* thread_ids) {
2231 ScopedObjectAccessUnchecked soa(Thread::Current());
2232 std::list<Thread*> all_threads_list;
2233 {
2234 MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
2235 all_threads_list = Runtime::Current()->GetThreadList()->GetList();
2236 }
2237 for (Thread* t : all_threads_list) {
2238 if (t == Dbg::GetDebugThread()) {
2239 // Skip the JDWP thread. Some debuggers get bent out of shape when they can't suspend and
2240 // query all threads, so it's easier if we just don't tell them about this thread.
2241 continue;
2242 }
2243 if (t->IsStillStarting()) {
2244 // This thread is being started (and has been registered in the thread list). However, it is
2245 // not completely started yet so we must ignore it.
2246 continue;
2247 }
2248 mirror::Object* peer = t->GetPeer();
2249 if (peer == nullptr) {
2250 // peer might be NULL if the thread is still starting up. We can't tell the debugger about
2251 // this thread yet.
2252 // TODO: if we identified threads to the debugger by their Thread*
2253 // rather than their peer's mirror::Object*, we could fix this.
2254 // Doing so might help us report ZOMBIE threads too.
2255 continue;
2256 }
2257 if (IsInDesiredThreadGroup(soa, thread_group, peer)) {
2258 thread_ids->push_back(gRegistry->Add(peer));
2259 }
2260 }
2261 }
2262
2263 static int GetStackDepth(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2264 struct CountStackDepthVisitor : public StackVisitor {
2265 explicit CountStackDepthVisitor(Thread* thread)
2266 : StackVisitor(thread, NULL), depth(0) {}
2267
2268 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2269 // annotalysis.
2270 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2271 if (!GetMethod()->IsRuntimeMethod()) {
2272 ++depth;
2273 }
2274 return true;
2275 }
2276 size_t depth;
2277 };
2278
2279 CountStackDepthVisitor visitor(thread);
2280 visitor.WalkStack();
2281 return visitor.depth;
2282 }
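// Runtime-method frames (frames without a useful Method*) are not counted above, so the depth
// reported here stays consistent with GetThreadFrames below, which skips the same frames when
// streaming them to the debugger.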
2283
2284 JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t& result) {
2285 ScopedObjectAccess soa(Thread::Current());
2286 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2287 Thread* thread;
2288 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2289 if (error != JDWP::ERR_NONE) {
2290 return error;
2291 }
2292 if (!IsSuspendedForDebugger(soa, thread)) {
2293 return JDWP::ERR_THREAD_NOT_SUSPENDED;
2294 }
2295 result = GetStackDepth(thread);
2296 return JDWP::ERR_NONE;
2297 }
2298
2299 JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
2300 size_t frame_count, JDWP::ExpandBuf* buf) {
2301 class GetFrameVisitor : public StackVisitor {
2302 public:
2303 GetFrameVisitor(Thread* thread, size_t start_frame, size_t frame_count, JDWP::ExpandBuf* buf)
2304 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2305 : StackVisitor(thread, NULL), depth_(0),
2306 start_frame_(start_frame), frame_count_(frame_count), buf_(buf) {
2307 expandBufAdd4BE(buf_, frame_count_);
2308 }
2309
2310 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2311 // annotalysis.
2312 virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2313 if (GetMethod()->IsRuntimeMethod()) {
2314 return true; // The debugger can't do anything useful with a frame that has no Method*.
2315 }
2316 if (depth_ >= start_frame_ + frame_count_) {
2317 return false;
2318 }
2319 if (depth_ >= start_frame_) {
2320 JDWP::FrameId frame_id(GetFrameId());
2321 JDWP::JdwpLocation location;
2322 SetJdwpLocation(&location, GetMethod(), GetDexPc());
2323 VLOG(jdwp) << StringPrintf(" Frame %3zd: id=%3" PRIu64 " ", depth_, frame_id) << location;
2324 expandBufAdd8BE(buf_, frame_id);
2325 expandBufAddLocation(buf_, location);
2326 }
2327 ++depth_;
2328 return true;
2329 }
2330
2331 private:
2332 size_t depth_;
2333 const size_t start_frame_;
2334 const size_t frame_count_;
2335 JDWP::ExpandBuf* buf_;
2336 };
2337
2338 ScopedObjectAccessUnchecked soa(Thread::Current());
2339 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2340 Thread* thread;
2341 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2342 if (error != JDWP::ERR_NONE) {
2343 return error;
2344 }
2345 if (!IsSuspendedForDebugger(soa, thread)) {
2346 return JDWP::ERR_THREAD_NOT_SUSPENDED;
2347 }
2348 GetFrameVisitor visitor(thread, start_frame, frame_count, buf);
2349 visitor.WalkStack();
2350 return JDWP::ERR_NONE;
2351 }
2352
2353 JDWP::ObjectId Dbg::GetThreadSelfId() {
2354 return GetThreadId(Thread::Current());
2355 }
2356
2357 JDWP::ObjectId Dbg::GetThreadId(Thread* thread) {
2358 ScopedObjectAccessUnchecked soa(Thread::Current());
2359 return gRegistry->Add(thread->GetPeer());
2360 }
2361
2362 void Dbg::SuspendVM() {
2363 Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
2364 }
2365
2366 void Dbg::ResumeVM() {
2367 Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
2368 }
2369
2370 JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) {
2371 Thread* self = Thread::Current();
2372 ScopedLocalRef<jobject> peer(self->GetJniEnv(), NULL);
2373 {
2374 ScopedObjectAccess soa(self);
2375 peer.reset(soa.AddLocalReference<jobject>(gRegistry->Get<mirror::Object*>(thread_id)));
2376 }
2377 if (peer.get() == NULL) {
2378 return JDWP::ERR_THREAD_NOT_ALIVE;
2379 }
2380 // Suspend thread to build stack trace. Take suspend thread lock to avoid races with threads
2381 // trying to suspend this one.
2382 MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
2383 bool timed_out;
2384 ThreadList* thread_list = Runtime::Current()->GetThreadList();
2385 Thread* thread = thread_list->SuspendThreadByPeer(peer.get(), request_suspension, true,
2386 &timed_out);
2387 if (thread != NULL) {
2388 return JDWP::ERR_NONE;
2389 } else if (timed_out) {
2390 return JDWP::ERR_INTERNAL;
2391 } else {
2392 return JDWP::ERR_THREAD_NOT_ALIVE;
2393 }
2394 }
2395
2396 void Dbg::ResumeThread(JDWP::ObjectId thread_id) {
2397 ScopedObjectAccessUnchecked soa(Thread::Current());
2398 mirror::Object* peer = gRegistry->Get<mirror::Object*>(thread_id);
2399 Thread* thread;
2400 {
2401 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2402 thread = Thread::FromManagedThread(soa, peer);
2403 }
2404 if (thread == NULL) {
2405 LOG(WARNING) << "No such thread for resume: " << peer;
2406 return;
2407 }
2408 bool needs_resume;
2409 {
2410 MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
2411 needs_resume = thread->GetSuspendCount() > 0;
2412 }
2413 if (needs_resume) {
2414 Runtime::Current()->GetThreadList()->Resume(thread, true);
2415 }
2416 }
2417
2418 void Dbg::SuspendSelf() {
2419 Runtime::Current()->GetThreadList()->SuspendSelfForDebugger();
2420 }
2421
2422 struct GetThisVisitor : public StackVisitor {
2423 GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
2424 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2425 : StackVisitor(thread, context), this_object(NULL), frame_id(frame_id) {}
2426
2427 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2428 // annotalysis.
2429 virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2430 if (frame_id != GetFrameId()) {
2431 return true; // continue
2432 } else {
2433 this_object = GetThisObject();
2434 return false;
2435 }
2436 }
2437
2438 mirror::Object* this_object;
2439 JDWP::FrameId frame_id;
2440 };
2441
2442 JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
2443 JDWP::ObjectId* result) {
2444 ScopedObjectAccessUnchecked soa(Thread::Current());
2445 Thread* thread;
2446 {
2447 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2448 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2449 if (error != JDWP::ERR_NONE) {
2450 return error;
2451 }
2452 if (!IsSuspendedForDebugger(soa, thread)) {
2453 return JDWP::ERR_THREAD_NOT_SUSPENDED;
2454 }
2455 }
2456 std::unique_ptr<Context> context(Context::Create());
2457 GetThisVisitor visitor(thread, context.get(), frame_id);
2458 visitor.WalkStack();
2459 *result = gRegistry->Add(visitor.this_object);
2460 return JDWP::ERR_NONE;
2461 }
2462
2463 JDWP::JdwpError Dbg::GetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot,
2464 JDWP::JdwpTag tag, uint8_t* buf, size_t width) {
2465 struct GetLocalVisitor : public StackVisitor {
2466 GetLocalVisitor(const ScopedObjectAccessUnchecked& soa, Thread* thread, Context* context,
2467 JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width)
2468 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2469 : StackVisitor(thread, context), soa_(soa), frame_id_(frame_id), slot_(slot), tag_(tag),
2470 buf_(buf), width_(width), error_(JDWP::ERR_NONE) {}
2471
2472 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2473 // annotalysis.
2474 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2475 if (GetFrameId() != frame_id_) {
2476 return true; // Not our frame, carry on.
2477 }
2478 // TODO: check that the tag is compatible with the actual type of the slot!
2479 // TODO: check slot is valid for this method or return INVALID_SLOT error.
2480 mirror::ArtMethod* m = GetMethod();
2481 if (m->IsNative()) {
2482 // We can't read local values from a native method.
2483 error_ = JDWP::ERR_OPAQUE_FRAME;
2484 return false;
2485 }
2486 uint16_t reg = DemangleSlot(slot_, m);
2487 constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION;
2488 switch (tag_) {
2489 case JDWP::JT_BOOLEAN: {
2490 CHECK_EQ(width_, 1U);
2491 uint32_t intVal;
2492 if (GetVReg(m, reg, kIntVReg, &intVal)) {
2493 VLOG(jdwp) << "get boolean local " << reg << " = " << intVal;
2494 JDWP::Set1(buf_+1, intVal != 0);
2495 } else {
2496 VLOG(jdwp) << "failed to get boolean local " << reg;
2497 error_ = kFailureErrorCode;
2498 }
2499 break;
2500 }
2501 case JDWP::JT_BYTE: {
2502 CHECK_EQ(width_, 1U);
2503 uint32_t intVal;
2504 if (GetVReg(m, reg, kIntVReg, &intVal)) {
2505 VLOG(jdwp) << "get byte local " << reg << " = " << intVal;
2506 JDWP::Set1(buf_+1, intVal);
2507 } else {
2508 VLOG(jdwp) << "failed to get byte local " << reg;
2509 error_ = kFailureErrorCode;
2510 }
2511 break;
2512 }
2513 case JDWP::JT_SHORT:
2514 case JDWP::JT_CHAR: {
2515 CHECK_EQ(width_, 2U);
2516 uint32_t intVal;
2517 if (GetVReg(m, reg, kIntVReg, &intVal)) {
2518 VLOG(jdwp) << "get short/char local " << reg << " = " << intVal;
2519 JDWP::Set2BE(buf_+1, intVal);
2520 } else {
2521 VLOG(jdwp) << "failed to get short/char local " << reg;
2522 error_ = kFailureErrorCode;
2523 }
2524 break;
2525 }
2526 case JDWP::JT_INT: {
2527 CHECK_EQ(width_, 4U);
2528 uint32_t intVal;
2529 if (GetVReg(m, reg, kIntVReg, &intVal)) {
2530 VLOG(jdwp) << "get int local " << reg << " = " << intVal;
2531 JDWP::Set4BE(buf_+1, intVal);
2532 } else {
2533 VLOG(jdwp) << "failed to get int local " << reg;
2534 error_ = kFailureErrorCode;
2535 }
2536 break;
2537 }
2538 case JDWP::JT_FLOAT: {
2539 CHECK_EQ(width_, 4U);
2540 uint32_t intVal;
2541 if (GetVReg(m, reg, kFloatVReg, &intVal)) {
2542 VLOG(jdwp) << "get float local " << reg << " = " << intVal;
2543 JDWP::Set4BE(buf_+1, intVal);
2544 } else {
2545 VLOG(jdwp) << "failed to get float local " << reg;
2546 error_ = kFailureErrorCode;
2547 }
2548 break;
2549 }
2550 case JDWP::JT_ARRAY:
2551 case JDWP::JT_CLASS_LOADER:
2552 case JDWP::JT_CLASS_OBJECT:
2553 case JDWP::JT_OBJECT:
2554 case JDWP::JT_STRING:
2555 case JDWP::JT_THREAD:
2556 case JDWP::JT_THREAD_GROUP: {
2557 CHECK_EQ(width_, sizeof(JDWP::ObjectId));
2558 uint32_t intVal;
2559 if (GetVReg(m, reg, kReferenceVReg, &intVal)) {
2560 mirror::Object* o = reinterpret_cast<mirror::Object*>(intVal);
2561 VLOG(jdwp) << "get " << tag_ << " object local " << reg << " = " << o;
2562 if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
2563 LOG(FATAL) << "Register " << reg << " expected to hold " << tag_ << " object: " << o;
2564 }
2565 tag_ = TagFromObject(soa_, o);
2566 JDWP::SetObjectId(buf_+1, gRegistry->Add(o));
2567 } else {
2568 VLOG(jdwp) << "failed to get " << tag_ << " object local " << reg;
2569 error_ = kFailureErrorCode;
2570 }
2571 break;
2572 }
2573 case JDWP::JT_DOUBLE: {
2574 CHECK_EQ(width_, 8U);
2575 uint64_t longVal;
2576 if (GetVRegPair(m, reg, kDoubleLoVReg, kDoubleHiVReg, &longVal)) {
2577 VLOG(jdwp) << "get double local " << reg << " = " << longVal;
2578 JDWP::Set8BE(buf_+1, longVal);
2579 } else {
2580 VLOG(jdwp) << "failed to get double local " << reg;
2581 error_ = kFailureErrorCode;
2582 }
2583 break;
2584 }
2585 case JDWP::JT_LONG: {
2586 CHECK_EQ(width_, 8U);
2587 uint64_t longVal;
2588 if (GetVRegPair(m, reg, kLongLoVReg, kLongHiVReg, &longVal)) {
2589 VLOG(jdwp) << "get long local " << reg << " = " << longVal;
2590 JDWP::Set8BE(buf_+1, longVal);
2591 } else {
2592 VLOG(jdwp) << "failed to get long local " << reg;
2593 error_ = kFailureErrorCode;
2594 }
2595 break;
2596 }
2597 default:
2598 LOG(FATAL) << "Unknown tag " << tag_;
2599 break;
2600 }
2601
2602 // Prepend tag, which may have been updated.
2603 JDWP::Set1(buf_, tag_);
2604 return false;
2605 }
2606 const ScopedObjectAccessUnchecked& soa_;
2607 const JDWP::FrameId frame_id_;
2608 const int slot_;
2609 JDWP::JdwpTag tag_;
2610 uint8_t* const buf_;
2611 const size_t width_;
2612 JDWP::JdwpError error_;
2613 };
2614
2615 ScopedObjectAccessUnchecked soa(Thread::Current());
2616 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2617 Thread* thread;
2618 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2619 if (error != JDWP::ERR_NONE) {
2620 return error;
2621 }
2622 // TODO check thread is suspended by the debugger ?
2623 std::unique_ptr<Context> context(Context::Create());
2624 GetLocalVisitor visitor(soa, thread, context.get(), frame_id, slot, tag, buf, width);
2625 visitor.WalkStack();
2626 return visitor.error_;
2627 }
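// Note for GetLocalValue above and SetLocalValue below: slot numbers arriving from the debugger use
// the JDWP arguments-first numbering, so both visitors convert them back to dex register numbers
// with DemangleSlot before touching the frame's vregs. For reads, the (possibly refined) tag is
// written to buf[0] and the value, big-endian, starting at buf + 1.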
2628
2629 JDWP::JdwpError Dbg::SetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot,
2630 JDWP::JdwpTag tag, uint64_t value, size_t width) {
2631 struct SetLocalVisitor : public StackVisitor {
2632 SetLocalVisitor(Thread* thread, Context* context,
2633 JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint64_t value,
2634 size_t width)
2635 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2636 : StackVisitor(thread, context),
2637 frame_id_(frame_id), slot_(slot), tag_(tag), value_(value), width_(width),
2638 error_(JDWP::ERR_NONE) {}
2639
2640 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2641 // annotalysis.
2642 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2643 if (GetFrameId() != frame_id_) {
2644 return true; // Not our frame, carry on.
2645 }
2646 // TODO: check that the tag is compatible with the actual type of the slot!
2647 // TODO: check slot is valid for this method or return INVALID_SLOT error.
2648 mirror::ArtMethod* m = GetMethod();
2649 if (m->IsNative()) {
2650 // We can't write local values into a native method.
2651 error_ = JDWP::ERR_OPAQUE_FRAME;
2652 return false;
2653 }
2654 uint16_t reg = DemangleSlot(slot_, m);
2655 constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION;
2656 switch (tag_) {
2657 case JDWP::JT_BOOLEAN:
2658 case JDWP::JT_BYTE:
2659 CHECK_EQ(width_, 1U);
2660 if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
2661 VLOG(jdwp) << "failed to set boolean/byte local " << reg << " = "
2662 << static_cast<uint32_t>(value_);
2663 error_ = kFailureErrorCode;
2664 }
2665 break;
2666 case JDWP::JT_SHORT:
2667 case JDWP::JT_CHAR:
2668 CHECK_EQ(width_, 2U);
2669 if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
2670 VLOG(jdwp) << "failed to set short/char local " << reg << " = "
2671 << static_cast<uint32_t>(value_);
2672 error_ = kFailureErrorCode;
2673 }
2674 break;
2675 case JDWP::JT_INT:
2676 CHECK_EQ(width_, 4U);
2677 if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
2678 VLOG(jdwp) << "failed to set int local " << reg << " = "
2679 << static_cast<uint32_t>(value_);
2680 error_ = kFailureErrorCode;
2681 }
2682 break;
2683 case JDWP::JT_FLOAT:
2684 CHECK_EQ(width_, 4U);
2685 if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kFloatVReg)) {
2686 VLOG(jdwp) << "failed to set float local " << reg << " = "
2687 << static_cast<uint32_t>(value_);
2688 error_ = kFailureErrorCode;
2689 }
2690 break;
2691 case JDWP::JT_ARRAY:
2692 case JDWP::JT_CLASS_LOADER:
2693 case JDWP::JT_CLASS_OBJECT:
2694 case JDWP::JT_OBJECT:
2695 case JDWP::JT_STRING:
2696 case JDWP::JT_THREAD:
2697 case JDWP::JT_THREAD_GROUP: {
2698 CHECK_EQ(width_, sizeof(JDWP::ObjectId));
2699 mirror::Object* o = gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value_));
2700 if (o == ObjectRegistry::kInvalidObject) {
2701 VLOG(jdwp) << tag_ << " object " << o << " is an invalid object";
2702 error_ = JDWP::ERR_INVALID_OBJECT;
2703 } else if (!SetVReg(m, reg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)),
2704 kReferenceVReg)) {
2705 VLOG(jdwp) << "failed to set " << tag_ << " object local " << reg << " = " << o;
2706 error_ = kFailureErrorCode;
2707 }
2708 break;
2709 }
2710 case JDWP::JT_DOUBLE: {
2711 CHECK_EQ(width_, 8U);
2712 bool success = SetVRegPair(m, reg, value_, kDoubleLoVReg, kDoubleHiVReg);
2713 if (!success) {
2714 VLOG(jdwp) << "failed to set double local " << reg << " = " << value_;
2715 error_ = kFailureErrorCode;
2716 }
2717 break;
2718 }
2719 case JDWP::JT_LONG: {
2720 CHECK_EQ(width_, 8U);
2721 bool success = SetVRegPair(m, reg, value_, kLongLoVReg, kLongHiVReg);
2722 if (!success) {
2723 VLOG(jdwp) << "failed to set long local " << reg << " = " << value_;
2724 error_ = kFailureErrorCode;
2725 }
2726 break;
2727 }
2728 default:
2729 LOG(FATAL) << "Unknown tag " << tag_;
2730 break;
2731 }
2732 return false;
2733 }
2734
2735 const JDWP::FrameId frame_id_;
2736 const int slot_;
2737 const JDWP::JdwpTag tag_;
2738 const uint64_t value_;
2739 const size_t width_;
2740 JDWP::JdwpError error_;
2741 };
2742
2743 ScopedObjectAccessUnchecked soa(Thread::Current());
2744 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2745 Thread* thread;
2746 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2747 if (error != JDWP::ERR_NONE) {
2748 return error;
2749 }
2750 // TODO check thread is suspended by the debugger ?
2751 std::unique_ptr<Context> context(Context::Create());
2752 SetLocalVisitor visitor(thread, context.get(), frame_id, slot, tag, value, width);
2753 visitor.WalkStack();
2754 return visitor.error_;
2755 }
2756
2757 static void SetEventLocation(JDWP::EventLocation* location, mirror::ArtMethod* m, uint32_t dex_pc)
2758 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2759 DCHECK(location != nullptr);
2760 if (m == nullptr) {
2761 memset(location, 0, sizeof(*location));
2762 } else {
2763 location->method = m;
2764 location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint32_t>(-1) : dex_pc;
2765 }
2766 }
2767
2768 void Dbg::PostLocationEvent(mirror::ArtMethod* m, int dex_pc, mirror::Object* this_object,
2769 int event_flags, const JValue* return_value) {
2770 if (!IsDebuggerActive()) {
2771 return;
2772 }
2773 DCHECK(m != nullptr);
2774 DCHECK_EQ(m->IsStatic(), this_object == nullptr);
2775 JDWP::EventLocation location;
2776 SetEventLocation(&location, m, dex_pc);
2777
2778 gJdwpState->PostLocationEvent(&location, this_object, event_flags, return_value);
2779 }
2780
2781 void Dbg::PostFieldAccessEvent(mirror::ArtMethod* m, int dex_pc,
2782 mirror::Object* this_object, mirror::ArtField* f) {
2783 if (!IsDebuggerActive()) {
2784 return;
2785 }
2786 DCHECK(m != nullptr);
2787 DCHECK(f != nullptr);
2788 JDWP::EventLocation location;
2789 SetEventLocation(&location, m, dex_pc);
2790
2791 gJdwpState->PostFieldEvent(&location, f, this_object, nullptr, false);
2792 }
2793
2794 void Dbg::PostFieldModificationEvent(mirror::ArtMethod* m, int dex_pc,
2795 mirror::Object* this_object, mirror::ArtField* f,
2796 const JValue* field_value) {
2797 if (!IsDebuggerActive()) {
2798 return;
2799 }
2800 DCHECK(m != nullptr);
2801 DCHECK(f != nullptr);
2802 DCHECK(field_value != nullptr);
2803 JDWP::EventLocation location;
2804 SetEventLocation(&location, m, dex_pc);
2805
2806 gJdwpState->PostFieldEvent(&location, f, this_object, field_value, true);
2807 }
2808
2809 void Dbg::PostException(const ThrowLocation& throw_location,
2810 mirror::ArtMethod* catch_method,
2811 uint32_t catch_dex_pc, mirror::Throwable* exception_object) {
2812 if (!IsDebuggerActive()) {
2813 return;
2814 }
2815 JDWP::EventLocation exception_throw_location;
2816 SetEventLocation(&exception_throw_location, throw_location.GetMethod(), throw_location.GetDexPc());
2817 JDWP::EventLocation exception_catch_location;
2818 SetEventLocation(&exception_catch_location, catch_method, catch_dex_pc);
2819
2820 gJdwpState->PostException(&exception_throw_location, exception_object, &exception_catch_location,
2821 throw_location.GetThis());
2822 }
2823
2824 void Dbg::PostClassPrepare(mirror::Class* c) {
2825 if (!IsDebuggerActive()) {
2826 return;
2827 }
2828 gJdwpState->PostClassPrepare(c);
2829 }
2830
2831 void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object,
2832 mirror::ArtMethod* m, uint32_t dex_pc,
2833 int event_flags, const JValue* return_value) {
2834 if (!IsDebuggerActive() || dex_pc == static_cast<uint32_t>(-2) /* fake method exit */) {
2835 return;
2836 }
2837
2838 if (IsBreakpoint(m, dex_pc)) {
2839 event_flags |= kBreakpoint;
2840 }
2841
2842 // If the debugger is single-stepping one of our threads, check to
2843 // see if we're that thread and we've reached a step point.
2844 const SingleStepControl* single_step_control = thread->GetSingleStepControl();
2845 DCHECK(single_step_control != nullptr);
2846 if (single_step_control->is_active) {
2847 CHECK(!m->IsNative());
2848 if (single_step_control->step_depth == JDWP::SD_INTO) {
2849 // Step into method calls. We break when the line number
2850 // or method pointer changes. If we're in SS_MIN mode, we
2851 // always stop.
2852 if (single_step_control->method != m) {
2853 event_flags |= kSingleStep;
2854 VLOG(jdwp) << "SS new method";
2855 } else if (single_step_control->step_size == JDWP::SS_MIN) {
2856 event_flags |= kSingleStep;
2857 VLOG(jdwp) << "SS new instruction";
2858 } else if (single_step_control->ContainsDexPc(dex_pc)) {
2859 event_flags |= kSingleStep;
2860 VLOG(jdwp) << "SS new line";
2861 }
2862 } else if (single_step_control->step_depth == JDWP::SD_OVER) {
2863 // Step over method calls. We break when the line number is
2864 // different and the frame depth is <= the original frame
2865 // depth. (We can't just compare on the method, because we
2866 // might get unrolled past it by an exception, and it's tricky
2867 // to identify recursion.)
2868
2869 int stack_depth = GetStackDepth(thread);
2870
2871 if (stack_depth < single_step_control->stack_depth) {
2872 // Popped up one or more frames, always trigger.
2873 event_flags |= kSingleStep;
2874 VLOG(jdwp) << "SS method pop";
2875 } else if (stack_depth == single_step_control->stack_depth) {
2876 // Same depth, see if we moved.
2877 if (single_step_control->step_size == JDWP::SS_MIN) {
2878 event_flags |= kSingleStep;
2879 VLOG(jdwp) << "SS new instruction";
2880 } else if (single_step_control->ContainsDexPc(dex_pc)) {
2881 event_flags |= kSingleStep;
2882 VLOG(jdwp) << "SS new line";
2883 }
2884 }
2885 } else {
2886 CHECK_EQ(single_step_control->step_depth, JDWP::SD_OUT);
2887 // Return from the current method. We break when the frame
2888 // depth pops up.
2889
2890 // This differs from the "method exit" break in that it stops
2891 // with the PC at the next instruction in the returned-to
2892 // function, rather than the end of the returning function.
2893
2894 int stack_depth = GetStackDepth(thread);
2895 if (stack_depth < single_step_control->stack_depth) {
2896 event_flags |= kSingleStep;
2897 VLOG(jdwp) << "SS method pop";
2898 }
2899 }
2900 }
2901
2902 // If there's something interesting going on, see if it matches one
2903 // of the debugger filters.
2904 if (event_flags != 0) {
2905 Dbg::PostLocationEvent(m, dex_pc, this_object, event_flags, return_value);
2906 }
2907 }
2908
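// Maps an instrumentation event bit to the debugger's reference count for that event. These
// counts drive RequestDeoptimizationLocked below: only the 0 -> 1 and 1 -> 0 transitions queue an
// actual register/unregister request.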
2909 size_t* Dbg::GetReferenceCounterForEvent(uint32_t instrumentation_event) {
2910 switch (instrumentation_event) {
2911 case instrumentation::Instrumentation::kMethodEntered:
2912 return &method_enter_event_ref_count_;
2913 case instrumentation::Instrumentation::kMethodExited:
2914 return &method_exit_event_ref_count_;
2915 case instrumentation::Instrumentation::kDexPcMoved:
2916 return &dex_pc_change_event_ref_count_;
2917 case instrumentation::Instrumentation::kFieldRead:
2918 return &field_read_event_ref_count_;
2919 case instrumentation::Instrumentation::kFieldWritten:
2920 return &field_write_event_ref_count_;
2921 case instrumentation::Instrumentation::kExceptionCaught:
2922 return &exception_catch_event_ref_count_;
2923 default:
2924 return nullptr;
2925 }
2926 }
2927
2928 // Process request while all mutator threads are suspended.
2929 void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
2930 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
2931 switch (request.GetKind()) {
2932 case DeoptimizationRequest::kNothing:
2933 LOG(WARNING) << "Ignoring empty deoptimization request.";
2934 break;
2935 case DeoptimizationRequest::kRegisterForEvent:
2936 VLOG(jdwp) << StringPrintf("Add debugger as listener for instrumentation event 0x%x",
2937 request.InstrumentationEvent());
2938 instrumentation->AddListener(&gDebugInstrumentationListener, request.InstrumentationEvent());
2939 instrumentation_events_ |= request.InstrumentationEvent();
2940 break;
2941 case DeoptimizationRequest::kUnregisterForEvent:
2942 VLOG(jdwp) << StringPrintf("Remove debugger as listener for instrumentation event 0x%x",
2943 request.InstrumentationEvent());
2944 instrumentation->RemoveListener(&gDebugInstrumentationListener,
2945 request.InstrumentationEvent());
2946 instrumentation_events_ &= ~request.InstrumentationEvent();
2947 break;
2948 case DeoptimizationRequest::kFullDeoptimization:
2949 VLOG(jdwp) << "Deoptimize the world ...";
2950 instrumentation->DeoptimizeEverything();
2951 VLOG(jdwp) << "Deoptimize the world DONE";
2952 break;
2953 case DeoptimizationRequest::kFullUndeoptimization:
2954 VLOG(jdwp) << "Undeoptimize the world ...";
2955 instrumentation->UndeoptimizeEverything();
2956 VLOG(jdwp) << "Undeoptimize the world DONE";
2957 break;
2958 case DeoptimizationRequest::kSelectiveDeoptimization:
2959 VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " ...";
2960 instrumentation->Deoptimize(request.Method());
2961 VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " DONE";
2962 break;
2963 case DeoptimizationRequest::kSelectiveUndeoptimization:
2964 VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " ...";
2965 instrumentation->Undeoptimize(request.Method());
2966 VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " DONE";
2967 break;
2968 default:
2969 LOG(FATAL) << "Unsupported deoptimization request kind " << request.GetKind();
2970 break;
2971 }
2972 }
2973
2974 void Dbg::DelayFullUndeoptimization() {
2975 MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
2976 ++delayed_full_undeoptimization_count_;
2977 DCHECK_LE(delayed_full_undeoptimization_count_, full_deoptimization_event_count_);
2978 }
2979
2980 void Dbg::ProcessDelayedFullUndeoptimizations() {
2981 // TODO: avoid taking the lock twice (once here and once in ManageDeoptimization).
2982 {
2983 MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
2984 while (delayed_full_undeoptimization_count_ > 0) {
2985 DeoptimizationRequest req;
2986 req.SetKind(DeoptimizationRequest::kFullUndeoptimization);
2987 req.SetMethod(nullptr);
2988 RequestDeoptimizationLocked(req);
2989 --delayed_full_undeoptimization_count_;
2990 }
2991 }
2992 ManageDeoptimization();
2993 }
2994
2995 void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
2996 if (req.GetKind() == DeoptimizationRequest::kNothing) {
2997 // Nothing to do.
2998 return;
2999 }
3000 MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
3001 RequestDeoptimizationLocked(req);
3002 }
3003
3004 void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
3005 switch (req.GetKind()) {
3006 case DeoptimizationRequest::kRegisterForEvent: {
3007 DCHECK_NE(req.InstrumentationEvent(), 0u);
3008 size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
3009 CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
3010 req.InstrumentationEvent());
3011 if (*counter == 0) {
3012 VLOG(jdwp) << StringPrintf("Queue request #%zu to start listening to instrumentation event 0x%x",
3013 deoptimization_requests_.size(), req.InstrumentationEvent());
3014 deoptimization_requests_.push_back(req);
3015 }
3016 *counter = *counter + 1;
3017 break;
3018 }
3019 case DeoptimizationRequest::kUnregisterForEvent: {
3020 DCHECK_NE(req.InstrumentationEvent(), 0u);
3021 size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
3022 CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
3023 req.InstrumentationEvent());
3024 *counter = *counter - 1;
3025 if (*counter == 0) {
3026 VLOG(jdwp) << StringPrintf("Queue request #%zu to stop listening to instrumentation event 0x%x",
3027 deoptimization_requests_.size(), req.InstrumentationEvent());
3028 deoptimization_requests_.push_back(req);
3029 }
3030 break;
3031 }
3032 case DeoptimizationRequest::kFullDeoptimization: {
3033 DCHECK(req.Method() == nullptr);
3034 if (full_deoptimization_event_count_ == 0) {
3035 VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3036 << " for full deoptimization";
3037 deoptimization_requests_.push_back(req);
3038 }
3039 ++full_deoptimization_event_count_;
3040 break;
3041 }
3042 case DeoptimizationRequest::kFullUndeoptimization: {
3043 DCHECK(req.Method() == nullptr);
3044 DCHECK_GT(full_deoptimization_event_count_, 0U);
3045 --full_deoptimization_event_count_;
3046 if (full_deoptimization_event_count_ == 0) {
3047 VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3048 << " for full undeoptimization";
3049 deoptimization_requests_.push_back(req);
3050 }
3051 break;
3052 }
3053 case DeoptimizationRequest::kSelectiveDeoptimization: {
3054 DCHECK(req.Method() != nullptr);
3055 VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3056 << " for deoptimization of " << PrettyMethod(req.Method());
3057 deoptimization_requests_.push_back(req);
3058 break;
3059 }
3060 case DeoptimizationRequest::kSelectiveUndeoptimization: {
3061 DCHECK(req.Method() != nullptr);
3062 VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3063 << " for undeoptimization of " << PrettyMethod(req.Method());
3064 deoptimization_requests_.push_back(req);
3065 break;
3066 }
3067 default: {
3068 LOG(FATAL) << "Unknown deoptimization request kind " << req.GetKind();
3069 break;
3070 }
3071 }
3072 }
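// Example of the reference counting above: the first request to listen for, say, kDexPcMoved
// queues a kRegisterForEvent request and bumps its counter to 1; later requests for the same
// event only increment the counter, and the listener is only removed once the counter drops
// back to 0.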
3073
3074 void Dbg::ManageDeoptimization() {
3075 Thread* const self = Thread::Current();
3076 {
3077 // Avoid suspend/resume if there is no pending request.
3078 MutexLock mu(self, *Locks::deoptimization_lock_);
3079 if (deoptimization_requests_.empty()) {
3080 return;
3081 }
3082 }
3083 CHECK_EQ(self->GetState(), kRunnable);
3084 self->TransitionFromRunnableToSuspended(kWaitingForDeoptimization);
3085 // We need to suspend mutator threads first.
3086 Runtime* const runtime = Runtime::Current();
3087 runtime->GetThreadList()->SuspendAll();
3088 const ThreadState old_state = self->SetStateUnsafe(kRunnable);
3089 {
3090 MutexLock mu(self, *Locks::deoptimization_lock_);
3091 size_t req_index = 0;
3092 for (DeoptimizationRequest& request : deoptimization_requests_) {
3093 VLOG(jdwp) << "Process deoptimization request #" << req_index++;
3094 ProcessDeoptimizationRequest(request);
3095 }
3096 deoptimization_requests_.clear();
3097 }
3098 CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
3099 runtime->GetThreadList()->ResumeAll();
3100 self->TransitionFromSuspendedToRunnable();
3101 }
3102
3103 static bool IsMethodPossiblyInlined(Thread* self, mirror::ArtMethod* m)
3104 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
3105 const DexFile::CodeItem* code_item = m->GetCodeItem();
3106 if (code_item == nullptr) {
3107 // TODO: We should not be asked to watch a location in a native or abstract method, so the
3108 // code item should never be null. We could simply check that we never encounter this case.
3109 return false;
3110 }
3111 // Note: method verifier may cause thread suspension.
3112 self->AssertThreadSuspensionIsAllowable();
3113 StackHandleScope<2> hs(self);
3114 mirror::Class* declaring_class = m->GetDeclaringClass();
3115 Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
3116 Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
3117 verifier::MethodVerifier verifier(dex_cache->GetDexFile(), &dex_cache, &class_loader,
3118 &m->GetClassDef(), code_item, m->GetDexMethodIndex(), m,
3119 m->GetAccessFlags(), false, true, false);
3120 // Note: we don't need to verify the method.
3121 return InlineMethodAnalyser::AnalyseMethodCode(&verifier, nullptr);
3122 }
3123
3124 static const Breakpoint* FindFirstBreakpointForMethod(mirror::ArtMethod* m)
3125 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
3126 for (Breakpoint& breakpoint : gBreakpoints) {
3127 if (breakpoint.Method() == m) {
3128 return &breakpoint;
3129 }
3130 }
3131 return nullptr;
3132 }
3133
3134 // Sanity checks all existing breakpoints on the same method.
3135 static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m, bool need_full_deoptimization)
3136 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
3137 for (const Breakpoint& breakpoint : gBreakpoints) {
3138 CHECK_EQ(need_full_deoptimization, breakpoint.NeedFullDeoptimization());
3139 }
3140 if (need_full_deoptimization) {
3141 // We should have deoptimized everything but not "selectively" deoptimized this method.
3142 CHECK(Runtime::Current()->GetInstrumentation()->AreAllMethodsDeoptimized());
3143 CHECK(!Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3144 } else {
3145 // We should have "selectively" deoptimized this method.
3146 // Note: while we have not deoptimized everything for this method, we may have done it for
3147 // another event.
3148 CHECK(Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3149 }
3150 }
3151
3152 // Installs a breakpoint at the specified location. Also indicates through the deoptimization
3153 // request if we need to deoptimize.
3154 void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
3155 Thread* const self = Thread::Current();
3156 mirror::ArtMethod* m = FromMethodId(location->method_id);
3157 DCHECK(m != nullptr) << "No method for method id " << location->method_id;
3158
3159 const Breakpoint* existing_breakpoint;
3160 {
3161 ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
3162 existing_breakpoint = FindFirstBreakpointForMethod(m);
3163 }
3164 bool need_full_deoptimization;
3165 if (existing_breakpoint == nullptr) {
3166 // There is no breakpoint on this method yet: we need to deoptimize. If this method may be
3167 // inlined, we deoptimize everything; otherwise we deoptimize only this method.
3168 // Note: IsMethodPossiblyInlined goes into the method verifier and may cause thread suspension.
3169 // Therefore we must not hold any lock when we call it.
3170 need_full_deoptimization = IsMethodPossiblyInlined(self, m);
3171 if (need_full_deoptimization) {
3172 req->SetKind(DeoptimizationRequest::kFullDeoptimization);
3173 req->SetMethod(nullptr);
3174 } else {
3175 req->SetKind(DeoptimizationRequest::kSelectiveDeoptimization);
3176 req->SetMethod(m);
3177 }
3178 } else {
3179 // There is at least one breakpoint for this method: we don't need to deoptimize.
3180 req->SetKind(DeoptimizationRequest::kNothing);
3181 req->SetMethod(nullptr);
3182
3183 need_full_deoptimization = existing_breakpoint->NeedFullDeoptimization();
3184 if (kIsDebugBuild) {
3185 ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
3186 SanityCheckExistingBreakpoints(m, need_full_deoptimization);
3187 }
3188 }
3189
3190 {
3191 WriterMutexLock mu(self, *Locks::breakpoint_lock_);
3192 gBreakpoints.push_back(Breakpoint(m, location->dex_pc, need_full_deoptimization));
3193 VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": "
3194 << gBreakpoints[gBreakpoints.size() - 1];
3195 }
3196 }
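// Worked example of the decision above: setting the first breakpoint in a method that the inline
// analyser thinks could be inlined produces a kFullDeoptimization request; setting a second
// breakpoint in the same method produces kNothing, because the required deoptimization is already
// in place and only the breakpoint list needs updating.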
3197
3198 // Uninstalls a breakpoint at the specified location. Also indicates through the deoptimization
3199 // request if we need to undeoptimize.
3200 void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
3201 WriterMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
3202 mirror::ArtMethod* m = FromMethodId(location->method_id);
3203 DCHECK(m != nullptr) << "No method for method id " << location->method_id;
3204 bool need_full_deoptimization = false;
3205 for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
3206 if (gBreakpoints[i].DexPc() == location->dex_pc && gBreakpoints[i].Method() == m) {
3207 VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
3208 need_full_deoptimization = gBreakpoints[i].NeedFullDeoptimization();
3209 DCHECK_NE(need_full_deoptimization, Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3210 gBreakpoints.erase(gBreakpoints.begin() + i);
3211 break;
3212 }
3213 }
3214 const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
3215 if (existing_breakpoint == nullptr) {
3216 // There is no more breakpoint on this method: we need to undeoptimize.
3217 if (need_full_deoptimization) {
3218 // This method required full deoptimization: we need to undeoptimize everything.
3219 req->SetKind(DeoptimizationRequest::kFullUndeoptimization);
3220 req->SetMethod(nullptr);
3221 } else {
3222 // This method required selective deoptimization: we need to undeoptimize only that method.
3223 req->SetKind(DeoptimizationRequest::kSelectiveUndeoptimization);
3224 req->SetMethod(m);
3225 }
3226 } else {
3227 // There is at least one breakpoint for this method: we don't need to undeoptimize.
3228 req->SetKind(DeoptimizationRequest::kNothing);
3229 req->SetMethod(nullptr);
3230 if (kIsDebugBuild) {
3231 SanityCheckExistingBreakpoints(m, need_full_deoptimization);
3232 }
3233 }
3234 }
3235
3236 // Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't
3237 // cause suspension if the thread is the current thread.
3238 class ScopedThreadSuspension {
3239 public:
3240 ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
3241 LOCKS_EXCLUDED(Locks::thread_list_lock_)
3242 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
3243 thread_(nullptr),
3244 error_(JDWP::ERR_NONE),
3245 self_suspend_(false),
3246 other_suspend_(false) {
3247 ScopedObjectAccessUnchecked soa(self);
3248 {
3249 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3250 error_ = DecodeThread(soa, thread_id, thread_);
3251 }
3252 if (error_ == JDWP::ERR_NONE) {
3253 if (thread_ == soa.Self()) {
3254 self_suspend_ = true;
3255 } else {
3256 soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
3257 jobject thread_peer = Dbg::GetObjectRegistry()->GetJObject(thread_id);
3258 bool timed_out;
3259 Thread* suspended_thread;
3260 {
3261 // Take suspend thread lock to avoid races with threads trying to suspend this one.
3262 MutexLock mu(soa.Self(), *Locks::thread_list_suspend_thread_lock_);
3263 ThreadList* thread_list = Runtime::Current()->GetThreadList();
3264 suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, true, true, &timed_out);
3265 }
3266 CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kWaitingForDebuggerSuspension);
3267 if (suspended_thread == nullptr) {
3268 // Thread terminated from under us while suspending.
3269 error_ = JDWP::ERR_INVALID_THREAD;
3270 } else {
3271 CHECK_EQ(suspended_thread, thread_);
3272 other_suspend_ = true;
3273 }
3274 }
3275 }
3276 }
3277
3278 Thread* GetThread() const {
3279 return thread_;
3280 }
3281
3282 JDWP::JdwpError GetError() const {
3283 return error_;
3284 }
3285
3286 ~ScopedThreadSuspension() {
3287 if (other_suspend_) {
3288 Runtime::Current()->GetThreadList()->Resume(thread_, true);
3289 }
3290 }
3291
3292 private:
3293 Thread* thread_;
3294 JDWP::JdwpError error_;
3295 bool self_suspend_;
3296 bool other_suspend_;
3297 };
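// Typical use (see ConfigureStep below): construct with the JDWP thread id, bail out if
// GetError() != ERR_NONE, then walk GetThread()'s stack; the destructor resumes the thread only
// if this class suspended it (i.e. not for a self-suspension).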
3298
3299 JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize step_size,
3300 JDWP::JdwpStepDepth step_depth) {
3301 Thread* self = Thread::Current();
3302 ScopedThreadSuspension sts(self, thread_id);
3303 if (sts.GetError() != JDWP::ERR_NONE) {
3304 return sts.GetError();
3305 }
3306
3307 //
3308 // Work out what Method* we're in, the current line number, and how deep the stack currently
3309 // is for step-out.
3310 //
3311
3312 struct SingleStepStackVisitor : public StackVisitor {
3313 explicit SingleStepStackVisitor(Thread* thread, SingleStepControl* single_step_control,
3314 int32_t* line_number)
3315 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
3316 : StackVisitor(thread, NULL), single_step_control_(single_step_control),
3317 line_number_(line_number) {
3318 DCHECK_EQ(single_step_control_, thread->GetSingleStepControl());
3319 single_step_control_->method = NULL;
3320 single_step_control_->stack_depth = 0;
3321 }
3322
3323 // TODO: Enable annotalysis. We know the lock is held in the constructor, but the
3324 // abstraction confuses annotalysis.
3325 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
3326 mirror::ArtMethod* m = GetMethod();
3327 if (!m->IsRuntimeMethod()) {
3328 ++single_step_control_->stack_depth;
3329 if (single_step_control_->method == NULL) {
3330 mirror::DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache();
3331 single_step_control_->method = m;
3332 *line_number_ = -1;
3333 if (dex_cache != NULL) {
3334 const DexFile& dex_file = *dex_cache->GetDexFile();
3335 *line_number_ = dex_file.GetLineNumFromPC(m, GetDexPc());
3336 }
3337 }
3338 }
3339 return true;
3340 }
3341
3342 SingleStepControl* const single_step_control_;
3343 int32_t* const line_number_;
3344 };
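// The visitor above records the innermost non-runtime method as the single-step anchor, the
// source line at its current dex pc, and the total managed stack depth used by the SD_OVER and
// SD_OUT comparisons in UpdateDebugger.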
3345
3346 Thread* const thread = sts.GetThread();
3347 SingleStepControl* const single_step_control = thread->GetSingleStepControl();
3348 DCHECK(single_step_control != nullptr);
3349 int32_t line_number = -1;
3350 SingleStepStackVisitor visitor(thread, single_step_control, &line_number);
3351 visitor.WalkStack();
3352
3353 //
3354 // Find the dex_pc values that correspond to the current line, for line-based single-stepping.
3355 //
3356
3357 struct DebugCallbackContext {
3358 explicit DebugCallbackContext(SingleStepControl* single_step_control, int32_t line_number,
3359 const DexFile::CodeItem* code_item)
3360 : single_step_control_(single_step_control), line_number_(line_number), code_item_(code_item),
3361 last_pc_valid(false), last_pc(0) {
3362 }
3363
3364 static bool Callback(void* raw_context, uint32_t address, uint32_t line_number) {
3365 DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context);
3366 if (static_cast<int32_t>(line_number) == context->line_number_) {
3367 if (!context->last_pc_valid) {
3368 // Everything from this address until the next line change is ours.
3369 context->last_pc = address;
3370 context->last_pc_valid = true;
3371 }
3372 // Otherwise, if we're already in a valid range for this line,
3373 // just keep going (shouldn't really happen)...
3374 } else if (context->last_pc_valid) { // and the line number is new
3375 // Add everything from the last entry up until here to the set
3376 for (uint32_t dex_pc = context->last_pc; dex_pc < address; ++dex_pc) {
3377 context->single_step_control_->dex_pcs.insert(dex_pc);
3378 }
3379 context->last_pc_valid = false;
3380 }
3381 return false; // There may be multiple entries for any given line.
3382 }
3383
3384 ~DebugCallbackContext() {
3385 // If the line number was the last in the position table...
3386 if (last_pc_valid) {
3387 size_t end = code_item_->insns_size_in_code_units_;
3388 for (uint32_t dex_pc = last_pc; dex_pc < end; ++dex_pc) {
3389 single_step_control_->dex_pcs.insert(dex_pc);
3390 }
3391 }
3392 }
3393
3394 SingleStepControl* const single_step_control_;
3395 const int32_t line_number_;
3396 const DexFile::CodeItem* const code_item_;
3397 bool last_pc_valid;
3398 uint32_t last_pc;
3399 };
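// The context above turns the debug line table into the set of dex pcs that share the current
// source line; UpdateDebugger later reports a "new line" step only when the pc leaves this set.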
3400 single_step_control->dex_pcs.clear();
3401 mirror::ArtMethod* m = single_step_control->method;
3402 if (!m->IsNative()) {
3403 const DexFile::CodeItem* const code_item = m->GetCodeItem();
3404 DebugCallbackContext context(single_step_control, line_number, code_item);
3405 m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
3406 DebugCallbackContext::Callback, NULL, &context);
3407 }
3408
3409 //
3410 // Everything else...
3411 //
3412
3413 single_step_control->step_size = step_size;
3414 single_step_control->step_depth = step_depth;
3415 single_step_control->is_active = true;
3416
3417 if (VLOG_IS_ON(jdwp)) {
3418 VLOG(jdwp) << "Single-step thread: " << *thread;
3419 VLOG(jdwp) << "Single-step step size: " << single_step_control->step_size;
3420 VLOG(jdwp) << "Single-step step depth: " << single_step_control->step_depth;
3421 VLOG(jdwp) << "Single-step current method: " << PrettyMethod(single_step_control->method);
3422 VLOG(jdwp) << "Single-step current line: " << line_number;
3423 VLOG(jdwp) << "Single-step current stack depth: " << single_step_control->stack_depth;
3424 VLOG(jdwp) << "Single-step dex_pc values:";
3425 for (uint32_t dex_pc : single_step_control->dex_pcs) {
3426 VLOG(jdwp) << StringPrintf(" %#x", dex_pc);
3427 }
3428 }
3429
3430 return JDWP::ERR_NONE;
3431 }
3432
3433 void Dbg::UnconfigureStep(JDWP::ObjectId thread_id) {
3434 ScopedObjectAccessUnchecked soa(Thread::Current());
3435 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3436 Thread* thread;
3437 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
3438 if (error == JDWP::ERR_NONE) {
3439 SingleStepControl* single_step_control = thread->GetSingleStepControl();
3440 DCHECK(single_step_control != nullptr);
3441 single_step_control->Clear();
3442 }
3443 }
3444
3445 static char JdwpTagToShortyChar(JDWP::JdwpTag tag) {
3446 switch (tag) {
3447 default:
3448 LOG(FATAL) << "unknown JDWP tag: " << PrintableChar(tag);
3449
3450 // Primitives.
3451 case JDWP::JT_BYTE: return 'B';
3452 case JDWP::JT_CHAR: return 'C';
3453 case JDWP::JT_FLOAT: return 'F';
3454 case JDWP::JT_DOUBLE: return 'D';
3455 case JDWP::JT_INT: return 'I';
3456 case JDWP::JT_LONG: return 'J';
3457 case JDWP::JT_SHORT: return 'S';
3458 case JDWP::JT_VOID: return 'V';
3459 case JDWP::JT_BOOLEAN: return 'Z';
3460
3461 // Reference types.
3462 case JDWP::JT_ARRAY:
3463 case JDWP::JT_OBJECT:
3464 case JDWP::JT_STRING:
3465 case JDWP::JT_THREAD:
3466 case JDWP::JT_THREAD_GROUP:
3467 case JDWP::JT_CLASS_LOADER:
3468 case JDWP::JT_CLASS_OBJECT:
3469 return 'L';
3470 }
3471 }
3472
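// Example: for a method "String describe(int, boolean)" the shorty is "LIZ" (return type first).
// InvokeMethod below checks each argument by comparing shorty[i + 1] against
// JdwpTagToShortyChar(arg_types[i]), so JT_INT must map to 'I' and JT_BOOLEAN to 'Z'.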
3473 JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId object_id,
3474 JDWP::RefTypeId class_id, JDWP::MethodId method_id,
3475 uint32_t arg_count, uint64_t* arg_values,
3476 JDWP::JdwpTag* arg_types, uint32_t options,
3477 JDWP::JdwpTag* pResultTag, uint64_t* pResultValue,
3478 JDWP::ObjectId* pExceptionId) {
3479 ThreadList* thread_list = Runtime::Current()->GetThreadList();
3480
3481 Thread* targetThread = NULL;
3482 DebugInvokeReq* req = NULL;
3483 Thread* self = Thread::Current();
3484 {
3485 ScopedObjectAccessUnchecked soa(self);
3486 MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3487 JDWP::JdwpError error = DecodeThread(soa, thread_id, targetThread);
3488 if (error != JDWP::ERR_NONE) {
3489 LOG(ERROR) << "InvokeMethod request for invalid thread id " << thread_id;
3490 return error;
3491 }
3492 req = targetThread->GetInvokeReq();
3493 if (!req->ready) {
3494 LOG(ERROR) << "InvokeMethod request for thread not stopped by event: " << *targetThread;
3495 return JDWP::ERR_INVALID_THREAD;
3496 }
3497
3498 /*
3499 * We currently have a bug where we don't successfully resume the
3500 * target thread if the suspend count is too deep. We're expected to
3501 * require one "resume" for each "suspend", but when asked to execute
3502 * a method we have to resume fully and then re-suspend it back to the
3503 * same level. (The easiest way to cause this is to type "suspend"
3504 * multiple times in jdb.)
3505 *
3506 * It's unclear what this means when the event specifies "resume all"
3507 * and some threads are suspended more deeply than others. This is
3508 * a rare problem, so for now we just prevent it from hanging forever
3509 * by rejecting the method invocation request. Without this, we will
3510 * be stuck waiting on a suspended thread.
3511 */
3512 int suspend_count;
3513 {
3514 MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
3515 suspend_count = targetThread->GetSuspendCount();
3516 }
3517 if (suspend_count > 1) {
3518 LOG(ERROR) << *targetThread << " suspend count too deep for method invocation: " << suspend_count;
3519 return JDWP::ERR_THREAD_SUSPENDED; // Probably not expected here.
3520 }
3521
3522 JDWP::JdwpError status;
3523 mirror::Object* receiver = gRegistry->Get<mirror::Object*>(object_id);
3524 if (receiver == ObjectRegistry::kInvalidObject) {
3525 return JDWP::ERR_INVALID_OBJECT;
3526 }
3527
3528 mirror::Object* thread = gRegistry->Get<mirror::Object*>(thread_id);
3529 if (thread == ObjectRegistry::kInvalidObject) {
3530 return JDWP::ERR_INVALID_OBJECT;
3531 }
3532 // TODO: check that 'thread' is actually a java.lang.Thread!
3533
3534 mirror::Class* c = DecodeClass(class_id, status);
3535 if (c == NULL) {
3536 return status;
3537 }
3538
3539 mirror::ArtMethod* m = FromMethodId(method_id);
3540 if (m->IsStatic() != (receiver == NULL)) {
3541 return JDWP::ERR_INVALID_METHODID;
3542 }
3543 if (m->IsStatic()) {
3544 if (m->GetDeclaringClass() != c) {
3545 return JDWP::ERR_INVALID_METHODID;
3546 }
3547 } else {
3548 if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
3549 return JDWP::ERR_INVALID_METHODID;
3550 }
3551 }
3552
3553 // Check the argument list matches the method.
3554 uint32_t shorty_len = 0;
3555 const char* shorty = m->GetShorty(&shorty_len);
3556 if (shorty_len - 1 != arg_count) {
3557 return JDWP::ERR_ILLEGAL_ARGUMENT;
3558 }
3559
3560 {
3561 StackHandleScope<3> hs(soa.Self());
3562 MethodHelper mh(hs.NewHandle(m));
3563 HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&receiver));
3564 HandleWrapper<mirror::Class> h_klass(hs.NewHandleWrapper(&c));
3565 const DexFile::TypeList* types = m->GetParameterTypeList();
3566 for (size_t i = 0; i < arg_count; ++i) {
3567 if (shorty[i + 1] != JdwpTagToShortyChar(arg_types[i])) {
3568 return JDWP::ERR_ILLEGAL_ARGUMENT;
3569 }
3570
3571 if (shorty[i + 1] == 'L') {
3572 // Did we really get an argument of an appropriate reference type?
3573 mirror::Class* parameter_type = mh.GetClassFromTypeIdx(types->GetTypeItem(i).type_idx_);
3574 mirror::Object* argument = gRegistry->Get<mirror::Object*>(arg_values[i]);
3575 if (argument == ObjectRegistry::kInvalidObject) {
3576 return JDWP::ERR_INVALID_OBJECT;
3577 }
3578 if (argument != NULL && !argument->InstanceOf(parameter_type)) {
3579 return JDWP::ERR_ILLEGAL_ARGUMENT;
3580 }
3581
3582 // Turn the on-the-wire ObjectId into a jobject.
3583 jvalue& v = reinterpret_cast<jvalue&>(arg_values[i]);
3584 v.l = gRegistry->GetJObject(arg_values[i]);
3585 }
3586 }
3587 // Update in case it moved.
3588 m = mh.GetMethod();
3589 }
3590
3591 req->receiver = receiver;
3592 req->thread = thread;
3593 req->klass = c;
3594 req->method = m;
3595 req->arg_count = arg_count;
3596 req->arg_values = arg_values;
3597 req->options = options;
3598 req->invoke_needed = true;
3599 }
3600
3601 // The fact that we've released the thread list lock is a bit risky -- if the thread goes
3602 // away we're sitting high and dry -- but we must release it before the ResumeAllThreads
3603 // call, and it's unwise to hold it during WaitForSuspend.
3604
3605 {
3606 /*
3607 * We change our (JDWP thread) status, which should be THREAD_RUNNING,
3608 * so we can suspend for a GC if the invoke request causes us to
3609 * run out of memory. It's also a good idea to change it before locking
3610 * the invokeReq mutex, although that should never be held for long.
3611 */
3612 self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend);
3613
3614 VLOG(jdwp) << " Transferring control to event thread";
3615 {
3616 MutexLock mu(self, req->lock);
3617
3618 if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
3619 VLOG(jdwp) << " Resuming all threads";
3620 thread_list->UndoDebuggerSuspensions();
3621 } else {
3622 VLOG(jdwp) << " Resuming event thread only";
3623 thread_list->Resume(targetThread, true);
3624 }
3625
3626 // Wait for the request to finish executing.
3627 while (req->invoke_needed) {
3628 req->cond.Wait(self);
3629 }
3630 }
3631 VLOG(jdwp) << " Control has returned from event thread";
3632
3633 /* wait for thread to re-suspend itself */
3634 SuspendThread(thread_id, false /* request_suspension */);
3635 self->TransitionFromSuspendedToRunnable();
3636 }
3637
3638 /*
3639 * Suspend the threads. We waited for the target thread to suspend
3640 * itself, so all we need to do is suspend the others.
3641 *
3642 * The suspendAllThreads() call will double-suspend the event thread,
3643 * so we want to resume the target thread once to keep the books straight.
3644 */
3645 if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
3646 self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
3647 VLOG(jdwp) << " Suspending all threads";
3648 thread_list->SuspendAllForDebugger();
3649 self->TransitionFromSuspendedToRunnable();
3650 VLOG(jdwp) << " Resuming event thread to balance the count";
3651 thread_list->Resume(targetThread, true);
3652 }
3653
3654 // Copy the result.
3655 *pResultTag = req->result_tag;
3656 if (IsPrimitiveTag(req->result_tag)) {
3657 *pResultValue = req->result_value.GetJ();
3658 } else {
3659 *pResultValue = gRegistry->Add(req->result_value.GetL());
3660 }
3661 *pExceptionId = req->exception;
3662 return req->error;
3663 }
3664
3665 void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
3666 ScopedObjectAccess soa(Thread::Current());
3667
3668 // We can be called while an exception is pending. We need
3669 // to preserve that across the method invocation.
3670 StackHandleScope<4> hs(soa.Self());
3671 auto old_throw_this_object = hs.NewHandle<mirror::Object>(nullptr);
3672 auto old_throw_method = hs.NewHandle<mirror::ArtMethod>(nullptr);
3673 auto old_exception = hs.NewHandle<mirror::Throwable>(nullptr);
3674 uint32_t old_throw_dex_pc;
3675 bool old_exception_report_flag;
3676 {
3677 ThrowLocation old_throw_location;
3678 mirror::Throwable* old_exception_obj = soa.Self()->GetException(&old_throw_location);
3679 old_throw_this_object.Assign(old_throw_location.GetThis());
3680 old_throw_method.Assign(old_throw_location.GetMethod());
3681 old_exception.Assign(old_exception_obj);
3682 old_throw_dex_pc = old_throw_location.GetDexPc();
3683 old_exception_report_flag = soa.Self()->IsExceptionReportedToInstrumentation();
3684 soa.Self()->ClearException();
3685 }
3686
3687 // Translate the method through the vtable, unless the debugger wants to suppress it.
3688 Handle<mirror::ArtMethod> m(hs.NewHandle(pReq->method));
3689 if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver != NULL) {
3690 mirror::ArtMethod* actual_method = pReq->klass->FindVirtualMethodForVirtualOrInterface(m.Get());
3691 if (actual_method != m.Get()) {
3692 VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m.Get()) << " to " << PrettyMethod(actual_method);
3693 m.Assign(actual_method);
3694 }
3695 }
3696 VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m.Get())
3697 << " receiver=" << pReq->receiver
3698 << " arg_count=" << pReq->arg_count;
3699 CHECK(m.Get() != nullptr);
3700
3701 CHECK_EQ(sizeof(jvalue), sizeof(uint64_t));
3702
3703 pReq->result_value = InvokeWithJValues(soa, pReq->receiver, soa.EncodeMethod(m.Get()),
3704 reinterpret_cast<jvalue*>(pReq->arg_values));
3705
3706 mirror::Throwable* exception = soa.Self()->GetException(NULL);
3707 soa.Self()->ClearException();
3708 pReq->exception = gRegistry->Add(exception);
3709 pReq->result_tag = BasicTagFromDescriptor(m.Get()->GetShorty());
3710 if (pReq->exception != 0) {
3711 VLOG(jdwp) << " JDWP invocation returning with exception=" << exception
3712 << " " << exception->Dump();
3713 pReq->result_value.SetJ(0);
3714 } else if (pReq->result_tag == JDWP::JT_OBJECT) {
3715 /* if no exception thrown, examine object result more closely */
3716 JDWP::JdwpTag new_tag = TagFromObject(soa, pReq->result_value.GetL());
3717 if (new_tag != pReq->result_tag) {
3718 VLOG(jdwp) << " JDWP promoted result from " << pReq->result_tag << " to " << new_tag;
3719 pReq->result_tag = new_tag;
3720 }
3721
3722 /*
3723 * Register the object. We don't actually need an ObjectId yet,
3724 * but we do need to be sure that the GC won't move or discard the
3725 * object when we switch out of RUNNING. The ObjectId conversion
3726 * will add the object to the "do not touch" list.
3727 *
3728 * We can't use the "tracked allocation" mechanism here because
3729 * the object is going to be handed off to a different thread.
3730 */
3731 gRegistry->Add(pReq->result_value.GetL());
3732 }
3733
3734 if (old_exception.Get() != NULL) {
3735 ThrowLocation gc_safe_throw_location(old_throw_this_object.Get(), old_throw_method.Get(),
3736 old_throw_dex_pc);
3737 soa.Self()->SetException(gc_safe_throw_location, old_exception.Get());
3738 soa.Self()->SetExceptionReportedToInstrumentation(old_exception_report_flag);
3739 }
3740 }
3741
3742 /*
3743 * "request" contains a full JDWP packet, possibly with multiple chunks. We
3744 * need to process each, accumulate the replies, and ship the whole thing
3745 * back.
3746 *
3747 * Returns "true" if we have a reply. The reply buffer is newly allocated,
3748 * and includes the chunk type/length, followed by the data.
3749 *
3750 * OLD-TODO: we currently assume that the request and reply include a single
3751 * chunk. If this becomes inconvenient we will need to adapt.
3752 */
3753 bool Dbg::DdmHandlePacket(JDWP::Request& request, uint8_t** pReplyBuf, int* pReplyLen) {
3754 Thread* self = Thread::Current();
3755 JNIEnv* env = self->GetJniEnv();
3756
3757 uint32_t type = request.ReadUnsigned32("type");
3758 uint32_t length = request.ReadUnsigned32("length");
3759
3760 // Create a byte[] corresponding to 'request'.
3761 size_t request_length = request.size();
3762 ScopedLocalRef<jbyteArray> dataArray(env, env->NewByteArray(request_length));
3763 if (dataArray.get() == NULL) {
3764 LOG(WARNING) << "byte[] allocation failed: " << request_length;
3765 env->ExceptionClear();
3766 return false;
3767 }
3768 env->SetByteArrayRegion(dataArray.get(), 0, request_length, reinterpret_cast<const jbyte*>(request.data()));
3769 request.Skip(request_length);
3770
3771 // Run through and find all chunks. [Currently just find the first.]
3772 ScopedByteArrayRO contents(env, dataArray.get());
3773 if (length != request_length) {
3774 LOG(WARNING) << StringPrintf("bad chunk found (len=%u pktLen=%zu)", length, request_length);
3775 return false;
3776 }
3777
3778 // Call "private static Chunk dispatch(int type, byte[] data, int offset, int length)".
3779 ScopedLocalRef<jobject> chunk(env, env->CallStaticObjectMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
3780 WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_dispatch,
3781 type, dataArray.get(), 0, length));
3782 if (env->ExceptionCheck()) {
3783 LOG(INFO) << StringPrintf("Exception thrown by dispatcher for 0x%08x", type);
3784 env->ExceptionDescribe();
3785 env->ExceptionClear();
3786 return false;
3787 }
3788
3789 if (chunk.get() == NULL) {
3790 return false;
3791 }
3792
3793 /*
3794 * Pull the pieces out of the chunk. We copy the results into a
3795 * newly-allocated buffer that the caller can free. We don't want to
3796 * continue using the Chunk object because nothing has a reference to it.
3797 *
3798 * We could avoid this by returning type/data/offset/length and having
3799 * the caller be aware of the object lifetime issues, but that
3800 * integrates the JDWP code more tightly into the rest of the runtime, and doesn't work
3801 * if we have responses for multiple chunks.
3802 *
3803 * So we're pretty much stuck with copying data around multiple times.
3804 */
3805 ScopedLocalRef<jbyteArray> replyData(env, reinterpret_cast<jbyteArray>(env->GetObjectField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data)));
3806 jint offset = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_offset);
3807 length = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_length);
3808 type = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type);
3809
3810 VLOG(jdwp) << StringPrintf("DDM reply: type=0x%08x data=%p offset=%d length=%d", type, replyData.get(), offset, length);
3811 if (length == 0 || replyData.get() == NULL) {
3812 return false;
3813 }
3814
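// The reply produced below is a single DDM chunk: a 4-byte type, a 4-byte big-endian length,
// then 'length' bytes of payload copied from the Chunk object's data array.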
3815 const int kChunkHdrLen = 8;
3816 uint8_t* reply = new uint8_t[length + kChunkHdrLen];
3817 if (reply == NULL) {
3818 LOG(WARNING) << "malloc failed: " << (length + kChunkHdrLen);
3819 return false;
3820 }
3821 JDWP::Set4BE(reply + 0, type);
3822 JDWP::Set4BE(reply + 4, length);
3823 env->GetByteArrayRegion(replyData.get(), offset, length, reinterpret_cast<jbyte*>(reply + kChunkHdrLen));
3824
3825 *pReplyBuf = reply;
3826 *pReplyLen = length + kChunkHdrLen;
3827
3828 VLOG(jdwp) << StringPrintf("dvmHandleDdm returning type=%.4s %p len=%d", reinterpret_cast<char*>(reply), reply, length);
3829 return true;
3830 }
3831
3832 void Dbg::DdmBroadcast(bool connect) {
3833 VLOG(jdwp) << "Broadcasting DDM " << (connect ? "connect" : "disconnect") << "...";
3834
3835 Thread* self = Thread::Current();
3836 if (self->GetState() != kRunnable) {
3837 LOG(ERROR) << "DDM broadcast in thread state " << self->GetState();
3838 /* try anyway? */
3839 }
3840
3841 JNIEnv* env = self->GetJniEnv();
3842 jint event = connect ? 1 /*DdmServer.CONNECTED*/ : 2 /*DdmServer.DISCONNECTED*/;
3843 env->CallStaticVoidMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
3844 WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_broadcast,
3845 event);
3846 if (env->ExceptionCheck()) {
3847 LOG(ERROR) << "DdmServer.broadcast " << event << " failed";
3848 env->ExceptionDescribe();
3849 env->ExceptionClear();
3850 }
3851 }
3852
3853 void Dbg::DdmConnected() {
3854 Dbg::DdmBroadcast(true);
3855 }
3856
3857 void Dbg::DdmDisconnected() {
3858 Dbg::DdmBroadcast(false);
3859 gDdmThreadNotification = false;
3860 }
3861
3862 /*
3863 * Send a notification when a thread starts, stops, or changes its name.
3864 *
3865 * Because we broadcast the full set of threads when the notifications are
3866 * first enabled, it's possible for "thread" to be actively executing.
3867 */
3868 void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) {
3869 if (!gDdmThreadNotification) {
3870 return;
3871 }
3872
3873 if (type == CHUNK_TYPE("THDE")) {
3874 uint8_t buf[4];
3875 JDWP::Set4BE(&buf[0], t->GetThreadId());
3876 Dbg::DdmSendChunk(CHUNK_TYPE("THDE"), 4, buf);
3877 } else {
3878 CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
3879 ScopedObjectAccessUnchecked soa(Thread::Current());
3880 StackHandleScope<1> hs(soa.Self());
3881 Handle<mirror::String> name(hs.NewHandle(t->GetThreadName(soa)));
3882 size_t char_count = (name.Get() != NULL) ? name->GetLength() : 0;
3883 const jchar* chars = (name.Get() != NULL) ? name->GetCharArray()->GetData() : NULL;
3884
3885 std::vector<uint8_t> bytes;
3886 JDWP::Append4BE(bytes, t->GetThreadId());
3887 JDWP::AppendUtf16BE(bytes, chars, char_count);
3888 CHECK_EQ(bytes.size(), char_count*2 + sizeof(uint32_t)*2);
3889 Dbg::DdmSendChunk(type, bytes);
3890 }
3891 }
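// Wire format of the chunks sent above: THDE carries just the 4-byte thread id; THCR/THNM carry
// the 4-byte thread id, the name length in UTF-16 code units (judging by the byte-count CHECK),
// and the name itself, all big-endian.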
3892
3893 void Dbg::DdmSetThreadNotification(bool enable) {
3894 // Enable/disable thread notifications.
3895 gDdmThreadNotification = enable;
3896 if (enable) {
3897 // Suspend the VM then post thread start notifications for all threads. Threads attaching will
3898 // see a suspension in progress and block until that ends. They then post their own start
3899 // notification.
3900 SuspendVM();
3901 std::list<Thread*> threads;
3902 Thread* self = Thread::Current();
3903 {
3904 MutexLock mu(self, *Locks::thread_list_lock_);
3905 threads = Runtime::Current()->GetThreadList()->GetList();
3906 }
3907 {
3908 ScopedObjectAccess soa(self);
3909 for (Thread* thread : threads) {
3910 Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR"));
3911 }
3912 }
3913 ResumeVM();
3914 }
3915 }
3916
3917 void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) {
3918 if (IsDebuggerActive()) {
3919 gJdwpState->PostThreadChange(t, type == CHUNK_TYPE("THCR"));
3920 }
3921 Dbg::DdmSendThreadNotification(t, type);
3922 }
3923
3924 void Dbg::PostThreadStart(Thread* t) {
3925 Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THCR"));
3926 }
3927
3928 void Dbg::PostThreadDeath(Thread* t) {
3929 Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE"));
3930 }
3931
3932 void Dbg::DdmSendChunk(uint32_t type, size_t byte_count, const uint8_t* buf) {
3933 CHECK(buf != NULL);
3934 iovec vec[1];
3935 vec[0].iov_base = reinterpret_cast<void*>(const_cast<uint8_t*>(buf));
3936 vec[0].iov_len = byte_count;
3937 Dbg::DdmSendChunkV(type, vec, 1);
3938 }
3939
3940 void Dbg::DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes) {
3941 DdmSendChunk(type, bytes.size(), &bytes[0]);
3942 }
3943
3944 void Dbg::DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) {
3945 if (gJdwpState == NULL) {
3946 VLOG(jdwp) << "Debugger thread not active, ignoring DDM send: " << type;
3947 } else {
3948 gJdwpState->DdmSendChunkV(type, iov, iov_count);
3949 }
3950 }
3951
3952 int Dbg::DdmHandleHpifChunk(HpifWhen when) {
3953 if (when == HPIF_WHEN_NOW) {
3954 DdmSendHeapInfo(when);
3955 return true;
3956 }
3957
3958 if (when != HPIF_WHEN_NEVER && when != HPIF_WHEN_NEXT_GC && when != HPIF_WHEN_EVERY_GC) {
3959 LOG(ERROR) << "invalid HpifWhen value: " << static_cast<int>(when);
3960 return false;
3961 }
3962
3963 gDdmHpifWhen = when;
3964 return true;
3965 }
3966
3967 bool Dbg::DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when, Dbg::HpsgWhat what, bool native) {
3968 if (when != HPSG_WHEN_NEVER && when != HPSG_WHEN_EVERY_GC) {
3969 LOG(ERROR) << "invalid HpsgWhen value: " << static_cast<int>(when);
3970 return false;
3971 }
3972
3973 if (what != HPSG_WHAT_MERGED_OBJECTS && what != HPSG_WHAT_DISTINCT_OBJECTS) {
3974 LOG(ERROR) << "invalid HpsgWhat value: " << static_cast<int>(what);
3975 return false;
3976 }
3977
3978 if (native) {
3979 gDdmNhsgWhen = when;
3980 gDdmNhsgWhat = what;
3981 } else {
3982 gDdmHpsgWhen = when;
3983 gDdmHpsgWhat = what;
3984 }
3985 return true;
3986 }
3987
3988 void Dbg::DdmSendHeapInfo(HpifWhen reason) {
3989 // If there's a one-shot 'when', reset it.
3990 if (reason == gDdmHpifWhen) {
3991 if (gDdmHpifWhen == HPIF_WHEN_NEXT_GC) {
3992 gDdmHpifWhen = HPIF_WHEN_NEVER;
3993 }
3994 }
3995
3996 /*
3997 * Chunk HPIF (client --> server)
3998 *
3999 * Heap Info. General information about the heap,
4000 * suitable for a summary display.
4001 *
4002 * [u4]: number of heaps
4003 *
4004 * For each heap:
4005 * [u4]: heap ID
4006 * [u8]: timestamp in ms since Unix epoch
4007 * [u1]: capture reason (same as 'when' value from server)
4008 * [u4]: max heap size in bytes (-Xmx)
4009 * [u4]: current heap size in bytes
4010 * [u4]: current number of bytes allocated
4011 * [u4]: current number of objects allocated
4012 */
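// For the single heap reported here the chunk is 4 + (4 + 8 + 1 + 4 + 4 + 4 + 4) = 33 bytes,
// which the CHECK below verifies.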
4013 uint8_t heap_count = 1;
4014 gc::Heap* heap = Runtime::Current()->GetHeap();
4015 std::vector<uint8_t> bytes;
4016 JDWP::Append4BE(bytes, heap_count);
4017 JDWP::Append4BE(bytes, 1); // Heap id (bogus; we only have one heap).
4018 JDWP::Append8BE(bytes, MilliTime());
4019 JDWP::Append1BE(bytes, reason);
4020 JDWP::Append4BE(bytes, heap->GetMaxMemory()); // Max allowed heap size in bytes.
4021 JDWP::Append4BE(bytes, heap->GetTotalMemory()); // Current heap size in bytes.
4022 JDWP::Append4BE(bytes, heap->GetBytesAllocated());
4023 JDWP::Append4BE(bytes, heap->GetObjectsAllocated());
4024 CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4)));
4025 Dbg::DdmSendChunk(CHUNK_TYPE("HPIF"), bytes);
4026 }
4027
4028 enum HpsgSolidity {
4029 SOLIDITY_FREE = 0,
4030 SOLIDITY_HARD = 1,
4031 SOLIDITY_SOFT = 2,
4032 SOLIDITY_WEAK = 3,
4033 SOLIDITY_PHANTOM = 4,
4034 SOLIDITY_FINALIZABLE = 5,
4035 SOLIDITY_SWEEP = 6,
4036 };
4037
4038 enum HpsgKind {
4039 KIND_OBJECT = 0,
4040 KIND_CLASS_OBJECT = 1,
4041 KIND_ARRAY_1 = 2,
4042 KIND_ARRAY_2 = 3,
4043 KIND_ARRAY_4 = 4,
4044 KIND_ARRAY_8 = 5,
4045 KIND_UNKNOWN = 6,
4046 KIND_NATIVE = 7,
4047 };
4048
4049 #define HPSG_PARTIAL (1<<7)
4050 #define HPSG_STATE(solidity, kind) ((uint8_t)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))
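// HPSG_STATE packs the kind into bits 3-5 and the solidity into bits 0-2 of one byte; e.g.
// HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4) == ((4 & 0x7) << 3) | (1 & 0x7) == 0x21. Bit 7
// (HPSG_PARTIAL) marks a record that covers only part of a larger chunk.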
4051
4052 class HeapChunkContext {
4053 public:
4054 // Maximum chunk size. Obtain this from the formula:
4055 // (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2
4056 HeapChunkContext(bool merge, bool native)
4057 : buf_(16384 - 16),
4058 type_(0),
4059 merge_(merge),
4060 chunk_overhead_(0) {
4061 Reset();
4062 if (native) {
4063 type_ = CHUNK_TYPE("NHSG");
4064 } else {
4065 type_ = merge ? CHUNK_TYPE("HPSG") : CHUNK_TYPE("HPSO");
4066 }
4067 }
4068
4069 ~HeapChunkContext() {
4070 if (p_ > &buf_[0]) {
4071 Flush();
4072 }
4073 }
4074
4075 void SetChunkOverhead(size_t chunk_overhead) {
4076 chunk_overhead_ = chunk_overhead;
4077 }
4078
4079 void ResetStartOfNextChunk() {
4080 startOfNextMemoryChunk_ = nullptr;
4081 }
4082
4083 void EnsureHeader(const void* chunk_ptr) {
4084 if (!needHeader_) {
4085 return;
4086 }
4087
4088 // Start a new HPSx chunk.
4089 JDWP::Write4BE(&p_, 1); // Heap id (bogus; we only have one heap).
4090 JDWP::Write1BE(&p_, 8); // Size of allocation unit, in bytes.
4091
4092 JDWP::Write4BE(&p_, reinterpret_cast<uintptr_t>(chunk_ptr)); // virtual address of segment start.
4093 JDWP::Write4BE(&p_, 0); // offset of this piece (relative to the virtual address).
4094 // [u4]: length of piece, in allocation units
4095 // We won't know this until we're done, so save the offset and stuff in a dummy value.
4096 pieceLenField_ = p_;
4097 JDWP::Write4BE(&p_, 0x55555555);
4098 needHeader_ = false;
4099 }
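// The header written above is 17 bytes (4 + 1 + 4 + 4 + 4), which matches the 17-byte header
// allowance in AppendChunk's free-space check.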
4100
4101 void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4102 if (pieceLenField_ == NULL) {
4103 // Flush immediately post Reset (maybe back-to-back Flush). Ignore.
4104 CHECK(needHeader_);
4105 return;
4106 }
4107 // Patch the "length of piece" field.
4108 CHECK_LE(&buf_[0], pieceLenField_);
4109 CHECK_LE(pieceLenField_, p_);
4110 JDWP::Set4BE(pieceLenField_, totalAllocationUnits_);
4111
4112 Dbg::DdmSendChunk(type_, p_ - &buf_[0], &buf_[0]);
4113 Reset();
4114 }
4115
4116 static void HeapChunkCallback(void* start, void* end, size_t used_bytes, void* arg)
4117 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
4118 Locks::mutator_lock_) {
4119 reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkCallback(start, end, used_bytes);
4120 }
4121
4122 private:
4123 enum { ALLOCATION_UNIT_SIZE = 8 };
4124
4125 void Reset() {
4126 p_ = &buf_[0];
4127 ResetStartOfNextChunk();
4128 totalAllocationUnits_ = 0;
4129 needHeader_ = true;
4130 pieceLenField_ = NULL;
4131 }
4132
4133 void HeapChunkCallback(void* start, void* /*end*/, size_t used_bytes)
4134 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
4135 Locks::mutator_lock_) {
4136 // Note: heap callbacks cannot manipulate the heap they are crawling. Care is taken in the
4137 // following code not to allocate memory, by ensuring buf_ is of the correct size.
4138 if (used_bytes == 0) {
4139 if (start == NULL) {
4140 // Reset for start of new heap.
4141 startOfNextMemoryChunk_ = NULL;
4142 Flush();
4143 }
4144 // Only process in-use memory so that free region information
4145 // also includes dlmalloc bookkeeping.
4146 return;
4147 }
4148
4149 /* If we're looking at the native heap, we'll just return
4150 * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks
4151 */
4152 bool native = type_ == CHUNK_TYPE("NHSG");
4153
4154 // TODO: I'm not sure using the start of the next chunk works well with multiple spaces. We
4155 // shouldn't count gaps between spaces as free memory.
4156 if (startOfNextMemoryChunk_ != NULL) {
4157 // Transmit any pending free memory. Native free memory of
4158 // over kMaxFreeLen could be because of the use of mmaps, so
4159 // don't report it. If no free chunk is reported, flush and start a new segment.
4160 bool flush = true;
4161 if (start > startOfNextMemoryChunk_) {
4162 const size_t kMaxFreeLen = 2 * kPageSize;
4163 void* freeStart = startOfNextMemoryChunk_;
4164 void* freeEnd = start;
4165 size_t freeLen = reinterpret_cast<char*>(freeEnd) - reinterpret_cast<char*>(freeStart);
4166 if (!native || freeLen < kMaxFreeLen) {
4167 AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), freeStart, freeLen);
4168 flush = false;
4169 }
4170 }
4171 if (flush) {
4172 startOfNextMemoryChunk_ = NULL;
4173 Flush();
4174 }
4175 }
4176 mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
4177
4178 // Determine the type of this chunk.
4179 // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
4180 // If it's the same, we should combine them.
4181 uint8_t state = ExamineObject(obj, native);
4182 AppendChunk(state, start, used_bytes + chunk_overhead_);
4183 startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
4184 }
4185
4186 void AppendChunk(uint8_t state, void* ptr, size_t length)
4187 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4188 // Make sure there's enough room left in the buffer.
4189 // We need two bytes for every (possibly partial) run of 256 allocation units used by the
4190 // chunk, plus 17 bytes for the header.
4191 size_t needed = (((length/ALLOCATION_UNIT_SIZE + 255) / 256) * 2) + 17;
4192 size_t bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]);
4193 if (bytesLeft < needed) {
4194 Flush();
4195 }
4196
4197 bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]);
4198 if (bytesLeft < needed) {
4199 LOG(WARNING) << "Chunk is too big to transmit (chunk_len=" << length << ", "
4200 << needed << " bytes)";
4201 return;
4202 }
4203 EnsureHeader(ptr);
4204 // Write out the chunk description.
4205 length /= ALLOCATION_UNIT_SIZE; // Convert to allocation units.
4206 totalAllocationUnits_ += length;
4207 while (length > 256) {
4208 *p_++ = state | HPSG_PARTIAL;
4209 *p_++ = 255; // length - 1
4210 length -= 256;
4211 }
4212 *p_++ = state;
4213 *p_++ = length - 1;
4214 }
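// Run-length example for the loop above: a 520-allocation-unit chunk is emitted as two
// (state | HPSG_PARTIAL, 255) records of 256 units each, followed by a final (state, 7) record
// for the remaining 8 units.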
4215
4216 uint8_t ExamineObject(mirror::Object* o, bool is_native_heap)
4217 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
4218 if (o == NULL) {
4219 return HPSG_STATE(SOLIDITY_FREE, 0);
4220 }
4221
4222 // It's an allocated chunk. Figure out what it is.
4223
4224 // If we're looking at the native heap, we'll just return
4225 // (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks.
4226 if (is_native_heap) {
4227 return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
4228 }
4229
4230 if (!Runtime::Current()->GetHeap()->IsLiveObjectLocked(o)) {
4231 return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
4232 }
4233
4234 mirror::Class* c = o->GetClass();
4235 if (c == NULL) {
4236 // The object was probably just created but hasn't been initialized yet.
4237 return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
4238 }
4239
4240 if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(c)) {
4241 LOG(ERROR) << "Invalid class for managed heap object: " << o << " " << c;
4242 return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
4243 }
4244
4245 if (c->IsClassClass()) {
4246 return HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
4247 }
4248
4249 if (c->IsArrayClass()) {
4250 if (o->IsObjectArray()) {
4251 return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
4252 }
4253 switch (c->GetComponentSize()) {
4254 case 1: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
4255 case 2: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
4256 case 4: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
4257 case 8: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
4258 }
4259 }
4260
4261 return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
4262 }
4263
4264 std::vector<uint8_t> buf_;
4265 uint8_t* p_;
4266 uint8_t* pieceLenField_;
4267 void* startOfNextMemoryChunk_;
4268 size_t totalAllocationUnits_;
4269 uint32_t type_;
4270 bool merge_;
4271 bool needHeader_;
4272 size_t chunk_overhead_;
4273
4274 DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
4275 };
4276
4277 static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
4278 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
4279 const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
4280 HeapChunkContext::HeapChunkCallback(
4281 obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
4282 }
4283
4284 void Dbg::DdmSendHeapSegments(bool native) {
4285 Dbg::HpsgWhen when;
4286 Dbg::HpsgWhat what;
4287 if (!native) {
4288 when = gDdmHpsgWhen;
4289 what = gDdmHpsgWhat;
4290 } else {
4291 when = gDdmNhsgWhen;
4292 what = gDdmNhsgWhat;
4293 }
4294 if (when == HPSG_WHEN_NEVER) {
4295 return;
4296 }
4297
4298 // Figure out what kind of chunks we'll be sending.
4299 CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS) << static_cast<int>(what);
4300
4301 // First, send a heap start chunk.
4302 uint8_t heap_id[4];
4303 JDWP::Set4BE(&heap_id[0], 1); // Heap id (bogus; we only have one heap).
4304 Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"), sizeof(heap_id), heap_id);
4305
4306 Thread* self = Thread::Current();
4307
4308 // To allow the Walk/InspectAll() below to exclusively-lock the
4309 // mutator lock, temporarily release the shared access to the
4310 // mutator lock here by transitioning to the suspended state.
4311 Locks::mutator_lock_->AssertSharedHeld(self);
4312 self->TransitionFromRunnableToSuspended(kSuspended);
4313
4314 // Send a series of heap segment chunks.
4315 HeapChunkContext context((what == HPSG_WHAT_MERGED_OBJECTS), native);
4316 if (native) {
4317 #ifdef USE_DLMALLOC
4318 dlmalloc_inspect_all(HeapChunkContext::HeapChunkCallback, &context);
4319 #else
4320 UNIMPLEMENTED(WARNING) << "Native heap inspection is only supported with dlmalloc";
4321 #endif
4322 } else {
4323 gc::Heap* heap = Runtime::Current()->GetHeap();
4324 for (const auto& space : heap->GetContinuousSpaces()) {
4325 if (space->IsDlMallocSpace()) {
4326 // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
4327 // allocation then the first sizeof(size_t) may belong to it.
4328 context.SetChunkOverhead(sizeof(size_t));
4329 space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
4330 } else if (space->IsRosAllocSpace()) {
4331 context.SetChunkOverhead(0);
4332 space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
4333 } else if (space->IsBumpPointerSpace()) {
4334 context.SetChunkOverhead(0);
4335 ReaderMutexLock mu(self, *Locks::mutator_lock_);
4336 WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
4337 space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context);
4338 } else {
4339 UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
4340 }
4341 context.ResetStartOfNextChunk();
4342 }
4343 // Walk the large objects, these are not in the AllocSpace.
4344 context.SetChunkOverhead(0);
4345 heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
4346 }
4347
4348 // Shared-lock the mutator lock back.
4349 self->TransitionFromSuspendedToRunnable();
4350 Locks::mutator_lock_->AssertSharedHeld(self);
4351
4352 // Finally, send a heap end chunk.
4353 Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id);
4354 }
4355
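// Determines how many allocation records are kept when allocation tracking is enabled. On a
// device the default can be overridden with a power-of-two system property, for example:
//   adb shell setprop dalvik.vm.allocTrackerMax 65536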
4356 static size_t GetAllocTrackerMax() {
4357 #ifdef HAVE_ANDROID_OS
4358 // Check whether there's a system property overriding the number of records.
4359 const char* propertyName = "dalvik.vm.allocTrackerMax";
4360 char allocRecordMaxString[PROPERTY_VALUE_MAX];
4361 if (property_get(propertyName, allocRecordMaxString, "") > 0) {
4362 char* end;
4363 size_t value = strtoul(allocRecordMaxString, &end, 10);
4364 if (*end != '\0') {
4365 LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString
4366 << "' --- invalid";
4367 return kDefaultNumAllocRecords;
4368 }
4369 if (!IsPowerOfTwo(value)) {
4370 LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString
4371 << "' --- not power of two";
4372 return kDefaultNumAllocRecords;
4373 }
4374 return value;
4375 }
4376 #endif
4377 return kDefaultNumAllocRecords;
4378 }
4379
4380 void Dbg::SetAllocTrackingEnabled(bool enable) {
4381 Thread* self = Thread::Current();
4382 if (enable) {
4383 {
4384 MutexLock mu(self, *Locks::alloc_tracker_lock_);
4385 if (recent_allocation_records_ != NULL) {
4386 return; // Already enabled, bail.
4387 }
4388 alloc_record_max_ = GetAllocTrackerMax();
4389 LOG(INFO) << "Enabling alloc tracker (" << alloc_record_max_ << " entries of "
4390 << kMaxAllocRecordStackDepth << " frames, taking "
4391 << PrettySize(sizeof(AllocRecord) * alloc_record_max_) << ")";
4392 DCHECK_EQ(alloc_record_head_, 0U);
4393 DCHECK_EQ(alloc_record_count_, 0U);
4394 recent_allocation_records_ = new AllocRecord[alloc_record_max_];
4395 CHECK(recent_allocation_records_ != NULL);
4396 }
4397 Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
4398 } else {
4399 {
4400 ScopedObjectAccess soa(self); // For type_cache_.Clear();
4401 MutexLock mu(self, *Locks::alloc_tracker_lock_);
4402 if (recent_allocation_records_ == NULL) {
4403 return; // Already disabled, bail.
4404 }
4405 LOG(INFO) << "Disabling alloc tracker";
4406 delete[] recent_allocation_records_;
4407 recent_allocation_records_ = NULL;
4408 alloc_record_head_ = 0;
4409 alloc_record_count_ = 0;
4410 type_cache_.Clear();
4411 }
4412 // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
4413 Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
4414 }
4415 }
4416
4417 struct AllocRecordStackVisitor : public StackVisitor {
4418   AllocRecordStackVisitor(Thread* thread, AllocRecord* record)
4419 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
4420 : StackVisitor(thread, NULL), record(record), depth(0) {}
4421
4422 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
4423 // annotalysis.
4424   bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
4425 if (depth >= kMaxAllocRecordStackDepth) {
4426 return false;
4427 }
4428 mirror::ArtMethod* m = GetMethod();
4429 if (!m->IsRuntimeMethod()) {
4430 record->StackElement(depth)->SetMethod(m);
4431 record->StackElement(depth)->SetDexPc(GetDexPc());
4432 ++depth;
4433 }
4434 return true;
4435 }
4436
4437   ~AllocRecordStackVisitor() {
4438 // Clear out any unused stack trace elements.
4439 for (; depth < kMaxAllocRecordStackDepth; ++depth) {
4440 record->StackElement(depth)->SetMethod(nullptr);
4441 record->StackElement(depth)->SetDexPc(0);
4442 }
4443 }
4444
4445 AllocRecord* record;
4446 size_t depth;
4447 };
4448
4449 void Dbg::RecordAllocation(mirror::Class* type, size_t byte_count) {
4450 Thread* self = Thread::Current();
4451 CHECK(self != NULL);
4452
4453 MutexLock mu(self, *Locks::alloc_tracker_lock_);
4454 if (recent_allocation_records_ == NULL) {
4455 // In the process of shutting down recording, bail.
4456 return;
4457 }
4458
4459 // Advance and clip.
4460 if (++alloc_record_head_ == alloc_record_max_) {
4461 alloc_record_head_ = 0;
4462 }
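  // alloc_record_head_ now indexes the slot for this (most recent) record; once the buffer has
  // filled up, each new allocation overwrites the oldest record in place.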
4463
4464 // Fill in the basics.
4465 AllocRecord* record = &recent_allocation_records_[alloc_record_head_];
4466 record->SetType(type);
4467 record->SetByteCount(byte_count);
4468 record->SetThinLockId(self->GetThreadId());
4469
4470 // Fill in the stack trace.
4471 AllocRecordStackVisitor visitor(self, record);
4472 visitor.WalkStack();
4473
4474 if (alloc_record_count_ < alloc_record_max_) {
4475 ++alloc_record_count_;
4476 }
4477 }
4478
4479 // Returns the index of the head element.
4480 //
4481 // We point at the most-recently-written record, so if alloc_record_count_ is 1
4482 // we want to use the current element. Take "head+1" and subtract count
4483 // from it.
4484 //
4485 // We need to handle underflow in our circular buffer, so we add
4486 // alloc_record_max_ and then mask it back down.
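//
// For example, with alloc_record_max_ = 8, alloc_record_head_ = 2 and alloc_record_count_ = 3
// (records live at indices 0, 1 and 2), this gives (2 + 1 + 8 - 3) & 7 == 0, the oldest record.
// With a full buffer (count == max == 8) and head == 5, it gives (5 + 1 + 8 - 8) & 7 == 6, the
// slot just after the most recent write, which holds the oldest record.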
4487 size_t Dbg::HeadIndex() {
4488 return (Dbg::alloc_record_head_ + 1 + Dbg::alloc_record_max_ - Dbg::alloc_record_count_) &
4489 (Dbg::alloc_record_max_ - 1);
4490 }
4491
4492 void Dbg::DumpRecentAllocations() {
4493 ScopedObjectAccess soa(Thread::Current());
4494 MutexLock mu(soa.Self(), *Locks::alloc_tracker_lock_);
4495 if (recent_allocation_records_ == NULL) {
4496 LOG(INFO) << "Not recording tracked allocations";
4497 return;
4498 }
4499
4500   // "i" starts at the oldest record in the ring buffer (the index returned by HeadIndex())
4501   // and walks forward to the most recently written record.
4502 size_t i = HeadIndex();
4503 const uint16_t capped_count = CappedAllocRecordCount(Dbg::alloc_record_count_);
4504 uint16_t count = capped_count;
4505
4506   LOG(INFO) << "Tracked allocations (head=" << alloc_record_head_ << " count=" << count << ")";
4507 while (count--) {
4508 AllocRecord* record = &recent_allocation_records_[i];
4509
4510 LOG(INFO) << StringPrintf(" Thread %-2d %6zd bytes ", record->ThinLockId(), record->ByteCount())
4511 << PrettyClass(record->Type());
4512
4513 for (size_t stack_frame = 0; stack_frame < kMaxAllocRecordStackDepth; ++stack_frame) {
4514 AllocRecordStackTraceElement* stack_element = record->StackElement(stack_frame);
4515 mirror::ArtMethod* m = stack_element->Method();
4516 if (m == NULL) {
4517 break;
4518 }
4519 LOG(INFO) << " " << PrettyMethod(m) << " line " << stack_element->LineNumber();
4520 }
4521
4522 // pause periodically to help logcat catch up
4523 if ((count % 5) == 0) {
4524 usleep(40000);
4525 }
4526
4527 i = (i + 1) & (alloc_record_max_ - 1);
4528 }
4529 }
4530
4531 class StringTable {
4532 public:
4533   StringTable() {
4534 }
4535
4536   void Add(const std::string& str) {
4537 table_.insert(str);
4538 }
4539
4540   void Add(const char* str) {
4541 table_.insert(str);
4542 }
4543
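  // Indices are positions in the sorted order of table_ (std::set iterates in sorted order).
  // They are only meaningful once every string has been Add()ed, since later insertions can
  // shift earlier positions; WriteTo() emits the strings in the same order, so index i always
  // matches the i-th string written.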
4544   size_t IndexOf(const char* s) const {
4545 auto it = table_.find(s);
4546 if (it == table_.end()) {
4547 LOG(FATAL) << "IndexOf(\"" << s << "\") failed";
4548 }
4549 return std::distance(table_.begin(), it);
4550 }
4551
4552   size_t Size() const {
4553 return table_.size();
4554 }
4555
4556   void WriteTo(std::vector<uint8_t>& bytes) const {
4557 for (const std::string& str : table_) {
4558 const char* s = str.c_str();
4559 size_t s_len = CountModifiedUtf8Chars(s);
4560       std::unique_ptr<uint16_t[]> s_utf16(new uint16_t[s_len]);
4561 ConvertModifiedUtf8ToUtf16(s_utf16.get(), s);
4562 JDWP::AppendUtf16BE(bytes, s_utf16.get(), s_len);
4563 }
4564 }
4565
4566 private:
4567 std::set<std::string> table_;
4568 DISALLOW_COPY_AND_ASSIGN(StringTable);
4569 };
4570
4571 static const char* GetMethodSourceFile(mirror::ArtMethod* method)
4572 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4573 DCHECK(method != nullptr);
4574 const char* source_file = method->GetDeclaringClassSourceFile();
4575 return (source_file != nullptr) ? source_file : "";
4576 }
4577
4578 /*
4579 * The data we send to DDMS contains everything we have recorded.
4580 *
4581 * Message header (all values big-endian):
4582 * (1b) message header len (to allow future expansion); includes itself
4583 * (1b) entry header len
4584 * (1b) stack frame len
4585 * (2b) number of entries
4586 * (4b) offset to string table from start of message
4587 * (2b) number of class name strings
4588 * (2b) number of method name strings
4589 * (2b) number of source file name strings
4590 * For each entry:
4591 * (4b) total allocation size
4592 * (2b) thread id
4593 * (2b) allocated object's class name index
4594 * (1b) stack depth
4595 * For each stack frame:
4596 * (2b) method's class name
4597 * (2b) method name
4598 * (2b) method source file
4599 * (2b) line number, clipped to 32767; -2 if native; -1 if no source
4600 * (xb) class name strings
4601 * (xb) method name strings
4602 * (xb) source file strings
4603 *
4604 * As with other DDM traffic, strings are sent as a 4-byte length
4605 * followed by UTF-16 data.
4606 *
4607 * We send up 16-bit unsigned indexes into string tables. In theory there
4608 * can be (kMaxAllocRecordStackDepth * alloc_record_max_) unique strings in
4609 * each table, but in practice there should be far fewer.
4610 *
4611 * The chief reason for using a string table here is to keep the size of
4612 * the DDMS message to a minimum. This is partly to make the protocol
4613 * efficient, but also because we have to form the whole thing up all at
4614 * once in a memory buffer.
4615 *
4616 * We use separate string tables for class names, method names, and source
4617 * files to keep the indexes small. There will generally be no overlap
4618 * between the contents of these tables.
4619 */
4620 jbyteArray Dbg::GetRecentAllocations() {
4621 if (false) {
4622 DumpRecentAllocations();
4623 }
4624
4625 Thread* self = Thread::Current();
4626 std::vector<uint8_t> bytes;
4627 {
4628 MutexLock mu(self, *Locks::alloc_tracker_lock_);
4629 //
4630 // Part 1: generate string tables.
4631 //
4632 StringTable class_names;
4633 StringTable method_names;
4634 StringTable filenames;
4635
4636 const uint16_t capped_count = CappedAllocRecordCount(Dbg::alloc_record_count_);
4637 uint16_t count = capped_count;
4638 size_t idx = HeadIndex();
4639 while (count--) {
4640 AllocRecord* record = &recent_allocation_records_[idx];
4641 std::string temp;
4642 class_names.Add(record->Type()->GetDescriptor(&temp));
4643 for (size_t i = 0; i < kMaxAllocRecordStackDepth; i++) {
4644 mirror::ArtMethod* m = record->StackElement(i)->Method();
4645 if (m != NULL) {
4646 class_names.Add(m->GetDeclaringClassDescriptor());
4647 method_names.Add(m->GetName());
4648 filenames.Add(GetMethodSourceFile(m));
4649 }
4650 }
4651
4652 idx = (idx + 1) & (alloc_record_max_ - 1);
4653 }
4654
4655 LOG(INFO) << "allocation records: " << capped_count;
4656
4657 //
4658 // Part 2: Generate the output and store it in the buffer.
4659 //
4660
4661 // (1b) message header len (to allow future expansion); includes itself
4662 // (1b) entry header len
4663 // (1b) stack frame len
4664 const int kMessageHeaderLen = 15;
4665 const int kEntryHeaderLen = 9;
4666 const int kStackFrameLen = 8;
4667 JDWP::Append1BE(bytes, kMessageHeaderLen);
4668 JDWP::Append1BE(bytes, kEntryHeaderLen);
4669 JDWP::Append1BE(bytes, kStackFrameLen);
4670
4671 // (2b) number of entries
4672 // (4b) offset to string table from start of message
4673 // (2b) number of class name strings
4674 // (2b) number of method name strings
4675 // (2b) number of source file name strings
4676 JDWP::Append2BE(bytes, capped_count);
4677 size_t string_table_offset = bytes.size();
4678 JDWP::Append4BE(bytes, 0); // We'll patch this later...
4679 JDWP::Append2BE(bytes, class_names.Size());
4680 JDWP::Append2BE(bytes, method_names.Size());
4681 JDWP::Append2BE(bytes, filenames.Size());
4682
4683 idx = HeadIndex();
4684 std::string temp;
4685 for (count = capped_count; count != 0; --count) {
4686 // For each entry:
4687 // (4b) total allocation size
4688 // (2b) thread id
4689 // (2b) allocated object's class name index
4690 // (1b) stack depth
4691 AllocRecord* record = &recent_allocation_records_[idx];
4692 size_t stack_depth = record->GetDepth();
4693 size_t allocated_object_class_name_index =
4694 class_names.IndexOf(record->Type()->GetDescriptor(&temp));
4695 JDWP::Append4BE(bytes, record->ByteCount());
4696 JDWP::Append2BE(bytes, record->ThinLockId());
4697 JDWP::Append2BE(bytes, allocated_object_class_name_index);
4698 JDWP::Append1BE(bytes, stack_depth);
4699
4700 for (size_t stack_frame = 0; stack_frame < stack_depth; ++stack_frame) {
4701 // For each stack frame:
4702 // (2b) method's class name
4703 // (2b) method name
4704 // (2b) method source file
4705 // (2b) line number, clipped to 32767; -2 if native; -1 if no source
4706 mirror::ArtMethod* m = record->StackElement(stack_frame)->Method();
4707 size_t class_name_index = class_names.IndexOf(m->GetDeclaringClassDescriptor());
4708 size_t method_name_index = method_names.IndexOf(m->GetName());
4709 size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(m));
4710 JDWP::Append2BE(bytes, class_name_index);
4711 JDWP::Append2BE(bytes, method_name_index);
4712 JDWP::Append2BE(bytes, file_name_index);
4713 JDWP::Append2BE(bytes, record->StackElement(stack_frame)->LineNumber());
4714 }
4715 idx = (idx + 1) & (alloc_record_max_ - 1);
4716 }
4717
4718 // (xb) class name strings
4719 // (xb) method name strings
4720 // (xb) source file strings
4721 JDWP::Set4BE(&bytes[string_table_offset], bytes.size());
4722 class_names.WriteTo(bytes);
4723 method_names.WriteTo(bytes);
4724 filenames.WriteTo(bytes);
4725 }
4726 JNIEnv* env = self->GetJniEnv();
4727 jbyteArray result = env->NewByteArray(bytes.size());
4728 if (result != NULL) {
4729 env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0]));
4730 }
4731 return result;
4732 }
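
// A minimal sketch of how a DDMS client might decode the fixed message header produced above,
// assuming a hypothetical big-endian reader 'r' with U1/U2/U4 accessors (not part of the
// runtime):
//
//   uint8_t  msg_header_len   = r.U1();  // 15
//   uint8_t  entry_header_len = r.U1();  // 9
//   uint8_t  stack_frame_len  = r.U1();  // 8
//   uint16_t num_entries      = r.U2();
//   uint32_t string_table_off = r.U4();  // offset of the string tables from start of message
//   uint16_t num_class_names  = r.U2();
//   uint16_t num_method_names = r.U2();
//   uint16_t num_file_names   = r.U2();
//
// The num_entries records follow immediately, and the class/method/file string tables start at
// string_table_off.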
4733
4734 mirror::ArtMethod* DeoptimizationRequest::Method() const {
4735 ScopedObjectAccessUnchecked soa(Thread::Current());
4736 return soa.DecodeMethod(method_);
4737 }
4738
4739 void DeoptimizationRequest::SetMethod(mirror::ArtMethod* m) {
4740 ScopedObjectAccessUnchecked soa(Thread::Current());
4741 method_ = soa.EncodeMethod(m);
4742 }
4743
4744 } // namespace art
4745