/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "debugger.h"

#include <sys/uio.h>

#include <set>

#include "arch/context.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/time_utils.h"
#include "class_linker.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
#include "dex_instruction.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "handle_scope.h"
#include "jdwp/jdwp_priv.h"
#include "jdwp/object_registry.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
#include "quick/inline_method_analyser.h"
#include "reflection.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "ScopedPrimitiveArray.h"
#include "handle_scope-inl.h"
#include "thread_list.h"
#include "utf.h"
#include "verifier/method_verifier-inl.h"
#include "well_known_classes.h"

#ifdef HAVE_ANDROID_OS
#include "cutils/properties.h"
#endif

namespace art {

// The key identifying the debugger to update instrumentation.
static constexpr const char* kDbgInstrumentationKey = "Debugger";

static const size_t kMaxAllocRecordStackDepth = 16;  // Max 255.
static const size_t kDefaultNumAllocRecords = 64*1024;  // Must be a power of 2. 2BE can hold 64k-1.

// Limit alloc_record_count to the 2BE value that is the limit of the current protocol.
static uint16_t CappedAllocRecordCount(size_t alloc_record_count) {
  if (alloc_record_count > 0xffff) {
    return 0xffff;
  }
  return alloc_record_count;
}

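// One frame of an allocation-tracking stack trace: a method (stored as a
// jmethodID) and a dex pc within it. Unused frames have a null method.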
class AllocRecordStackTraceElement {
 public:
  AllocRecordStackTraceElement() : method_(nullptr), dex_pc_(0) {
  }

  int32_t LineNumber() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ArtMethod* method = Method();
    DCHECK(method != nullptr);
    return method->GetLineNumFromDexPC(DexPc());
  }

  ArtMethod* Method() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    return soa.DecodeMethod(method_);
  }

  void SetMethod(ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    method_ = soa.EncodeMethod(m);
  }

  uint32_t DexPc() const {
    return dex_pc_;
  }

  void SetDexPc(uint32_t pc) {
    dex_pc_ = pc;
  }

 private:
  jmethodID method_;
  uint32_t dex_pc_;
};

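// Returns a weak global reference for the given class, reusing an existing
// entry when the same class has been added before (looked up by identity hash
// code, then compared by pointer equality).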
jobject Dbg::TypeCache::Add(mirror::Class* t) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JNIEnv* const env = soa.Env();
  ScopedLocalRef<jobject> local_ref(soa.Env(), soa.AddLocalReference<jobject>(t));
  const int32_t hash_code = soa.Decode<mirror::Class*>(local_ref.get())->IdentityHashCode();
  auto range = objects_.equal_range(hash_code);
  for (auto it = range.first; it != range.second; ++it) {
    if (soa.Decode<mirror::Class*>(it->second) == soa.Decode<mirror::Class*>(local_ref.get())) {
      // Found a matching weak global, return it.
      return it->second;
    }
  }
  const jobject weak_global = env->NewWeakGlobalRef(local_ref.get());
  objects_.insert(std::make_pair(hash_code, weak_global));
  return weak_global;
}

void Dbg::TypeCache::Clear() {
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  Thread* self = Thread::Current();
  for (const auto& p : objects_) {
    vm->DeleteWeakGlobalRef(self, p.second);
  }
  objects_.clear();
}

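// A single recorded allocation: the allocated type (held as a weak global via
// the type cache), its size in bytes, the thin lock id of the allocating
// thread, and a stack trace of up to kMaxAllocRecordStackDepth frames.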
class AllocRecord {
 public:
  AllocRecord() : type_(nullptr), byte_count_(0), thin_lock_id_(0) {}

  mirror::Class* Type() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return down_cast<mirror::Class*>(Thread::Current()->DecodeJObject(type_));
  }

  void SetType(mirror::Class* t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
                                                       Locks::alloc_tracker_lock_) {
    type_ = Dbg::type_cache_.Add(t);
  }

  size_t GetDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    size_t depth = 0;
    while (depth < kMaxAllocRecordStackDepth && stack_[depth].Method() != nullptr) {
      ++depth;
    }
    return depth;
  }

  size_t ByteCount() const {
    return byte_count_;
  }

  void SetByteCount(size_t count) {
    byte_count_ = count;
  }

  uint16_t ThinLockId() const {
    return thin_lock_id_;
  }

  void SetThinLockId(uint16_t id) {
    thin_lock_id_ = id;
  }

  AllocRecordStackTraceElement* StackElement(size_t index) {
    DCHECK_LT(index, kMaxAllocRecordStackDepth);
    return &stack_[index];
  }

 private:
  jobject type_;  // This is a weak global.
  size_t byte_count_;
  uint16_t thin_lock_id_;
  // Unused entries have null method.
  AllocRecordStackTraceElement stack_[kMaxAllocRecordStackDepth];
};

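// A breakpoint at a precise (method, dex pc) location, together with the kind
// of deoptimization that was required to install it.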
class Breakpoint {
 public:
  Breakpoint(ArtMethod* method, uint32_t dex_pc,
             DeoptimizationRequest::Kind deoptimization_kind)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
    : method_(nullptr), dex_pc_(dex_pc), deoptimization_kind_(deoptimization_kind) {
    CHECK(deoptimization_kind_ == DeoptimizationRequest::kNothing ||
          deoptimization_kind_ == DeoptimizationRequest::kSelectiveDeoptimization ||
          deoptimization_kind_ == DeoptimizationRequest::kFullDeoptimization);
    ScopedObjectAccessUnchecked soa(Thread::Current());
    method_ = soa.EncodeMethod(method);
  }

  Breakpoint(const Breakpoint& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
    : method_(nullptr), dex_pc_(other.dex_pc_),
      deoptimization_kind_(other.deoptimization_kind_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    method_ = soa.EncodeMethod(other.Method());
  }

  ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    return soa.DecodeMethod(method_);
  }

  uint32_t DexPc() const {
    return dex_pc_;
  }

  DeoptimizationRequest::Kind GetDeoptimizationKind() const {
    return deoptimization_kind_;
  }

 private:
  // The location of this breakpoint.
  jmethodID method_;
  uint32_t dex_pc_;

  // Indicates whether breakpoint needs full deoptimization or selective deoptimization.
  DeoptimizationRequest::Kind deoptimization_kind_;
};

static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc());
  return os;
}

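// Instrumentation listener registered while the debugger is active. It turns
// instrumentation callbacks (method entry/exit, dex pc moves, field accesses,
// caught exceptions) into JDWP events, merging events that happen at the same
// location into a single report where possible.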
class DebugInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
 public:
  DebugInstrumentationListener() {}
  virtual ~DebugInstrumentationListener() {}

  void MethodEntered(Thread* thread, mirror::Object* this_object, ArtMethod* method,
                     uint32_t dex_pc)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (method->IsNative()) {
      // TODO: posting location events is a suspension point and native method entry stubs aren't.
      return;
    }
    if (IsListeningToDexPcMoved()) {
      // We also listen to the kDexPcMoved instrumentation event, so we know DexPcMoved is going
      // to be called right after us. To avoid sending JDWP events twice for this location, we
      // report the event in DexPcMoved. However, we must remember this is a method entry so we
      // send the METHOD_ENTRY event, and we can group it with other events for this location
      // like BREAKPOINT or SINGLE_STEP (or even METHOD_EXIT if this is a RETURN instruction).
      thread->SetDebugMethodEntry();
    } else if (IsListeningToMethodExit() && IsReturn(method, dex_pc)) {
      // We also listen to the kMethodExited instrumentation event and the current instruction is
      // a RETURN, so we know MethodExited is going to be called right after us. To avoid sending
      // JDWP events twice for this location, we report the event(s) in MethodExited. However, we
      // must remember this is a method entry so we send the METHOD_ENTRY event, and we can group
      // it with other events for this location like BREAKPOINT or SINGLE_STEP.
      thread->SetDebugMethodEntry();
    } else {
      Dbg::UpdateDebugger(thread, this_object, method, 0, Dbg::kMethodEntry, nullptr);
    }
  }

  void MethodExited(Thread* thread, mirror::Object* this_object, ArtMethod* method,
                    uint32_t dex_pc, const JValue& return_value)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (method->IsNative()) {
      // TODO: posting location events is a suspension point and native method entry stubs aren't.
      return;
    }
    uint32_t events = Dbg::kMethodExit;
    if (thread->IsDebugMethodEntry()) {
      // It is also the method entry.
      DCHECK(IsReturn(method, dex_pc));
      events |= Dbg::kMethodEntry;
      thread->ClearDebugMethodEntry();
    }
    Dbg::UpdateDebugger(thread, this_object, method, dex_pc, events, &return_value);
  }

  void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object ATTRIBUTE_UNUSED,
                    ArtMethod* method, uint32_t dex_pc)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // We're not registered to listen to this kind of event, so complain.
    LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
               << " " << dex_pc;
  }

  void DexPcMoved(Thread* thread, mirror::Object* this_object, ArtMethod* method,
                  uint32_t new_dex_pc)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (IsListeningToMethodExit() && IsReturn(method, new_dex_pc)) {
      // We also listen to the kMethodExited instrumentation event and the current instruction is
      // a RETURN, so we know MethodExited is going to be called right after us. As in
      // MethodEntered, we delegate event reporting to MethodExited.
      // Besides, if this RETURN instruction is the only one in the method, we can send multiple
      // JDWP events in the same packet: METHOD_ENTRY, METHOD_EXIT, BREAKPOINT and/or SINGLE_STEP.
      // Therefore, we must not clear the debug method entry flag here.
    } else {
      uint32_t events = 0;
      if (thread->IsDebugMethodEntry()) {
        // It is also the method entry.
        events = Dbg::kMethodEntry;
        thread->ClearDebugMethodEntry();
      }
      Dbg::UpdateDebugger(thread, this_object, method, new_dex_pc, events, nullptr);
    }
  }

  void FieldRead(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
                 ArtMethod* method, uint32_t dex_pc, ArtField* field)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
  }

  void FieldWritten(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
                    ArtMethod* method, uint32_t dex_pc, ArtField* field,
                    const JValue& field_value)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
  }

  void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED, mirror::Throwable* exception_object)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::PostException(exception_object);
  }

  // We only care about how many backward branches were executed in the Jit.
  void BackwardBranch(Thread* /*thread*/, ArtMethod* method, int32_t dex_pc_offset)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    LOG(ERROR) << "Unexpected backward branch event in debugger " << PrettyMethod(method)
               << " " << dex_pc_offset;
  }

 private:
  static bool IsReturn(ArtMethod* method, uint32_t dex_pc)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    const DexFile::CodeItem* code_item = method->GetCodeItem();
    const Instruction* instruction = Instruction::At(&code_item->insns_[dex_pc]);
    return instruction->IsReturn();
  }

  static bool IsListeningToDexPcMoved() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return IsListeningTo(instrumentation::Instrumentation::kDexPcMoved);
  }

  static bool IsListeningToMethodExit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return IsListeningTo(instrumentation::Instrumentation::kMethodExited);
  }

  static bool IsListeningTo(instrumentation::Instrumentation::InstrumentationEvent event)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return (Dbg::GetInstrumentationEvents() & event) != 0;
  }

  DISALLOW_COPY_AND_ASSIGN(DebugInstrumentationListener);
} gDebugInstrumentationListener;

// JDWP is allowed unless the Zygote forbids it.
static bool gJdwpAllowed = true;

// Was there a -Xrunjdwp or -agentlib:jdwp= argument on the command line?
static bool gJdwpConfigured = false;

// JDWP options for debugging. Only valid if IsJdwpConfigured() is true.
static JDWP::JdwpOptions gJdwpOptions;

// Runtime JDWP state.
static JDWP::JdwpState* gJdwpState = nullptr;
static bool gDebuggerConnected;  // debugger or DDMS is connected.

static bool gDdmThreadNotification = false;

// DDMS GC-related settings.
static Dbg::HpifWhen gDdmHpifWhen = Dbg::HPIF_WHEN_NEVER;
static Dbg::HpsgWhen gDdmHpsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmHpsgWhat;
static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmNhsgWhat;

bool Dbg::gDebuggerActive = false;
bool Dbg::gDisposed = false;
ObjectRegistry* Dbg::gRegistry = nullptr;

// Recent allocation tracking.
AllocRecord* Dbg::recent_allocation_records_ = nullptr;  // TODO: CircularBuffer<AllocRecord>
size_t Dbg::alloc_record_max_ = 0;
size_t Dbg::alloc_record_head_ = 0;
size_t Dbg::alloc_record_count_ = 0;
Dbg::TypeCache Dbg::type_cache_;

// Deoptimization support.
std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_;
size_t Dbg::full_deoptimization_event_count_ = 0;

// Instrumentation event reference counters.
size_t Dbg::dex_pc_change_event_ref_count_ = 0;
size_t Dbg::method_enter_event_ref_count_ = 0;
size_t Dbg::method_exit_event_ref_count_ = 0;
size_t Dbg::field_read_event_ref_count_ = 0;
size_t Dbg::field_write_event_ref_count_ = 0;
size_t Dbg::exception_catch_event_ref_count_ = 0;
uint32_t Dbg::instrumentation_events_ = 0;

// Breakpoints.
static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);

void DebugInvokeReq::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
  receiver.VisitRootIfNonNull(visitor, root_info);  // null for static method call.
  klass.VisitRoot(visitor, root_info);
}

void SingleStepControl::AddDexPc(uint32_t dex_pc) {
  dex_pcs_.insert(dex_pc);
}

bool SingleStepControl::ContainsDexPc(uint32_t dex_pc) const {
  return dex_pcs_.find(dex_pc) != dex_pcs_.end();
}

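// Returns true if a breakpoint is installed at the given method and dex pc.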
static bool IsBreakpoint(const ArtMethod* m, uint32_t dex_pc)
    LOCKS_EXCLUDED(Locks::breakpoint_lock_)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
    if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].Method() == m) {
      VLOG(jdwp) << "Hit breakpoint #" << i << ": " << gBreakpoints[i];
      return true;
    }
  }
  return false;
}

static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread)
    LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) {
  MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
  // A thread may be suspended for GC; in this code, we really want to know whether
  // there's a debugger suspension active.
  return thread->IsSuspended() && thread->GetDebugSuspendCount() > 0;
}

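// Helpers to decode JDWP ids into runtime objects. On failure they set *error
// and return null.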
static mirror::Array* DecodeNonNullArray(JDWP::RefTypeId id, JDWP::JdwpError* error)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
  if (o == nullptr) {
    *error = JDWP::ERR_INVALID_OBJECT;
    return nullptr;
  }
  if (!o->IsArrayInstance()) {
    *error = JDWP::ERR_INVALID_ARRAY;
    return nullptr;
  }
  *error = JDWP::ERR_NONE;
  return o->AsArray();
}

static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError* error)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
  if (o == nullptr) {
    *error = JDWP::ERR_INVALID_OBJECT;
    return nullptr;
  }
  if (!o->IsClass()) {
    *error = JDWP::ERR_INVALID_CLASS;
    return nullptr;
  }
  *error = JDWP::ERR_NONE;
  return o->AsClass();
}

static Thread* DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id,
                            JDWP::JdwpError* error)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
    LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_) {
  mirror::Object* thread_peer = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_id, error);
  if (thread_peer == nullptr) {
    // This isn't even an object.
    *error = JDWP::ERR_INVALID_OBJECT;
    return nullptr;
  }

  mirror::Class* java_lang_Thread = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
  if (!java_lang_Thread->IsAssignableFrom(thread_peer->GetClass())) {
    // This isn't a thread.
    *error = JDWP::ERR_INVALID_THREAD;
    return nullptr;
  }

  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
  Thread* thread = Thread::FromManagedThread(soa, thread_peer);
  // If thread is null then this is a java.lang.Thread without a Thread*. It must be an unstarted
  // or a zombie thread.
  *error = (thread == nullptr) ? JDWP::ERR_THREAD_NOT_ALIVE : JDWP::ERR_NONE;
  return thread;
}

static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) {
  // JDWP deliberately uses the descriptor characters' ASCII values for its enum.
  // Note that by "basic" we mean that we don't get more specific than JT_OBJECT.
  return static_cast<JDWP::JdwpTag>(descriptor[0]);
}

static JDWP::JdwpTag BasicTagFromClass(mirror::Class* klass)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  std::string temp;
  const char* descriptor = klass->GetDescriptor(&temp);
  return BasicTagFromDescriptor(descriptor);
}

static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  CHECK(c != nullptr);
  if (c->IsArrayClass()) {
    return JDWP::JT_ARRAY;
  }
  if (c->IsStringClass()) {
    return JDWP::JT_STRING;
  }
  if (c->IsClassClass()) {
    return JDWP::JT_CLASS_OBJECT;
  }
  {
    mirror::Class* thread_class = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
    if (thread_class->IsAssignableFrom(c)) {
      return JDWP::JT_THREAD;
    }
  }
  {
    mirror::Class* thread_group_class =
        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
    if (thread_group_class->IsAssignableFrom(c)) {
      return JDWP::JT_THREAD_GROUP;
    }
  }
  {
    mirror::Class* class_loader_class =
        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader);
    if (class_loader_class->IsAssignableFrom(c)) {
      return JDWP::JT_CLASS_LOADER;
    }
  }
  return JDWP::JT_OBJECT;
}

/*
 * Objects declared to hold Object might actually hold a more specific
 * type.  The debugger may take a special interest in these (e.g. it
 * wants to display the contents of Strings), so we want to return an
 * appropriate tag.
 *
 * Null objects are tagged JT_OBJECT.
 */
JDWP::JdwpTag Dbg::TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o) {
  return (o == nullptr) ? JDWP::JT_OBJECT : TagFromClass(soa, o->GetClass());
}

static bool IsPrimitiveTag(JDWP::JdwpTag tag) {
  switch (tag) {
  case JDWP::JT_BOOLEAN:
  case JDWP::JT_BYTE:
  case JDWP::JT_CHAR:
  case JDWP::JT_FLOAT:
  case JDWP::JT_DOUBLE:
  case JDWP::JT_INT:
  case JDWP::JT_LONG:
  case JDWP::JT_SHORT:
  case JDWP::JT_VOID:
    return true;
  default:
    return false;
  }
}

void Dbg::StartJdwp() {
  if (!gJdwpAllowed || !IsJdwpConfigured()) {
    // No JDWP for you!
    return;
  }

  CHECK(gRegistry == nullptr);
  gRegistry = new ObjectRegistry;

  // Init JDWP if the debugger is enabled. This may connect out to a
  // debugger, passively listen for a debugger, or block waiting for a
  // debugger.
  gJdwpState = JDWP::JdwpState::Create(&gJdwpOptions);
  if (gJdwpState == nullptr) {
    // We probably failed because some other process has the port already, which means that
    // if we don't abort the user is likely to think they're talking to us when they're actually
    // talking to that other process.
    LOG(FATAL) << "Debugger thread failed to initialize";
  }

  // If a debugger has already attached, send the "welcome" message.
  // This may cause us to suspend all threads.
  if (gJdwpState->IsActive()) {
    ScopedObjectAccess soa(Thread::Current());
    gJdwpState->PostVMStart();
  }
}

void Dbg::StopJdwp() {
  // Post VM_DEATH event before the JDWP connection is closed (either by the JDWP thread or the
  // destruction of gJdwpState).
  if (gJdwpState != nullptr && gJdwpState->IsActive()) {
    gJdwpState->PostVMDeath();
  }
  // Prevent the JDWP thread from processing JDWP incoming packets after we close the connection.
  Dispose();
  delete gJdwpState;
  gJdwpState = nullptr;
  delete gRegistry;
  gRegistry = nullptr;
}

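// Called when a GC finishes: pushes updated heap info and heap segment data to
// DDMS if those notifications have been requested.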
void Dbg::GcDidFinish() {
  if (gDdmHpifWhen != HPIF_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Sending heap info to DDM";
    DdmSendHeapInfo(gDdmHpifWhen);
  }
  if (gDdmHpsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping heap to DDM";
    DdmSendHeapSegments(false);
  }
  if (gDdmNhsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping native heap to DDM";
    DdmSendHeapSegments(true);
  }
}

void Dbg::SetJdwpAllowed(bool allowed) {
  gJdwpAllowed = allowed;
}

DebugInvokeReq* Dbg::GetInvokeReq() {
  return Thread::Current()->GetInvokeReq();
}

Thread* Dbg::GetDebugThread() {
  return (gJdwpState != nullptr) ? gJdwpState->GetDebugThread() : nullptr;
}

void Dbg::ClearWaitForEventThread() {
  gJdwpState->ReleaseJdwpTokenForEvent();
}

void Dbg::Connected() {
  CHECK(!gDebuggerConnected);
  VLOG(jdwp) << "JDWP has attached";
  gDebuggerConnected = true;
  gDisposed = false;
}

bool Dbg::RequiresDeoptimization() {
  // We don't need deoptimization if everything runs with interpreter after
  // enabling -Xint mode.
  return !Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly();
}

void Dbg::GoActive() {
  // Enable all debugging features, including scans for breakpoints.
  // This is a no-op if we're already active.
  // Only called from the JDWP handler thread.
  if (IsDebuggerActive()) {
    return;
  }

  {
    // TODO: dalvik only warned if there were breakpoints left over. clear in Dbg::Disconnected?
    ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
    CHECK_EQ(gBreakpoints.size(), 0U);
  }

  {
    MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
    CHECK_EQ(deoptimization_requests_.size(), 0U);
    CHECK_EQ(full_deoptimization_event_count_, 0U);
    CHECK_EQ(dex_pc_change_event_ref_count_, 0U);
    CHECK_EQ(method_enter_event_ref_count_, 0U);
    CHECK_EQ(method_exit_event_ref_count_, 0U);
    CHECK_EQ(field_read_event_ref_count_, 0U);
    CHECK_EQ(field_write_event_ref_count_, 0U);
    CHECK_EQ(exception_catch_event_ref_count_, 0U);
  }

  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll(__FUNCTION__);
  Thread* self = Thread::Current();
  ThreadState old_state = self->SetStateUnsafe(kRunnable);
  CHECK_NE(old_state, kRunnable);
  if (RequiresDeoptimization()) {
    runtime->GetInstrumentation()->EnableDeoptimization();
  }
  instrumentation_events_ = 0;
  gDebuggerActive = true;
  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
  runtime->GetThreadList()->ResumeAll();

  LOG(INFO) << "Debugger is active";
}

void Dbg::Disconnected() {
  CHECK(gDebuggerConnected);

  LOG(INFO) << "Debugger is no longer active";

  // Suspend all threads and exclusively acquire the mutator lock. Set the state of the thread
  // to kRunnable to avoid scoped object access transitions. Remove the debugger as a listener
  // and clear the object registry.
  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll(__FUNCTION__);
  Thread* self = Thread::Current();
  ThreadState old_state = self->SetStateUnsafe(kRunnable);

  // Debugger may not be active at this point.
  if (IsDebuggerActive()) {
    {
      // Since we're going to disable deoptimization, we clear the deoptimization requests queue.
      // This prevents us from having any pending deoptimization request when the debugger attaches
      // to us again while no event has been requested yet.
      MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
      deoptimization_requests_.clear();
      full_deoptimization_event_count_ = 0U;
    }
    if (instrumentation_events_ != 0) {
      runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
                                                    instrumentation_events_);
      instrumentation_events_ = 0;
    }
    if (RequiresDeoptimization()) {
      runtime->GetInstrumentation()->DisableDeoptimization(kDbgInstrumentationKey);
    }
    gDebuggerActive = false;
  }
  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
  runtime->GetThreadList()->ResumeAll();

  {
    ScopedObjectAccess soa(self);
    gRegistry->Clear();
  }

  gDebuggerConnected = false;
}

void Dbg::ConfigureJdwp(const JDWP::JdwpOptions& jdwp_options) {
  CHECK_NE(jdwp_options.transport, JDWP::kJdwpTransportUnknown);
  gJdwpOptions = jdwp_options;
  gJdwpConfigured = true;
}

bool Dbg::IsJdwpConfigured() {
  return gJdwpConfigured;
}

int64_t Dbg::LastDebuggerActivity() {
  return gJdwpState->LastDebuggerActivity();
}

void Dbg::UndoDebuggerSuspensions() {
  Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
}

std::string Dbg::GetClassName(JDWP::RefTypeId class_id) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(class_id, &error);
  if (o == nullptr) {
    if (error == JDWP::ERR_NONE) {
      return "null";
    } else {
      return StringPrintf("invalid object %p", reinterpret_cast<void*>(class_id));
    }
  }
  if (!o->IsClass()) {
    return StringPrintf("non-class %p", o);  // This is only used for debugging output anyway.
  }
  return GetClassName(o->AsClass());
}

std::string Dbg::GetClassName(mirror::Class* klass) {
  if (klass == nullptr) {
    return "null";
  }
  std::string temp;
  return DescriptorToName(klass->GetDescriptor(&temp));
}

JDWP::JdwpError Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId* class_object_id) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, &status);
  if (c == nullptr) {
    *class_object_id = 0;
    return status;
  }
  *class_object_id = gRegistry->Add(c);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId* superclass_id) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, &status);
  if (c == nullptr) {
    *superclass_id = 0;
    return status;
  }
  if (c->IsInterface()) {
    // http://code.google.com/p/android/issues/detail?id=20856
    *superclass_id = 0;
  } else {
    *superclass_id = gRegistry->Add(c->GetSuperClass());
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  expandBufAddObjectId(pReply, gRegistry->Add(o->GetClass()->GetClassLoader()));
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(id, &error);
  if (c == nullptr) {
    return error;
  }

  uint32_t access_flags = c->GetAccessFlags() & kAccJavaFlagsMask;

  // Set ACC_SUPER. Dex files don't contain this flag but only classes are supposed to have it set,
  // not interfaces.
  // Class.getModifiers doesn't return it, but JDWP does, so we set it here.
  if ((access_flags & kAccInterface) == 0) {
    access_flags |= kAccSuper;
  }

  expandBufAdd4BE(pReply, access_flags);

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }

  // Ensure all threads are suspended while we read objects' lock words.
  Thread* self = Thread::Current();
  CHECK_EQ(self->GetState(), kRunnable);
  self->TransitionFromRunnableToSuspended(kSuspended);
  Runtime::Current()->GetThreadList()->SuspendAll(__FUNCTION__);

  MonitorInfo monitor_info(o);

  Runtime::Current()->GetThreadList()->ResumeAll();
  self->TransitionFromSuspendedToRunnable();

  if (monitor_info.owner_ != nullptr) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.owner_->GetPeer()));
  } else {
    expandBufAddObjectId(reply, gRegistry->Add(nullptr));
  }
  expandBufAdd4BE(reply, monitor_info.entry_count_);
  expandBufAdd4BE(reply, monitor_info.waiters_.size());
  for (size_t i = 0; i < monitor_info.waiters_.size(); ++i) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.waiters_[i]->GetPeer()));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id,
                                      std::vector<JDWP::ObjectId>* monitors,
                                      std::vector<uint32_t>* stack_depths) {
  struct OwnedMonitorVisitor : public StackVisitor {
    OwnedMonitorVisitor(Thread* thread, Context* context,
                        std::vector<JDWP::ObjectId>* monitor_vector,
                        std::vector<uint32_t>* stack_depth_vector)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        current_stack_depth(0),
        monitors(monitor_vector),
        stack_depths(stack_depth_vector) {}

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      if (!GetMethod()->IsRuntimeMethod()) {
        Monitor::VisitLocks(this, AppendOwnedMonitors, this);
        ++current_stack_depth;
      }
      return true;
    }

    static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg);
      visitor->monitors->push_back(gRegistry->Add(owned_monitor));
      visitor->stack_depths->push_back(visitor->current_stack_depth);
    }

    size_t current_stack_depth;
    std::vector<JDWP::ObjectId>* const monitors;
    std::vector<uint32_t>* const stack_depths;
  };

  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (thread == nullptr) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  std::unique_ptr<Context> context(Context::Create());
  OwnedMonitorVisitor visitor(thread, context.get(), monitors, stack_depths);
  visitor.WalkStack();
  return JDWP::ERR_NONE;
}

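// Returns the id of the monitor the given thread is currently contending for,
// or 0 if there is none.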
JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id,
                                         JDWP::ObjectId* contended_monitor) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  *contended_monitor = 0;
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (thread == nullptr) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  mirror::Object* contended_monitor_obj = Monitor::GetContendedMonitor(thread);
  // Add() requires the thread_list_lock_ not held to avoid the lock
  // level violation.
  *contended_monitor = gRegistry->Add(contended_monitor_obj);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
                                       std::vector<uint64_t>* counts) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  heap->CollectGarbage(false);
  std::vector<mirror::Class*> classes;
  counts->clear();
  for (size_t i = 0; i < class_ids.size(); ++i) {
    JDWP::JdwpError error;
    mirror::Class* c = DecodeClass(class_ids[i], &error);
    if (c == nullptr) {
      return error;
    }
    classes.push_back(c);
    counts->push_back(0);
  }
  heap->CountInstances(classes, false, &(*counts)[0]);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count,
                                  std::vector<JDWP::ObjectId>* instances) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  // We only want reachable instances, so do a GC.
  heap->CollectGarbage(false);
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }
  std::vector<mirror::Object*> raw_instances;
  Runtime::Current()->GetHeap()->GetInstances(c, max_count, raw_instances);
  for (size_t i = 0; i < raw_instances.size(); ++i) {
    instances->push_back(gRegistry->Add(raw_instances[i]));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
                                         std::vector<JDWP::ObjectId>* referring_objects) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  heap->CollectGarbage(false);
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  std::vector<mirror::Object*> raw_instances;
  heap->GetReferringObjects(o, max_count, raw_instances);
  for (size_t i = 0; i < raw_instances.size(); ++i) {
    referring_objects->push_back(gRegistry->Add(raw_instances[i]));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::DisableCollection(JDWP::ObjectId object_id) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  gRegistry->DisableCollection(object_id);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::EnableCollection(JDWP::ObjectId object_id) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  // Unlike DisableCollection, the JDWP spec does not state that an invalid object causes an
  // error. The RI also ignores these cases and never returns an error. However, it's not obvious
  // why this command should behave differently from the DisableCollection and IsCollected
  // commands. So let's be more strict and return an error if this happens.
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  gRegistry->EnableCollection(object_id);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::IsCollected(JDWP::ObjectId object_id, bool* is_collected) {
  *is_collected = true;
  if (object_id == 0) {
    // Null object id is invalid.
    return JDWP::ERR_INVALID_OBJECT;
  }
  // The JDWP spec states that an INVALID_OBJECT error is returned if the object id is not valid.
  // However, the RI seems to ignore this and assumes the object has been collected.
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o != nullptr) {
    *is_collected = gRegistry->IsCollected(object_id);
  }
  return JDWP::ERR_NONE;
}

void Dbg::DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count) {
  gRegistry->DisposeObject(object_id, reference_count);
}

JDWP::JdwpTypeTag Dbg::GetTypeTag(mirror::Class* klass) {
  DCHECK(klass != nullptr);
  if (klass->IsArrayClass()) {
    return JDWP::TT_ARRAY;
  } else if (klass->IsInterface()) {
    return JDWP::TT_INTERFACE;
  } else {
    return JDWP::TT_CLASS;
  }
}

JDWP::JdwpError Dbg::GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }

  JDWP::JdwpTypeTag type_tag = GetTypeTag(c);
  expandBufAdd1(pReply, type_tag);
  expandBufAddRefTypeId(pReply, class_id);
  return JDWP::ERR_NONE;
}

void Dbg::GetClassList(std::vector<JDWP::RefTypeId>* classes) {
  // Get the complete list of reference classes (i.e. all classes except
  // the primitive types).
  // Returns a newly-allocated buffer full of RefTypeId values.
  struct ClassListCreator {
    explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes_in) : classes(classes_in) {
    }

    static bool Visit(mirror::Class* c, void* arg) {
      return reinterpret_cast<ClassListCreator*>(arg)->Visit(c);
    }

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool Visit(mirror::Class* c) NO_THREAD_SAFETY_ANALYSIS {
      if (!c->IsPrimitive()) {
        classes->push_back(gRegistry->AddRefType(c));
      }
      return true;
    }

    std::vector<JDWP::RefTypeId>* const classes;
  };

  ClassListCreator clc(classes);
  Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(ClassListCreator::Visit,
                                                                       &clc);
}

JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
                                  uint32_t* pStatus, std::string* pDescriptor) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }

  if (c->IsArrayClass()) {
    *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
    *pTypeTag = JDWP::TT_ARRAY;
  } else {
    if (c->IsErroneous()) {
      *pStatus = JDWP::CS_ERROR;
    } else {
      *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED | JDWP::CS_INITIALIZED;
    }
    *pTypeTag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
  }

  if (pDescriptor != nullptr) {
    std::string temp;
    *pDescriptor = c->GetDescriptor(&temp);
  }
  return JDWP::ERR_NONE;
}

void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>* ids) {
  std::vector<mirror::Class*> classes;
  Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, classes);
  ids->clear();
  for (size_t i = 0; i < classes.size(); ++i) {
    ids->push_back(gRegistry->Add(classes[i]));
  }
}

JDWP::JdwpError Dbg::GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }

  JDWP::JdwpTypeTag type_tag = GetTypeTag(o->GetClass());
  JDWP::RefTypeId type_id = gRegistry->AddRefType(o->GetClass());

  expandBufAdd1(pReply, type_tag);
  expandBufAddRefTypeId(pReply, type_id);

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string* signature) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }
  std::string temp;
  *signature = c->GetDescriptor(&temp);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSourceFile(JDWP::RefTypeId class_id, std::string* result) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }
  const char* source_file = c->GetSourceFile();
  if (source_file == nullptr) {
    return JDWP::ERR_ABSENT_INFORMATION;
  }
  *result = source_file;
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetObjectTag(JDWP::ObjectId object_id, uint8_t* tag) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (error != JDWP::ERR_NONE) {
    *tag = JDWP::JT_VOID;
    return error;
  }
  *tag = TagFromObject(soa, o);
  return JDWP::ERR_NONE;
}

size_t Dbg::GetTagWidth(JDWP::JdwpTag tag) {
  switch (tag) {
  case JDWP::JT_VOID:
    return 0;
  case JDWP::JT_BYTE:
  case JDWP::JT_BOOLEAN:
    return 1;
  case JDWP::JT_CHAR:
  case JDWP::JT_SHORT:
    return 2;
  case JDWP::JT_FLOAT:
  case JDWP::JT_INT:
    return 4;
  case JDWP::JT_ARRAY:
  case JDWP::JT_OBJECT:
  case JDWP::JT_STRING:
  case JDWP::JT_THREAD:
  case JDWP::JT_THREAD_GROUP:
  case JDWP::JT_CLASS_LOADER:
  case JDWP::JT_CLASS_OBJECT:
    return sizeof(JDWP::ObjectId);
  case JDWP::JT_DOUBLE:
  case JDWP::JT_LONG:
    return 8;
  default:
    LOG(FATAL) << "Unknown tag " << tag;
    return -1;
  }
}

JDWP::JdwpError Dbg::GetArrayLength(JDWP::ObjectId array_id, int32_t* length) {
  JDWP::JdwpError error;
  mirror::Array* a = DecodeNonNullArray(array_id, &error);
  if (a == nullptr) {
    return error;
  }
  *length = a->GetLength();
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId array_id, int offset, int count,
                                 JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Array* a = DecodeNonNullArray(array_id, &error);
  if (a == nullptr) {
    return error;
  }

  if (offset < 0 || count < 0 || offset > a->GetLength() || a->GetLength() - offset < count) {
    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset
                 << "; count=" << count;
    return JDWP::ERR_INVALID_LENGTH;
  }
  JDWP::JdwpTag element_tag = BasicTagFromClass(a->GetClass()->GetComponentType());
  expandBufAdd1(pReply, element_tag);
  expandBufAdd4BE(pReply, count);

  if (IsPrimitiveTag(element_tag)) {
    size_t width = GetTagWidth(element_tag);
    uint8_t* dst = expandBufAddSpace(pReply, count * width);
    if (width == 8) {
      const uint64_t* src8 = reinterpret_cast<uint64_t*>(a->GetRawData(sizeof(uint64_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write8BE(&dst, src8[offset + i]);
    } else if (width == 4) {
      const uint32_t* src4 = reinterpret_cast<uint32_t*>(a->GetRawData(sizeof(uint32_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write4BE(&dst, src4[offset + i]);
    } else if (width == 2) {
      const uint16_t* src2 = reinterpret_cast<uint16_t*>(a->GetRawData(sizeof(uint16_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write2BE(&dst, src2[offset + i]);
    } else {
      const uint8_t* src = reinterpret_cast<uint8_t*>(a->GetRawData(sizeof(uint8_t), 0));
      memcpy(dst, &src[offset * width], count * width);
    }
  } else {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    mirror::ObjectArray<mirror::Object>* oa = a->AsObjectArray<mirror::Object>();
    for (int i = 0; i < count; ++i) {
      mirror::Object* element = oa->Get(offset + i);
      JDWP::JdwpTag specific_tag = (element != nullptr) ? TagFromObject(soa, element)
                                                        : element_tag;
      expandBufAdd1(pReply, specific_tag);
      expandBufAddObjectId(pReply, gRegistry->Add(element));
    }
  }

  return JDWP::ERR_NONE;
}

template <typename T>
static void CopyArrayData(mirror::Array* a, JDWP::Request* src, int offset, int count)
    NO_THREAD_SAFETY_ANALYSIS {
  // TODO: fix when annotalysis correctly handles non-member functions.
  DCHECK(a->GetClass()->IsPrimitiveArray());

  T* dst = reinterpret_cast<T*>(a->GetRawData(sizeof(T), offset));
  for (int i = 0; i < count; ++i) {
    *dst++ = src->ReadValue(sizeof(T));
  }
}

JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int count,
                                      JDWP::Request* request) {
  JDWP::JdwpError error;
  mirror::Array* dst = DecodeNonNullArray(array_id, &error);
  if (dst == nullptr) {
    return error;
  }

  if (offset < 0 || count < 0 || offset > dst->GetLength() || dst->GetLength() - offset < count) {
    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset
                 << "; count=" << count;
    return JDWP::ERR_INVALID_LENGTH;
  }
  JDWP::JdwpTag element_tag = BasicTagFromClass(dst->GetClass()->GetComponentType());

  if (IsPrimitiveTag(element_tag)) {
    size_t width = GetTagWidth(element_tag);
    if (width == 8) {
      CopyArrayData<uint64_t>(dst, request, offset, count);
    } else if (width == 4) {
      CopyArrayData<uint32_t>(dst, request, offset, count);
    } else if (width == 2) {
      CopyArrayData<uint16_t>(dst, request, offset, count);
    } else {
      CopyArrayData<uint8_t>(dst, request, offset, count);
    }
  } else {
    mirror::ObjectArray<mirror::Object>* oa = dst->AsObjectArray<mirror::Object>();
    for (int i = 0; i < count; ++i) {
      JDWP::ObjectId id = request->ReadObjectId();
      mirror::Object* o = gRegistry->Get<mirror::Object*>(id, &error);
      if (error != JDWP::ERR_NONE) {
        return error;
      }
      oa->Set<false>(offset + i, o);
    }
  }

  return JDWP::ERR_NONE;
}

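// Allocates a new string from modified UTF-8 and returns its object id, or
// ERR_OUT_OF_MEMORY if allocation fails.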
CreateString(const std::string & str,JDWP::ObjectId * new_string_id)1329 JDWP::JdwpError Dbg::CreateString(const std::string& str, JDWP::ObjectId* new_string_id) {
1330   Thread* self = Thread::Current();
1331   mirror::String* new_string = mirror::String::AllocFromModifiedUtf8(self, str.c_str());
1332   if (new_string == nullptr) {
1333     DCHECK(self->IsExceptionPending());
1334     self->ClearException();
1335     LOG(ERROR) << "Could not allocate string";
1336     *new_string_id = 0;
1337     return JDWP::ERR_OUT_OF_MEMORY;
1338   }
1339   *new_string_id = gRegistry->Add(new_string);
1340   return JDWP::ERR_NONE;
1341 }

JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId* new_object_id) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    *new_object_id = 0;
    return error;
  }
  Thread* self = Thread::Current();
  mirror::Object* new_object = c->AllocObject(self);
  if (new_object == nullptr) {
    DCHECK(self->IsExceptionPending());
    self->ClearException();
    LOG(ERROR) << "Could not allocate object of type " << PrettyDescriptor(c);
    *new_object_id = 0;
    return JDWP::ERR_OUT_OF_MEMORY;
  }
  *new_object_id = gRegistry->Add(new_object);
  return JDWP::ERR_NONE;
}

/*
 * Used by Eclipse's "Display" view to evaluate "new byte[5]" to get "(byte[]) [0, 0, 0, 0, 0]".
 */
JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length,
                                       JDWP::ObjectId* new_array_id) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(array_class_id, &error);
  if (c == nullptr) {
    *new_array_id = 0;
    return error;
  }
  Thread* self = Thread::Current();
  gc::Heap* heap = Runtime::Current()->GetHeap();
  mirror::Array* new_array = mirror::Array::Alloc<true>(self, c, length,
                                                        c->GetComponentSizeShift(),
                                                        heap->GetCurrentAllocator());
  if (new_array == nullptr) {
    DCHECK(self->IsExceptionPending());
    self->ClearException();
    LOG(ERROR) << "Could not allocate array of type " << PrettyDescriptor(c);
    *new_array_id = 0;
    return JDWP::ERR_OUT_OF_MEMORY;
  }
  *new_array_id = gRegistry->Add(new_array);
  return JDWP::ERR_NONE;
}
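
// Editor's note (illustrative, not part of the original source): continuing
// the comment above, evaluating "new byte[5]" resolves the RefTypeId of the
// "[B" class and calls CreateArrayObject(byte_array_class_id, 5, &id). The
// allocation is zero-filled, which is why Eclipse shows [0, 0, 0, 0, 0].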

JDWP::FieldId Dbg::ToFieldId(const ArtField* f) {
  return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f));
}

static JDWP::MethodId ToMethodId(const ArtMethod* m)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(m));
}

static ArtField* FromFieldId(JDWP::FieldId fid)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return reinterpret_cast<ArtField*>(static_cast<uintptr_t>(fid));
}

static ArtMethod* FromMethodId(JDWP::MethodId mid)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(mid));
}

bool Dbg::MatchThread(JDWP::ObjectId expected_thread_id, Thread* event_thread) {
  CHECK(event_thread != nullptr);
  JDWP::JdwpError error;
  mirror::Object* expected_thread_peer = gRegistry->Get<mirror::Object*>(
      expected_thread_id, &error);
  return expected_thread_peer == event_thread->GetPeer();
}

bool Dbg::MatchLocation(const JDWP::JdwpLocation& expected_location,
                        const JDWP::EventLocation& event_location) {
  if (expected_location.dex_pc != event_location.dex_pc) {
    return false;
  }
  ArtMethod* m = FromMethodId(expected_location.method_id);
  return m == event_location.method;
}

bool Dbg::MatchType(mirror::Class* event_class, JDWP::RefTypeId class_id) {
  if (event_class == nullptr) {
    return false;
  }
  JDWP::JdwpError error;
  mirror::Class* expected_class = DecodeClass(class_id, &error);
  CHECK(expected_class != nullptr);
  return expected_class->IsAssignableFrom(event_class);
}

bool Dbg::MatchField(JDWP::RefTypeId expected_type_id, JDWP::FieldId expected_field_id,
                     ArtField* event_field) {
  ArtField* expected_field = FromFieldId(expected_field_id);
  if (expected_field != event_field) {
    return false;
  }
  return Dbg::MatchType(event_field->GetDeclaringClass(), expected_type_id);
}

bool Dbg::MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* event_instance) {
  JDWP::JdwpError error;
  mirror::Object* modifier_instance = gRegistry->Get<mirror::Object*>(expected_instance_id, &error);
  return modifier_instance == event_instance;
}

void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
    LOCKS_EXCLUDED(Locks::thread_list_lock_,
                   Locks::thread_suspend_count_lock_) {
  if (m == nullptr) {
    memset(location, 0, sizeof(*location));
  } else {
    mirror::Class* c = m->GetDeclaringClass();
    location->type_tag = GetTypeTag(c);
    location->class_id = gRegistry->AddRefType(c);
    location->method_id = ToMethodId(m);
    location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint64_t>(-1) : dex_pc;
  }
}

std::string Dbg::GetMethodName(JDWP::MethodId method_id) {
  ArtMethod* m = FromMethodId(method_id);
  if (m == nullptr) {
    return "null";
  }
  return m->GetInterfaceMethodIfProxy(sizeof(void*))->GetName();
}

std::string Dbg::GetFieldName(JDWP::FieldId field_id) {
  ArtField* f = FromFieldId(field_id);
  if (f == nullptr) {
    return "null";
  }
  return f->GetName();
}

/*
 * Augment the access flags for synthetic methods and fields by setting
 * the (as described by the spec) "0xf0000000 bit".  Also, strip out any
 * flags not specified by the Java programming language.
 */
static uint32_t MangleAccessFlags(uint32_t accessFlags) {
  accessFlags &= kAccJavaFlagsMask;
  if ((accessFlags & kAccSynthetic) != 0) {
    accessFlags |= 0xf0000000;
  }
  return accessFlags;
}
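
// Editor's worked example (not part of the original source): a private
// synthetic field carries accessFlags 0x1002 (kAccPrivate | kAccSynthetic).
// The mask keeps both bits, the synthetic bit sets the spec's marker, and
// the debugger sees 0xf0001002.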

/*
 * Circularly shifts registers so that arguments come first. Debuggers
 * expect slots to begin with arguments, but dex code places them at
 * the end.
 */
static uint16_t MangleSlot(uint16_t slot, ArtMethod* m)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item == nullptr) {
    // We should not get here for a method without code (native, proxy or abstract). Log it and
    // return the slot as is since all registers are arguments.
    LOG(WARNING) << "Trying to mangle slot for method without code " << PrettyMethod(m);
    return slot;
  }
  uint16_t ins_size = code_item->ins_size_;
  uint16_t locals_size = code_item->registers_size_ - ins_size;
  if (slot >= locals_size) {
    return slot - locals_size;
  } else {
    return slot + ins_size;
  }
}
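
// Editor's worked example (not part of the original source): with
// registers_size_ = 5 and ins_size_ = 2, locals occupy v0..v2 and arguments
// v3..v4. MangleSlot maps 3->0 and 4->1 (arguments first), then 0->2, 1->3,
// 2->4 for the locals.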

/*
 * Circularly shifts registers so that arguments come last. Reverts
 * slots to dex style argument placement.
 */
static uint16_t DemangleSlot(uint16_t slot, ArtMethod* m, JDWP::JdwpError* error)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item == nullptr) {
    // We should not get here for a method without code (native, proxy or abstract). Log it and
    // return the slot as is since all registers are arguments.
    LOG(WARNING) << "Trying to demangle slot for method without code " << PrettyMethod(m);
    uint16_t vreg_count = ArtMethod::NumArgRegisters(m->GetShorty());
    if (slot < vreg_count) {
      *error = JDWP::ERR_NONE;
      return slot;
    }
  } else {
    if (slot < code_item->registers_size_) {
      uint16_t ins_size = code_item->ins_size_;
      uint16_t locals_size = code_item->registers_size_ - ins_size;
      *error = JDWP::ERR_NONE;
      return (slot < ins_size) ? slot + locals_size : slot - ins_size;
    }
  }

  // Slot is invalid in the method.
  LOG(ERROR) << "Invalid local slot " << slot << " for method " << PrettyMethod(m);
  *error = JDWP::ERR_INVALID_SLOT;
  return DexFile::kDexNoIndex16;
}
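
// Editor's worked example (not part of the original source): continuing the
// MangleSlot example (registers_size_ = 5, ins_size_ = 2), DemangleSlot is
// the inverse permutation: 0->3, 1->4, 2->0, 3->1, 4->2, so
// DemangleSlot(MangleSlot(v)) == v for every valid register.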

JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }

  size_t instance_field_count = c->NumInstanceFields();
  size_t static_field_count = c->NumStaticFields();

  expandBufAdd4BE(pReply, instance_field_count + static_field_count);

  for (size_t i = 0; i < instance_field_count + static_field_count; ++i) {
    ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) : c->GetStaticField(i - instance_field_count);
    expandBufAddFieldId(pReply, ToFieldId(f));
    expandBufAddUtf8String(pReply, f->GetName());
    expandBufAddUtf8String(pReply, f->GetTypeDescriptor());
    if (with_generic) {
      static const char genericSignature[1] = "";
      expandBufAddUtf8String(pReply, genericSignature);
    }
    expandBufAdd4BE(pReply, MangleAccessFlags(f->GetAccessFlags()));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_generic,
                                           JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }

  size_t direct_method_count = c->NumDirectMethods();
  size_t virtual_method_count = c->NumVirtualMethods();

  expandBufAdd4BE(pReply, direct_method_count + virtual_method_count);

  auto* cl = Runtime::Current()->GetClassLinker();
  auto ptr_size = cl->GetImagePointerSize();
  for (size_t i = 0; i < direct_method_count + virtual_method_count; ++i) {
    ArtMethod* m = i < direct_method_count ?
        c->GetDirectMethod(i, ptr_size) : c->GetVirtualMethod(i - direct_method_count, ptr_size);
    expandBufAddMethodId(pReply, ToMethodId(m));
    expandBufAddUtf8String(pReply, m->GetInterfaceMethodIfProxy(sizeof(void*))->GetName());
    expandBufAddUtf8String(pReply,
                           m->GetInterfaceMethodIfProxy(sizeof(void*))->GetSignature().ToString());
    if (with_generic) {
      const char* generic_signature = "";
      expandBufAddUtf8String(pReply, generic_signature);
    }
    expandBufAdd4BE(pReply, MangleAccessFlags(m->GetAccessFlags()));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  Thread* self = Thread::Current();
  StackHandleScope<1> hs(self);
  Handle<mirror::Class> c(hs.NewHandle(DecodeClass(class_id, &error)));
  if (c.Get() == nullptr) {
    return error;
  }
  size_t interface_count = c->NumDirectInterfaces();
  expandBufAdd4BE(pReply, interface_count);
  for (size_t i = 0; i < interface_count; ++i) {
    expandBufAddRefTypeId(pReply,
                          gRegistry->AddRefType(mirror::Class::GetDirectInterface(self, c, i)));
  }
  return JDWP::ERR_NONE;
}

void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::ExpandBuf* pReply) {
  struct DebugCallbackContext {
    int numItems;
    JDWP::ExpandBuf* pReply;

    static bool Callback(void* context, uint32_t address, uint32_t line_number) {
      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
      expandBufAdd8BE(pContext->pReply, address);
      expandBufAdd4BE(pContext->pReply, line_number);
      pContext->numItems++;
      return false;
    }
  };
  ArtMethod* m = FromMethodId(method_id);
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  uint64_t start, end;
  if (code_item == nullptr) {
    DCHECK(m->IsNative() || m->IsProxyMethod());
    start = -1;
    end = -1;
  } else {
    start = 0;
    // Return the index of the last instruction
    end = code_item->insns_size_in_code_units_ - 1;
  }

  expandBufAdd8BE(pReply, start);
  expandBufAdd8BE(pReply, end);

  // Add numLines later
  size_t numLinesOffset = expandBufGetLength(pReply);
  expandBufAdd4BE(pReply, 0);

  DebugCallbackContext context;
  context.numItems = 0;
  context.pReply = pReply;

  if (code_item != nullptr) {
    m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
                                     DebugCallbackContext::Callback, nullptr, &context);
  }

  JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, context.numItems);
}
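
// Editor's note on the reply layout (not part of the original source): the
// Method.LineTable reply assembled above is
//   [start:8][end:8][numLines:4]([codeIndex:8][lineNumber:4])*
// numLines is unknown until DecodeDebugInfo() has run, so a 4-byte
// placeholder is appended first and patched in place via numLinesOffset.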

void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool with_generic,
                              JDWP::ExpandBuf* pReply) {
  struct DebugCallbackContext {
    ArtMethod* method;
    JDWP::ExpandBuf* pReply;
    size_t variable_count;
    bool with_generic;

    static void Callback(void* context, uint16_t slot, uint32_t startAddress, uint32_t endAddress,
                         const char* name, const char* descriptor, const char* signature)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);

      VLOG(jdwp) << StringPrintf("    %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d",
                                 pContext->variable_count, startAddress, endAddress - startAddress,
                                 name, descriptor, signature, slot,
                                 MangleSlot(slot, pContext->method));

      slot = MangleSlot(slot, pContext->method);

      expandBufAdd8BE(pContext->pReply, startAddress);
      expandBufAddUtf8String(pContext->pReply, name);
      expandBufAddUtf8String(pContext->pReply, descriptor);
      if (pContext->with_generic) {
        expandBufAddUtf8String(pContext->pReply, signature);
      }
      expandBufAdd4BE(pContext->pReply, endAddress - startAddress);
      expandBufAdd4BE(pContext->pReply, slot);

      ++pContext->variable_count;
    }
  };
  ArtMethod* m = FromMethodId(method_id);

  // arg_count considers doubles and longs to take 2 units.
  // variable_count considers everything to take 1 unit.
  std::string shorty(m->GetShorty());
  expandBufAdd4BE(pReply, ArtMethod::NumArgRegisters(shorty));

  // We don't know the total number of variables yet, so leave a blank and update it later.
  size_t variable_count_offset = expandBufGetLength(pReply);
  expandBufAdd4BE(pReply, 0);

  DebugCallbackContext context;
  context.method = m;
  context.pReply = pReply;
  context.variable_count = 0;
  context.with_generic = with_generic;

  const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item != nullptr) {
    m->GetDexFile()->DecodeDebugInfo(
        code_item, m->IsStatic(), m->GetDexMethodIndex(), nullptr, DebugCallbackContext::Callback,
        &context);
  }

  JDWP::Set4BE(expandBufGetBuffer(pReply) + variable_count_offset, context.variable_count);
}

void Dbg::OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value,
                                  JDWP::ExpandBuf* pReply) {
  ArtMethod* m = FromMethodId(method_id);
  JDWP::JdwpTag tag = BasicTagFromDescriptor(m->GetShorty());
  OutputJValue(tag, return_value, pReply);
}

void Dbg::OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value,
                           JDWP::ExpandBuf* pReply) {
  ArtField* f = FromFieldId(field_id);
  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
  OutputJValue(tag, field_value, pReply);
}

JDWP::JdwpError Dbg::GetBytecodes(JDWP::RefTypeId, JDWP::MethodId method_id,
                                  std::vector<uint8_t>* bytecodes) {
  ArtMethod* m = FromMethodId(method_id);
  if (m == nullptr) {
    return JDWP::ERR_INVALID_METHODID;
  }
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  size_t byte_count = code_item->insns_size_in_code_units_ * 2;
  const uint8_t* begin = reinterpret_cast<const uint8_t*>(code_item->insns_);
  const uint8_t* end = begin + byte_count;
  for (const uint8_t* p = begin; p != end; ++p) {
    bytecodes->push_back(*p);
  }
  return JDWP::ERR_NONE;
}
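
// Editor's note (not part of the original source): dex instructions are
// measured in 16-bit code units, so a code item with
// insns_size_in_code_units_ == 3 yields byte_count == 6 raw bytes here. This
// path assumes the method has a code item; a native or abstract method would
// reach the code_item dereference above with a null pointer.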

JDWP::JdwpTag Dbg::GetFieldBasicTag(JDWP::FieldId field_id) {
  return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
}

JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId field_id) {
  return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
}

static JValue GetArtFieldValue(ArtField* f, mirror::Object* o)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
  JValue field_value;
  switch (fieldType) {
    case Primitive::kPrimBoolean:
      field_value.SetZ(f->GetBoolean(o));
      return field_value;

    case Primitive::kPrimByte:
      field_value.SetB(f->GetByte(o));
      return field_value;

    case Primitive::kPrimChar:
      field_value.SetC(f->GetChar(o));
      return field_value;

    case Primitive::kPrimShort:
      field_value.SetS(f->GetShort(o));
      return field_value;

    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      // Int and Float must be treated as 32-bit values in JDWP.
      field_value.SetI(f->GetInt(o));
      return field_value;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      // Long and Double must be treated as 64-bit values in JDWP.
      field_value.SetJ(f->GetLong(o));
      return field_value;

    case Primitive::kPrimNot:
      field_value.SetL(f->GetObject(o));
      return field_value;

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Attempt to read from field of type 'void'";
      UNREACHABLE();
  }
  LOG(FATAL) << "Attempt to read from field of unknown type";
  UNREACHABLE();
}

static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id,
                                         JDWP::FieldId field_id, JDWP::ExpandBuf* pReply,
                                         bool is_static)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(ref_type_id, &error);
  if (ref_type_id != 0 && c == nullptr) {
    return error;
  }

  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error);
  if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  ArtField* f = FromFieldId(field_id);

  mirror::Class* receiver_class = c;
  if (receiver_class == nullptr && o != nullptr) {
    receiver_class = o->GetClass();
  }
  // TODO: should we give up now if receiver_class is null?
  if (receiver_class != nullptr && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) {
    LOG(INFO) << "ERR_INVALID_FIELDID: " << PrettyField(f) << " " << PrettyClass(receiver_class);
    return JDWP::ERR_INVALID_FIELDID;
  }

  // The RI only enforces the static/non-static mismatch in one direction.
  // TODO: should we change the tests and check both?
  if (is_static) {
    if (!f->IsStatic()) {
      return JDWP::ERR_INVALID_FIELDID;
    }
  } else {
    if (f->IsStatic()) {
      LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.GetValues"
                   << " on static field " << PrettyField(f);
    }
  }
  if (f->IsStatic()) {
    o = f->GetDeclaringClass();
  }

  JValue field_value(GetArtFieldValue(f, o));
  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
  Dbg::OutputJValue(tag, &field_value, pReply);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
                                   JDWP::ExpandBuf* pReply) {
  return GetFieldValueImpl(0, object_id, field_id, pReply, false);
}

JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id,
                                         JDWP::ExpandBuf* pReply) {
  return GetFieldValueImpl(ref_type_id, 0, field_id, pReply, true);
}

static JDWP::JdwpError SetArtFieldValue(ArtField* f, mirror::Object* o, uint64_t value, int width)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
  // Debugging only happens at runtime so we know we are not running in a transaction.
  static constexpr bool kNoTransactionMode = false;
  switch (fieldType) {
    case Primitive::kPrimBoolean:
      CHECK_EQ(width, 1);
      f->SetBoolean<kNoTransactionMode>(o, static_cast<uint8_t>(value));
      return JDWP::ERR_NONE;

    case Primitive::kPrimByte:
      CHECK_EQ(width, 1);
      f->SetByte<kNoTransactionMode>(o, static_cast<uint8_t>(value));
      return JDWP::ERR_NONE;

    case Primitive::kPrimChar:
      CHECK_EQ(width, 2);
      f->SetChar<kNoTransactionMode>(o, static_cast<uint16_t>(value));
      return JDWP::ERR_NONE;

    case Primitive::kPrimShort:
      CHECK_EQ(width, 2);
      f->SetShort<kNoTransactionMode>(o, static_cast<int16_t>(value));
      return JDWP::ERR_NONE;

    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      CHECK_EQ(width, 4);
      // Int and Float must be treated as 32-bit values in JDWP.
      f->SetInt<kNoTransactionMode>(o, static_cast<int32_t>(value));
      return JDWP::ERR_NONE;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      CHECK_EQ(width, 8);
      // Long and Double must be treated as 64-bit values in JDWP.
      f->SetLong<kNoTransactionMode>(o, value);
      return JDWP::ERR_NONE;

    case Primitive::kPrimNot: {
      JDWP::JdwpError error;
      mirror::Object* v = Dbg::GetObjectRegistry()->Get<mirror::Object*>(value, &error);
      if (error != JDWP::ERR_NONE) {
        return JDWP::ERR_INVALID_OBJECT;
      }
      if (v != nullptr) {
        mirror::Class* field_type;
        {
          StackHandleScope<2> hs(Thread::Current());
          HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v));
          HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o));
          field_type = f->GetType<true>();
        }
        if (!field_type->IsAssignableFrom(v->GetClass())) {
          return JDWP::ERR_INVALID_OBJECT;
        }
      }
      f->SetObject<kNoTransactionMode>(o, v);
      return JDWP::ERR_NONE;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Attempt to write to field of type 'void'";
      UNREACHABLE();
  }
  LOG(FATAL) << "Attempt to write to field of unknown type";
  UNREACHABLE();
}

static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
                                         uint64_t value, int width, bool is_static)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  JDWP::JdwpError error;
  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error);
  if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  ArtField* f = FromFieldId(field_id);

  // The RI only enforces the static/non-static mismatch in one direction.
  // TODO: should we change the tests and check both?
  if (is_static) {
    if (!f->IsStatic()) {
      return JDWP::ERR_INVALID_FIELDID;
    }
  } else {
    if (f->IsStatic()) {
      LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.SetValues"
                   << " on static field " << PrettyField(f);
    }
  }
  if (f->IsStatic()) {
    o = f->GetDeclaringClass();
  }
  return SetArtFieldValue(f, o, value, width);
}

JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value,
                                   int width) {
  return SetFieldValueImpl(object_id, field_id, value, width, false);
}

JDWP::JdwpError Dbg::SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width) {
  return SetFieldValueImpl(0, field_id, value, width, true);
}
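
// Editor's sketch of a hypothetical caller (disabled, not part of this file):
// writing u'A' into a char field. The width argument must match the JDWP tag
// width (2 for JT_CHAR), which SetArtFieldValue() CHECKs per primitive type.
#if 0
static void SetCharFieldExample(JDWP::ObjectId object_id, JDWP::FieldId field_id) {
  if (Dbg::SetFieldValue(object_id, field_id, 0x41u, 2) == JDWP::ERR_NONE) {
    // The field now holds u'A' (0x0041).
  }
}
#endif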

JDWP::JdwpError Dbg::StringToUtf8(JDWP::ObjectId string_id, std::string* str) {
  JDWP::JdwpError error;
  mirror::Object* obj = gRegistry->Get<mirror::Object*>(string_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  if (obj == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    mirror::Class* java_lang_String = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_String);
    if (!java_lang_String->IsAssignableFrom(obj->GetClass())) {
      // This isn't a string.
      return JDWP::ERR_INVALID_STRING;
    }
  }
  *str = obj->AsString()->ToModifiedUtf8();
  return JDWP::ERR_NONE;
}

void Dbg::OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply) {
  if (IsPrimitiveTag(tag)) {
    expandBufAdd1(pReply, tag);
    if (tag == JDWP::JT_BOOLEAN || tag == JDWP::JT_BYTE) {
      expandBufAdd1(pReply, return_value->GetI());
    } else if (tag == JDWP::JT_CHAR || tag == JDWP::JT_SHORT) {
      expandBufAdd2BE(pReply, return_value->GetI());
    } else if (tag == JDWP::JT_FLOAT || tag == JDWP::JT_INT) {
      expandBufAdd4BE(pReply, return_value->GetI());
    } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
      expandBufAdd8BE(pReply, return_value->GetJ());
    } else {
      CHECK_EQ(tag, JDWP::JT_VOID);
    }
  } else {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    mirror::Object* value = return_value->GetL();
    expandBufAdd1(pReply, TagFromObject(soa, value));
    expandBufAddObjectId(pReply, gRegistry->Add(value));
  }
}
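
// Editor's worked example (not part of the original source): a short return
// value of 0x1234 is emitted as the tag byte 'S' (JT_SHORT) followed by the
// big-endian payload 0x12 0x34, three bytes in total. Object values are
// emitted as a tag byte plus an ObjectId instead.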

JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string* name) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  UNUSED(thread);
  if (error != JDWP::ERR_NONE && error != JDWP::ERR_THREAD_NOT_ALIVE) {
    return error;
  }

  // We still need to report the zombie threads' names, so we can't just call Thread::GetThreadName.
  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id, &error);
  CHECK(thread_object != nullptr) << error;
  ArtField* java_lang_Thread_name_field =
      soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
  mirror::String* s =
      reinterpret_cast<mirror::String*>(java_lang_Thread_name_field->GetObject(thread_object));
  if (s != nullptr) {
    *name = s->ToModifiedUtf8();
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroup");
  // Okay, so it's an object, but is it actually a thread?
  Thread* thread = DecodeThread(soa, thread_id, &error);
  UNUSED(thread);
  if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
    // Zombie threads are in the null group.
    expandBufAddObjectId(pReply, JDWP::ObjectId(0));
    error = JDWP::ERR_NONE;
  } else if (error == JDWP::ERR_NONE) {
    mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
    CHECK(c != nullptr);
    ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
    CHECK(f != nullptr);
    mirror::Object* group = f->GetObject(thread_object);
    CHECK(group != nullptr);
    JDWP::ObjectId thread_group_id = gRegistry->Add(group);
    expandBufAddObjectId(pReply, thread_group_id);
  }
  return error;
}

static mirror::Object* DecodeThreadGroup(ScopedObjectAccessUnchecked& soa,
                                         JDWP::ObjectId thread_group_id, JDWP::JdwpError* error)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* thread_group = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_group_id,
                                                                                error);
  if (*error != JDWP::ERR_NONE) {
    return nullptr;
  }
  if (thread_group == nullptr) {
    *error = JDWP::ERR_INVALID_OBJECT;
    return nullptr;
  }
  mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
  CHECK(c != nullptr);
  if (!c->IsAssignableFrom(thread_group->GetClass())) {
    // This is not a java.lang.ThreadGroup.
    *error = JDWP::ERR_INVALID_THREAD_GROUP;
    return nullptr;
  }
  *error = JDWP::ERR_NONE;
  return thread_group;
}

JDWP::JdwpError Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroupName");
  ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
  CHECK(f != nullptr);
  mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group));

  std::string thread_group_name(s->ToModifiedUtf8());
  expandBufAddUtf8String(pReply, thread_group_name);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  mirror::Object* parent;
  {
    ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroupParent");
    ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_parent);
    CHECK(f != nullptr);
    parent = f->GetObject(thread_group);
  }
  JDWP::ObjectId parent_group_id = gRegistry->Add(parent);
  expandBufAddObjectId(pReply, parent_group_id);
  return JDWP::ERR_NONE;
}

static void GetChildThreadGroups(ScopedObjectAccessUnchecked& soa, mirror::Object* thread_group,
                                 std::vector<JDWP::ObjectId>* child_thread_group_ids)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  CHECK(thread_group != nullptr);

  // Get the ArrayList<ThreadGroup> "groups" out of this thread group...
  ArtField* groups_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_groups);
  mirror::Object* groups_array_list = groups_field->GetObject(thread_group);
  {
    // The "groups" field is declared as a java.util.List: check it really is
    // an instance of java.util.ArrayList.
    CHECK(groups_array_list != nullptr);
    mirror::Class* java_util_ArrayList_class =
        soa.Decode<mirror::Class*>(WellKnownClasses::java_util_ArrayList);
    CHECK(groups_array_list->InstanceOf(java_util_ArrayList_class));
  }

  // Get the array and size out of the ArrayList<ThreadGroup>...
  ArtField* array_field = soa.DecodeField(WellKnownClasses::java_util_ArrayList_array);
  ArtField* size_field = soa.DecodeField(WellKnownClasses::java_util_ArrayList_size);
  mirror::ObjectArray<mirror::Object>* groups_array =
      array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>();
  const int32_t size = size_field->GetInt(groups_array_list);

  // Copy the first 'size' elements out of the array into the result.
  ObjectRegistry* registry = Dbg::GetObjectRegistry();
  for (int32_t i = 0; i < size; ++i) {
    child_thread_group_ids->push_back(registry->Add(groups_array->Get(i)));
  }
}

JDWP::JdwpError Dbg::GetThreadGroupChildren(JDWP::ObjectId thread_group_id,
                                            JDWP::ExpandBuf* pReply) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }

  // Add child threads.
  {
    std::vector<JDWP::ObjectId> child_thread_ids;
    GetThreads(thread_group, &child_thread_ids);
    expandBufAdd4BE(pReply, child_thread_ids.size());
    for (JDWP::ObjectId child_thread_id : child_thread_ids) {
      expandBufAddObjectId(pReply, child_thread_id);
    }
  }

  // Add child thread groups.
  {
    std::vector<JDWP::ObjectId> child_thread_groups_ids;
    GetChildThreadGroups(soa, thread_group, &child_thread_groups_ids);
    expandBufAdd4BE(pReply, child_thread_groups_ids.size());
    for (JDWP::ObjectId child_thread_group_id : child_thread_groups_ids) {
      expandBufAddObjectId(pReply, child_thread_group_id);
    }
  }

  return JDWP::ERR_NONE;
}
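
// Editor's note on the reply layout (not part of the original source): the
// ThreadGroupReference.Children reply built above is two counted lists,
//   [childThreads:4]([threadId:8])* [childGroups:4]([groupId:8])*
// matching the two blocks of expandBuf calls.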

JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
  mirror::Object* group = f->GetObject(f->GetDeclaringClass());
  return gRegistry->Add(group);
}

JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) {
  switch (state) {
    case kBlocked:
      return JDWP::TS_MONITOR;
    case kNative:
    case kRunnable:
    case kSuspended:
      return JDWP::TS_RUNNING;
    case kSleeping:
      return JDWP::TS_SLEEPING;
    case kStarting:
    case kTerminated:
      return JDWP::TS_ZOMBIE;
    case kTimedWaiting:
    case kWaitingForCheckPointsToRun:
    case kWaitingForDebuggerSend:
    case kWaitingForDebuggerSuspension:
    case kWaitingForDebuggerToAttach:
    case kWaitingForDeoptimization:
    case kWaitingForGcToComplete:
    case kWaitingForGetObjectsAllocated:
    case kWaitingForJniOnLoad:
    case kWaitingForMethodTracingStart:
    case kWaitingForSignalCatcherOutput:
    case kWaitingForVisitObjects:
    case kWaitingInMainDebuggerLoop:
    case kWaitingInMainSignalCatcherLoop:
    case kWaitingPerformingGc:
    case kWaiting:
      return JDWP::TS_WAIT;
      // Don't add a 'default' here so the compiler can spot incompatible enum changes.
  }
  LOG(FATAL) << "Unknown thread state: " << state;
  return JDWP::TS_ZOMBIE;
}

JDWP::JdwpError Dbg::GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus,
                                     JDWP::JdwpSuspendStatus* pSuspendStatus) {
  ScopedObjectAccess soa(Thread::Current());

  *pSuspendStatus = JDWP::SUSPEND_STATUS_NOT_SUSPENDED;

  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
      *pThreadStatus = JDWP::TS_ZOMBIE;
      return JDWP::ERR_NONE;
    }
    return error;
  }

  if (IsSuspendedForDebugger(soa, thread)) {
    *pSuspendStatus = JDWP::SUSPEND_STATUS_SUSPENDED;
  }

  *pThreadStatus = ToJdwpThreadStatus(thread->GetState());
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetThreadDebugSuspendCount(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
  ScopedObjectAccess soa(Thread::Current());
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
  expandBufAdd4BE(pReply, thread->GetDebugSuspendCount());
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) {
  ScopedObjectAccess soa(Thread::Current());
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  thread->Interrupt(soa.Self());
  return JDWP::ERR_NONE;
}

static bool IsInDesiredThreadGroup(ScopedObjectAccessUnchecked& soa,
                                   mirror::Object* desired_thread_group, mirror::Object* peer)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  // Do we want threads from all thread groups?
  if (desired_thread_group == nullptr) {
    return true;
  }
  ArtField* thread_group_field = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
  DCHECK(thread_group_field != nullptr);
  mirror::Object* group = thread_group_field->GetObject(peer);
  return (group == desired_thread_group);
}

void Dbg::GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* thread_ids) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  std::list<Thread*> all_threads_list;
  {
    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
    all_threads_list = Runtime::Current()->GetThreadList()->GetList();
  }
  for (Thread* t : all_threads_list) {
    if (t == Dbg::GetDebugThread()) {
      // Skip the JDWP thread. Some debuggers get bent out of shape when they can't suspend and
      // query all threads, so it's easier if we just don't tell them about this thread.
      continue;
    }
    if (t->IsStillStarting()) {
      // This thread is being started (and has been registered in the thread list). However, it is
      // not completely started yet so we must ignore it.
      continue;
    }
    mirror::Object* peer = t->GetPeer();
    if (peer == nullptr) {
      // peer might be null if the thread is still starting up. We can't tell the debugger about
      // this thread yet.
      // TODO: if we identified threads to the debugger by their Thread*
      // rather than their peer's mirror::Object*, we could fix this.
      // Doing so might help us report ZOMBIE threads too.
      continue;
    }
    if (IsInDesiredThreadGroup(soa, thread_group, peer)) {
      thread_ids->push_back(gRegistry->Add(peer));
    }
  }
}

static int GetStackDepth(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  struct CountStackDepthVisitor : public StackVisitor {
    explicit CountStackDepthVisitor(Thread* thread_in)
        : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
          depth(0) {}

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      if (!GetMethod()->IsRuntimeMethod()) {
        ++depth;
      }
      return true;
    }
    size_t depth;
  };

  CountStackDepthVisitor visitor(thread);
  visitor.WalkStack();
  return visitor.depth;
}

JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* result) {
  ScopedObjectAccess soa(Thread::Current());
  JDWP::JdwpError error;
  *result = 0;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  *result = GetStackDepth(thread);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
                                     size_t frame_count, JDWP::ExpandBuf* buf) {
  class GetFrameVisitor : public StackVisitor {
   public:
    GetFrameVisitor(Thread* thread, size_t start_frame_in, size_t frame_count_in,
                    JDWP::ExpandBuf* buf_in)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
          depth_(0),
          start_frame_(start_frame_in),
          frame_count_(frame_count_in),
          buf_(buf_in) {
      expandBufAdd4BE(buf_, frame_count_);
    }

    bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      if (GetMethod()->IsRuntimeMethod()) {
        return true;  // The debugger can't do anything useful with a frame that has no Method*.
      }
      if (depth_ >= start_frame_ + frame_count_) {
        return false;
      }
      if (depth_ >= start_frame_) {
        JDWP::FrameId frame_id(GetFrameId());
        JDWP::JdwpLocation location;
        SetJdwpLocation(&location, GetMethod(), GetDexPc());
        VLOG(jdwp) << StringPrintf("    Frame %3zd: id=%3" PRIu64 " ", depth_, frame_id) << location;
        expandBufAdd8BE(buf_, frame_id);
        expandBufAddLocation(buf_, location);
      }
      ++depth_;
      return true;
    }

   private:
    size_t depth_;
    const size_t start_frame_;
    const size_t frame_count_;
    JDWP::ExpandBuf* buf_;
  };

  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  GetFrameVisitor visitor(thread, start_frame, frame_count, buf);
  visitor.WalkStack();
  return JDWP::ERR_NONE;
}

JDWP::ObjectId Dbg::GetThreadSelfId() {
  return GetThreadId(Thread::Current());
}

JDWP::ObjectId Dbg::GetThreadId(Thread* thread) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  return gRegistry->Add(thread->GetPeer());
}

void Dbg::SuspendVM() {
  Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
}

void Dbg::ResumeVM() {
  Runtime::Current()->GetThreadList()->ResumeAllForDebugger();
}

JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) {
  Thread* self = Thread::Current();
  ScopedLocalRef<jobject> peer(self->GetJniEnv(), nullptr);
  {
    ScopedObjectAccess soa(self);
    JDWP::JdwpError error;
    peer.reset(soa.AddLocalReference<jobject>(gRegistry->Get<mirror::Object*>(thread_id, &error)));
  }
  if (peer.get() == nullptr) {
    return JDWP::ERR_THREAD_NOT_ALIVE;
  }
  // Suspend thread to build stack trace.
  bool timed_out;
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  Thread* thread = thread_list->SuspendThreadByPeer(peer.get(), request_suspension, true,
                                                    &timed_out);
  if (thread != nullptr) {
    return JDWP::ERR_NONE;
  } else if (timed_out) {
    return JDWP::ERR_INTERNAL;
  } else {
    return JDWP::ERR_THREAD_NOT_ALIVE;
  }
}

void Dbg::ResumeThread(JDWP::ObjectId thread_id) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  mirror::Object* peer = gRegistry->Get<mirror::Object*>(thread_id, &error);
  CHECK(peer != nullptr) << error;
  Thread* thread;
  {
    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
    thread = Thread::FromManagedThread(soa, peer);
  }
  if (thread == nullptr) {
    LOG(WARNING) << "No such thread for resume: " << peer;
    return;
  }
  bool needs_resume;
  {
    MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
    needs_resume = thread->GetSuspendCount() > 0;
  }
  if (needs_resume) {
    Runtime::Current()->GetThreadList()->Resume(thread, true);
  }
}

void Dbg::SuspendSelf() {
  Runtime::Current()->GetThreadList()->SuspendSelfForDebugger();
}

struct GetThisVisitor : public StackVisitor {
  GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id_in)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        this_object(nullptr),
        frame_id(frame_id_in) {}

  // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
  // annotalysis.
  virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
    if (frame_id != GetFrameId()) {
      return true;  // continue
    } else {
      this_object = GetThisObject();
      return false;
    }
  }

  mirror::Object* this_object;
  JDWP::FrameId frame_id;
};

JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
                                   JDWP::ObjectId* result) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  std::unique_ptr<Context> context(Context::Create());
  GetThisVisitor visitor(thread, context.get(), frame_id);
  visitor.WalkStack();
  *result = gRegistry->Add(visitor.this_object);
  return JDWP::ERR_NONE;
}

// Walks the stack until we find the frame with the given FrameId.
class FindFrameVisitor FINAL : public StackVisitor {
 public:
  FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        frame_id_(frame_id),
        error_(JDWP::ERR_INVALID_FRAMEID) {}

  // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
  // annotalysis.
  bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
    if (GetFrameId() != frame_id_) {
      return true;  // Not our frame, carry on.
    }
    ArtMethod* m = GetMethod();
    if (m->IsNative()) {
      // We can't read/write local values from/into a native method's frame.
      error_ = JDWP::ERR_OPAQUE_FRAME;
    } else {
      // We found our frame.
      error_ = JDWP::ERR_NONE;
    }
    return false;
  }

  JDWP::JdwpError GetError() const {
    return error_;
  }

 private:
  const JDWP::FrameId frame_id_;
  JDWP::JdwpError error_;
};

JDWP::JdwpError Dbg::GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pReply) {
  JDWP::ObjectId thread_id = request->ReadThreadId();
  JDWP::FrameId frame_id = request->ReadFrameId();

  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  // Find the frame with the given frame_id.
  std::unique_ptr<Context> context(Context::Create());
  FindFrameVisitor visitor(thread, context.get(), frame_id);
  visitor.WalkStack();
  if (visitor.GetError() != JDWP::ERR_NONE) {
    return visitor.GetError();
  }

  // Read the values from visitor's context.
  int32_t slot_count = request->ReadSigned32("slot count");
  expandBufAdd4BE(pReply, slot_count);     /* "int values" */
  for (int32_t i = 0; i < slot_count; ++i) {
    uint32_t slot = request->ReadUnsigned32("slot");
    JDWP::JdwpTag reqSigByte = request->ReadTag();

    VLOG(jdwp) << "    --> slot " << slot << " " << reqSigByte;

    size_t width = Dbg::GetTagWidth(reqSigByte);
    uint8_t* ptr = expandBufAddSpace(pReply, width + 1);
    error = Dbg::GetLocalValue(visitor, soa, slot, reqSigByte, ptr, width);
    if (error != JDWP::ERR_NONE) {
      return error;
    }
  }
  return JDWP::ERR_NONE;
}
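
// Editor's note on the reply layout (not part of the original source): the
// StackFrame.GetValues reply echoes the slot count and then carries one
// tagged value per requested slot:
//   [values:4]([tag:1][value:width])*
// expandBufAddSpace() reserves the tag byte plus `width` payload bytes, and
// Dbg::GetLocalValue() below fills the payload at buf + 1.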

constexpr JDWP::JdwpError kStackFrameLocalAccessError = JDWP::ERR_ABSENT_INFORMATION;

static std::string GetStackContextAsString(const StackVisitor& visitor)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return StringPrintf(" at DEX pc 0x%08x in method %s", visitor.GetDexPc(false),
                      PrettyMethod(visitor.GetMethod()).c_str());
}

static JDWP::JdwpError FailGetLocalValue(const StackVisitor& visitor, uint16_t vreg,
                                         JDWP::JdwpTag tag)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  LOG(ERROR) << "Failed to read " << tag << " local from register v" << vreg
             << GetStackContextAsString(visitor);
  return kStackFrameLocalAccessError;
}

JDWP::JdwpError Dbg::GetLocalValue(const StackVisitor& visitor, ScopedObjectAccessUnchecked& soa,
                                   int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) {
  ArtMethod* m = visitor.GetMethod();
  JDWP::JdwpError error = JDWP::ERR_NONE;
  uint16_t vreg = DemangleSlot(slot, m, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  // TODO: check that the tag is compatible with the actual type of the slot!
  switch (tag) {
    case JDWP::JT_BOOLEAN: {
      CHECK_EQ(width, 1U);
      uint32_t intVal;
      if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
        return FailGetLocalValue(visitor, vreg, tag);
      }
      VLOG(jdwp) << "get boolean local " << vreg << " = " << intVal;
      JDWP::Set1(buf + 1, intVal != 0);
      break;
    }
    case JDWP::JT_BYTE: {
      CHECK_EQ(width, 1U);
      uint32_t intVal;
      if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
        return FailGetLocalValue(visitor, vreg, tag);
      }
      VLOG(jdwp) << "get byte local " << vreg << " = " << intVal;
      JDWP::Set1(buf + 1, intVal);
      break;
    }
    case JDWP::JT_SHORT:
    case JDWP::JT_CHAR: {
      CHECK_EQ(width, 2U);
      uint32_t intVal;
      if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
        return FailGetLocalValue(visitor, vreg, tag);
      }
      VLOG(jdwp) << "get short/char local " << vreg << " = " << intVal;
      JDWP::Set2BE(buf + 1, intVal);
      break;
    }
    case JDWP::JT_INT: {
      CHECK_EQ(width, 4U);
      uint32_t intVal;
      if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
        return FailGetLocalValue(visitor, vreg, tag);
      }
      VLOG(jdwp) << "get int local " << vreg << " = " << intVal;
      JDWP::Set4BE(buf + 1, intVal);
      break;
    }
    case JDWP::JT_FLOAT: {
      CHECK_EQ(width, 4U);
      uint32_t intVal;
      if (!visitor.GetVReg(m, vreg, kFloatVReg, &intVal)) {
        return FailGetLocalValue(visitor, vreg, tag);
      }
      VLOG(jdwp) << "get float local " << vreg << " = " << intVal;
      JDWP::Set4BE(buf + 1, intVal);
      break;
    }
    case JDWP::JT_ARRAY:
    case JDWP::JT_CLASS_LOADER:
    case JDWP::JT_CLASS_OBJECT:
    case JDWP::JT_OBJECT:
    case JDWP::JT_STRING:
    case JDWP::JT_THREAD:
    case JDWP::JT_THREAD_GROUP: {
      CHECK_EQ(width, sizeof(JDWP::ObjectId));
      uint32_t intVal;
      if (!visitor.GetVReg(m, vreg, kReferenceVReg, &intVal)) {
        return FailGetLocalValue(visitor, vreg, tag);
      }
      mirror::Object* o = reinterpret_cast<mirror::Object*>(intVal);
      VLOG(jdwp) << "get " << tag << " object local " << vreg << " = " << o;
      if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
        LOG(FATAL) << StringPrintf("Found invalid object %#" PRIxPTR " in register v%u",
                                   reinterpret_cast<uintptr_t>(o), vreg)
                                   << GetStackContextAsString(visitor);
        UNREACHABLE();
      }
      tag = TagFromObject(soa, o);
      JDWP::SetObjectId(buf + 1, gRegistry->Add(o));
      break;
    }
    case JDWP::JT_DOUBLE: {
      CHECK_EQ(width, 8U);
      uint64_t longVal;
      if (!visitor.GetVRegPair(m, vreg, kDoubleLoVReg, kDoubleHiVReg, &longVal)) {
        return FailGetLocalValue(visitor, vreg, tag);
      }
      VLOG(jdwp) << "get double local " << vreg << " = " << longVal;
2711       JDWP::Set8BE(buf + 1, longVal);
2712       break;
2713     }
2714     case JDWP::JT_LONG: {
2715       CHECK_EQ(width, 8U);
2716       uint64_t longVal;
2717       if (!visitor.GetVRegPair(m, vreg, kLongLoVReg, kLongHiVReg, &longVal)) {
2718         return FailGetLocalValue(visitor, vreg, tag);
2719       }
2720       VLOG(jdwp) << "get long local " << vreg << " = " << longVal;
2721       JDWP::Set8BE(buf + 1, longVal);
2722       break;
2723     }
2724     default:
2725       LOG(FATAL) << "Unknown tag " << tag;
2726       UNREACHABLE();
2727   }
2728 
2729   // Prepend tag, which may have been updated.
2730   JDWP::Set1(buf, tag);
2731   return JDWP::ERR_NONE;
2732 }
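
// Note on the reference cases above: the tag requested by the debugger (e.g.
// JT_OBJECT) is only a hint. Once the object has been read out of the vreg,
// TagFromObject() refines it, so the value may be reported back with a more
// precise tag such as JT_STRING or JT_THREAD.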
2733 
2734 JDWP::JdwpError Dbg::SetLocalValues(JDWP::Request* request) {
2735   JDWP::ObjectId thread_id = request->ReadThreadId();
2736   JDWP::FrameId frame_id = request->ReadFrameId();
2737 
2738   ScopedObjectAccessUnchecked soa(Thread::Current());
2739   JDWP::JdwpError error;
2740   Thread* thread = DecodeThread(soa, thread_id, &error);
2741   if (error != JDWP::ERR_NONE) {
2742     return error;
2743   }
2744   if (!IsSuspendedForDebugger(soa, thread)) {
2745     return JDWP::ERR_THREAD_NOT_SUSPENDED;
2746   }
2747   // Find the frame with the given frame_id.
2748   std::unique_ptr<Context> context(Context::Create());
2749   FindFrameVisitor visitor(thread, context.get(), frame_id);
2750   visitor.WalkStack();
2751   if (visitor.GetError() != JDWP::ERR_NONE) {
2752     return visitor.GetError();
2753   }
2754 
2755   // Write the values into visitor's context.
2756   int32_t slot_count = request->ReadSigned32("slot count");
2757   for (int32_t i = 0; i < slot_count; ++i) {
2758     uint32_t slot = request->ReadUnsigned32("slot");
2759     JDWP::JdwpTag sigByte = request->ReadTag();
2760     size_t width = Dbg::GetTagWidth(sigByte);
2761     uint64_t value = request->ReadValue(width);
2762 
2763     VLOG(jdwp) << "    --> slot " << slot << " " << sigByte << " " << value;
2764     error = Dbg::SetLocalValue(visitor, slot, sigByte, value, width);
2765     if (error != JDWP::ERR_NONE) {
2766       return error;
2767     }
2768   }
2769   return JDWP::ERR_NONE;
2770 }
2771 
2772 template<typename T>
2773 static JDWP::JdwpError FailSetLocalValue(const StackVisitor& visitor, uint16_t vreg,
2774                                          JDWP::JdwpTag tag, T value)
2775     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2776   LOG(ERROR) << "Failed to write " << tag << " local " << value
2777              << " (0x" << std::hex << value << ") into register v" << vreg
2778              << GetStackContextAsString(visitor);
2779   return kStackFrameLocalAccessError;
2780 }
2781 
2782 JDWP::JdwpError Dbg::SetLocalValue(StackVisitor& visitor, int slot, JDWP::JdwpTag tag,
2783                                    uint64_t value, size_t width) {
2784   ArtMethod* m = visitor.GetMethod();
2785   JDWP::JdwpError error = JDWP::ERR_NONE;
2786   uint16_t vreg = DemangleSlot(slot, m, &error);
2787   if (error != JDWP::ERR_NONE) {
2788     return error;
2789   }
2790   // TODO: check that the tag is compatible with the actual type of the slot!
2791   switch (tag) {
2792     case JDWP::JT_BOOLEAN:
2793     case JDWP::JT_BYTE:
2794       CHECK_EQ(width, 1U);
2795       if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kIntVReg)) {
2796         return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
2797       }
2798       break;
2799     case JDWP::JT_SHORT:
2800     case JDWP::JT_CHAR:
2801       CHECK_EQ(width, 2U);
2802       if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kIntVReg)) {
2803         return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
2804       }
2805       break;
2806     case JDWP::JT_INT:
2807       CHECK_EQ(width, 4U);
2808       if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kIntVReg)) {
2809         return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
2810       }
2811       break;
2812     case JDWP::JT_FLOAT:
2813       CHECK_EQ(width, 4U);
2814       if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kFloatVReg)) {
2815         return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
2816       }
2817       break;
2818     case JDWP::JT_ARRAY:
2819     case JDWP::JT_CLASS_LOADER:
2820     case JDWP::JT_CLASS_OBJECT:
2821     case JDWP::JT_OBJECT:
2822     case JDWP::JT_STRING:
2823     case JDWP::JT_THREAD:
2824     case JDWP::JT_THREAD_GROUP: {
2825       CHECK_EQ(width, sizeof(JDWP::ObjectId));
2826       mirror::Object* o = gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value),
2827                                                           &error);
2828       if (error != JDWP::ERR_NONE) {
2829         VLOG(jdwp) << tag << " object " << o << " is an invalid object";
2830         return JDWP::ERR_INVALID_OBJECT;
2831       }
2832       if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)),
2833                                  kReferenceVReg)) {
2834         return FailSetLocalValue(visitor, vreg, tag, reinterpret_cast<uintptr_t>(o));
2835       }
2836       break;
2837     }
2838     case JDWP::JT_DOUBLE: {
2839       CHECK_EQ(width, 8U);
2840       if (!visitor.SetVRegPair(m, vreg, value, kDoubleLoVReg, kDoubleHiVReg)) {
2841         return FailSetLocalValue(visitor, vreg, tag, value);
2842       }
2843       break;
2844     }
2845     case JDWP::JT_LONG: {
2846       CHECK_EQ(width, 8U);
2847       if (!visitor.SetVRegPair(m, vreg, value, kLongLoVReg, kLongHiVReg)) {
2848         return FailSetLocalValue(visitor, vreg, tag, value);
2849       }
2850       break;
2851     }
2852     default:
2853       LOG(FATAL) << "Unknown tag " << tag;
2854       UNREACHABLE();
2855   }
2856   return JDWP::ERR_NONE;
2857 }
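
// Worked example (illustrative): setting a boolean local to true arrives as
// tag 'Z' with a 1-byte value of 1, which the switch above widens into a
// 32-bit int vreg. 64-bit values (JT_LONG/JT_DOUBLE) span two adjacent vregs
// and therefore go through SetVRegPair() rather than SetVReg().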
2858 
2859 static void SetEventLocation(JDWP::EventLocation* location, ArtMethod* m, uint32_t dex_pc)
2860     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2861   DCHECK(location != nullptr);
2862   if (m == nullptr) {
2863     memset(location, 0, sizeof(*location));
2864   } else {
2865     location->method = m;
2866     location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint32_t>(-1) : dex_pc;
2867   }
2868 }
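
// A dex pc of -1 (0xffffffff) marks the event location as unknown; that is
// the best we can report for native and proxy methods, which have no dex
// code for the pc to point into.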
2869 
2870 void Dbg::PostLocationEvent(ArtMethod* m, int dex_pc, mirror::Object* this_object,
2871                             int event_flags, const JValue* return_value) {
2872   if (!IsDebuggerActive()) {
2873     return;
2874   }
2875   DCHECK(m != nullptr);
2876   DCHECK_EQ(m->IsStatic(), this_object == nullptr);
2877   JDWP::EventLocation location;
2878   SetEventLocation(&location, m, dex_pc);
2879 
2880   // We need to be sure no exception is pending when calling JdwpState::PostLocationEvent.
2881   // This is required to be able to call JNI functions to create JDWP ids. To achieve this,
2882   // we temporarily clear the current thread's exception (if any) and will restore it after
2883   // the call.
2884   // Note: the only way to get a pending exception here is to suspend on a move-exception
2885   // instruction.
2886   Thread* const self = Thread::Current();
2887   StackHandleScope<1> hs(self);
2888   Handle<mirror::Throwable> pending_exception(hs.NewHandle(self->GetException()));
2889   self->ClearException();
2890   if (kIsDebugBuild && pending_exception.Get() != nullptr) {
2891     const DexFile::CodeItem* code_item = location.method->GetCodeItem();
2892     const Instruction* instr = Instruction::At(&code_item->insns_[location.dex_pc]);
2893     CHECK_EQ(Instruction::MOVE_EXCEPTION, instr->Opcode());
2894   }
2895 
2896   gJdwpState->PostLocationEvent(&location, this_object, event_flags, return_value);
2897 
2898   if (pending_exception.Get() != nullptr) {
2899     self->SetException(pending_exception.Get());
2900   }
2901 }
2902 
2903 void Dbg::PostFieldAccessEvent(ArtMethod* m, int dex_pc,
2904                                mirror::Object* this_object, ArtField* f) {
2905   if (!IsDebuggerActive()) {
2906     return;
2907   }
2908   DCHECK(m != nullptr);
2909   DCHECK(f != nullptr);
2910   JDWP::EventLocation location;
2911   SetEventLocation(&location, m, dex_pc);
2912 
2913   gJdwpState->PostFieldEvent(&location, f, this_object, nullptr, false);
2914 }
2915 
2916 void Dbg::PostFieldModificationEvent(ArtMethod* m, int dex_pc,
2917                                      mirror::Object* this_object, ArtField* f,
2918                                      const JValue* field_value) {
2919   if (!IsDebuggerActive()) {
2920     return;
2921   }
2922   DCHECK(m != nullptr);
2923   DCHECK(f != nullptr);
2924   DCHECK(field_value != nullptr);
2925   JDWP::EventLocation location;
2926   SetEventLocation(&location, m, dex_pc);
2927 
2928   gJdwpState->PostFieldEvent(&location, f, this_object, field_value, true);
2929 }
2930 
2931 /**
2932  * Finds the location where this exception will be caught. We search until we reach the top
2933  * frame; if no handler is found by then, the exception is considered uncaught.
2934  */
2935 class CatchLocationFinder : public StackVisitor {
2936  public:
2937   CatchLocationFinder(Thread* self, const Handle<mirror::Throwable>& exception, Context* context)
2938       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2939     : StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
2940       self_(self),
2941       exception_(exception),
2942       handle_scope_(self),
2943       this_at_throw_(handle_scope_.NewHandle<mirror::Object>(nullptr)),
2944       catch_method_(nullptr),
2945       throw_method_(nullptr),
2946       catch_dex_pc_(DexFile::kDexNoIndex),
2947       throw_dex_pc_(DexFile::kDexNoIndex) {
2948   }
2949 
2950   bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2951     ArtMethod* method = GetMethod();
2952     DCHECK(method != nullptr);
2953     if (method->IsRuntimeMethod()) {
2954       // Ignore callee save method.
2955       DCHECK(method->IsCalleeSaveMethod());
2956       return true;
2957     }
2958 
2959     uint32_t dex_pc = GetDexPc();
2960     if (throw_method_ == nullptr) {
2961       // First Java method found. It is either the method that threw the exception,
2962       // or the Java native method that is reporting an exception thrown by
2963       // native code.
2964       this_at_throw_.Assign(GetThisObject());
2965       throw_method_ = method;
2966       throw_dex_pc_ = dex_pc;
2967     }
2968 
2969     if (dex_pc != DexFile::kDexNoIndex) {
2970       StackHandleScope<1> hs(self_);
2971       uint32_t found_dex_pc;
2972       Handle<mirror::Class> exception_class(hs.NewHandle(exception_->GetClass()));
2973       bool unused_clear_exception;
2974       found_dex_pc = method->FindCatchBlock(exception_class, dex_pc, &unused_clear_exception);
2975       if (found_dex_pc != DexFile::kDexNoIndex) {
2976         catch_method_ = method;
2977         catch_dex_pc_ = found_dex_pc;
2978         return false;  // End stack walk.
2979       }
2980     }
2981     return true;  // Continue stack walk.
2982   }
2983 
2984   ArtMethod* GetCatchMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2985     return catch_method_;
2986   }
2987 
2988   ArtMethod* GetThrowMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2989     return throw_method_;
2990   }
2991 
2992   mirror::Object* GetThisAtThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2993     return this_at_throw_.Get();
2994   }
2995 
2996   uint32_t GetCatchDexPc() const {
2997     return catch_dex_pc_;
2998   }
2999 
3000   uint32_t GetThrowDexPc() const {
3001     return throw_dex_pc_;
3002   }
3003 
3004  private:
3005   Thread* const self_;
3006   const Handle<mirror::Throwable>& exception_;
3007   StackHandleScope<1> handle_scope_;
3008   MutableHandle<mirror::Object> this_at_throw_;
3009   ArtMethod* catch_method_;
3010   ArtMethod* throw_method_;
3011   uint32_t catch_dex_pc_;
3012   uint32_t throw_dex_pc_;
3013 
3014   DISALLOW_COPY_AND_ASSIGN(CatchLocationFinder);
3015 };
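
// In short: the first non-runtime frame visited supplies the throw location
// (method, dex pc and the 'this' reference), and the walk stops at the first
// frame whose method has a catch block for the exception type. If the walk
// finishes without a match, catch_method_ stays null and catch_dex_pc_ stays
// DexFile::kDexNoIndex, i.e. the exception is considered uncaught.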
3016 
3017 void Dbg::PostException(mirror::Throwable* exception_object) {
3018   if (!IsDebuggerActive()) {
3019     return;
3020   }
3021   Thread* const self = Thread::Current();
3022   StackHandleScope<1> handle_scope(self);
3023   Handle<mirror::Throwable> h_exception(handle_scope.NewHandle(exception_object));
3024   std::unique_ptr<Context> context(Context::Create());
3025   CatchLocationFinder clf(self, h_exception, context.get());
3026   clf.WalkStack(/* include_transitions */ false);
3027   JDWP::EventLocation exception_throw_location;
3028   SetEventLocation(&exception_throw_location, clf.GetThrowMethod(), clf.GetThrowDexPc());
3029   JDWP::EventLocation exception_catch_location;
3030   SetEventLocation(&exception_catch_location, clf.GetCatchMethod(), clf.GetCatchDexPc());
3031 
3032   gJdwpState->PostException(&exception_throw_location, h_exception.Get(), &exception_catch_location,
3033                             clf.GetThisAtThrow());
3034 }
3035 
3036 void Dbg::PostClassPrepare(mirror::Class* c) {
3037   if (!IsDebuggerActive()) {
3038     return;
3039   }
3040   gJdwpState->PostClassPrepare(c);
3041 }
3042 
3043 void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object,
3044                          ArtMethod* m, uint32_t dex_pc,
3045                          int event_flags, const JValue* return_value) {
3046   if (!IsDebuggerActive() || dex_pc == static_cast<uint32_t>(-2) /* fake method exit */) {
3047     return;
3048   }
3049 
3050   if (IsBreakpoint(m, dex_pc)) {
3051     event_flags |= kBreakpoint;
3052   }
3053 
3054   // If the debugger is single-stepping one of our threads, check to
3055   // see if we're that thread and we've reached a step point.
3056   const SingleStepControl* single_step_control = thread->GetSingleStepControl();
3057   if (single_step_control != nullptr) {
3058     CHECK(!m->IsNative());
3059     if (single_step_control->GetStepDepth() == JDWP::SD_INTO) {
3060       // Step into method calls.  We break when the line number
3061       // or method pointer changes.  If we're in SS_MIN mode, we
3062       // always stop.
3063       if (single_step_control->GetMethod() != m) {
3064         event_flags |= kSingleStep;
3065         VLOG(jdwp) << "SS new method";
3066       } else if (single_step_control->GetStepSize() == JDWP::SS_MIN) {
3067         event_flags |= kSingleStep;
3068         VLOG(jdwp) << "SS new instruction";
3069       } else if (!single_step_control->ContainsDexPc(dex_pc)) {
3070         event_flags |= kSingleStep;
3071         VLOG(jdwp) << "SS new line";
3072       }
3073     } else if (single_step_control->GetStepDepth() == JDWP::SD_OVER) {
3074       // Step over method calls.  We break when the line number is
3075       // different and the frame depth is <= the original frame
3076       // depth.  (We can't just compare on the method, because we
3077       // might get unrolled past it by an exception, and it's tricky
3078       // to identify recursion.)
3079 
3080       int stack_depth = GetStackDepth(thread);
3081 
3082       if (stack_depth < single_step_control->GetStackDepth()) {
3083         // Popped up one or more frames, always trigger.
3084         event_flags |= kSingleStep;
3085         VLOG(jdwp) << "SS method pop";
3086       } else if (stack_depth == single_step_control->GetStackDepth()) {
3087         // Same depth, see if we moved.
3088         if (single_step_control->GetStepSize() == JDWP::SS_MIN) {
3089           event_flags |= kSingleStep;
3090           VLOG(jdwp) << "SS new instruction";
3091         } else if (!single_step_control->ContainsDexPc(dex_pc)) {
3092           event_flags |= kSingleStep;
3093           VLOG(jdwp) << "SS new line";
3094         }
3095       }
3096     } else {
3097       CHECK_EQ(single_step_control->GetStepDepth(), JDWP::SD_OUT);
3098       // Return from the current method.  We break when the frame
3099       // depth pops up.
3100 
3101       // This differs from the "method exit" break in that it stops
3102       // with the PC at the next instruction in the returned-to
3103       // function, rather than the end of the returning function.
3104 
3105       int stack_depth = GetStackDepth(thread);
3106       if (stack_depth < single_step_control->GetStackDepth()) {
3107         event_flags |= kSingleStep;
3108         VLOG(jdwp) << "SS method pop";
3109       }
3110     }
3111   }
3112 
3113   // If there's something interesting going on, see if it matches one
3114   // of the debugger filters.
3115   if (event_flags != 0) {
3116     Dbg::PostLocationEvent(m, dex_pc, this_object, event_flags, return_value);
3117   }
3118 }
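
// Summary of the single-step triggers implemented above:
//   SD_INTO: fire on a method change, on every instruction for SS_MIN, or
//            when the dex pc leaves the pc set recorded for the current line.
//   SD_OVER: fire when one or more frames were popped, or at the original
//            depth on a new instruction (SS_MIN) or a new line; deeper
//            frames (calls) are skipped.
//   SD_OUT:  fire only once the stack depth drops below the starting depth.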
3119 
3120 size_t* Dbg::GetReferenceCounterForEvent(uint32_t instrumentation_event) {
3121   switch (instrumentation_event) {
3122     case instrumentation::Instrumentation::kMethodEntered:
3123       return &method_enter_event_ref_count_;
3124     case instrumentation::Instrumentation::kMethodExited:
3125       return &method_exit_event_ref_count_;
3126     case instrumentation::Instrumentation::kDexPcMoved:
3127       return &dex_pc_change_event_ref_count_;
3128     case instrumentation::Instrumentation::kFieldRead:
3129       return &field_read_event_ref_count_;
3130     case instrumentation::Instrumentation::kFieldWritten:
3131       return &field_write_event_ref_count_;
3132     case instrumentation::Instrumentation::kExceptionCaught:
3133       return &exception_catch_event_ref_count_;
3134     default:
3135       return nullptr;
3136   }
3137 }
3138 
3139 // Process request while all mutator threads are suspended.
3140 void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
3141   instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
3142   switch (request.GetKind()) {
3143     case DeoptimizationRequest::kNothing:
3144       LOG(WARNING) << "Ignoring empty deoptimization request.";
3145       break;
3146     case DeoptimizationRequest::kRegisterForEvent:
3147       VLOG(jdwp) << StringPrintf("Add debugger as listener for instrumentation event 0x%x",
3148                                  request.InstrumentationEvent());
3149       instrumentation->AddListener(&gDebugInstrumentationListener, request.InstrumentationEvent());
3150       instrumentation_events_ |= request.InstrumentationEvent();
3151       break;
3152     case DeoptimizationRequest::kUnregisterForEvent:
3153       VLOG(jdwp) << StringPrintf("Remove debugger as listener for instrumentation event 0x%x",
3154                                  request.InstrumentationEvent());
3155       instrumentation->RemoveListener(&gDebugInstrumentationListener,
3156                                       request.InstrumentationEvent());
3157       instrumentation_events_ &= ~request.InstrumentationEvent();
3158       break;
3159     case DeoptimizationRequest::kFullDeoptimization:
3160       VLOG(jdwp) << "Deoptimize the world ...";
3161       instrumentation->DeoptimizeEverything(kDbgInstrumentationKey);
3162       VLOG(jdwp) << "Deoptimize the world DONE";
3163       break;
3164     case DeoptimizationRequest::kFullUndeoptimization:
3165       VLOG(jdwp) << "Undeoptimize the world ...";
3166       instrumentation->UndeoptimizeEverything(kDbgInstrumentationKey);
3167       VLOG(jdwp) << "Undeoptimize the world DONE";
3168       break;
3169     case DeoptimizationRequest::kSelectiveDeoptimization:
3170       VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " ...";
3171       instrumentation->Deoptimize(request.Method());
3172       VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " DONE";
3173       break;
3174     case DeoptimizationRequest::kSelectiveUndeoptimization:
3175       VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " ...";
3176       instrumentation->Undeoptimize(request.Method());
3177       VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " DONE";
3178       break;
3179     default:
3180       LOG(FATAL) << "Unsupported deoptimization request kind " << request.GetKind();
3181       break;
3182   }
3183 }
3184 
3185 void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
3186   if (req.GetKind() == DeoptimizationRequest::kNothing) {
3187     // Nothing to do.
3188     return;
3189   }
3190   MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
3191   RequestDeoptimizationLocked(req);
3192 }
3193 
3194 void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
3195   switch (req.GetKind()) {
3196     case DeoptimizationRequest::kRegisterForEvent: {
3197       DCHECK_NE(req.InstrumentationEvent(), 0u);
3198       size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
3199       CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
3200                                                 req.InstrumentationEvent());
3201       if (*counter == 0) {
3202         VLOG(jdwp) << StringPrintf("Queue request #%zd to start listening to instrumentation event 0x%x",
3203                                    deoptimization_requests_.size(), req.InstrumentationEvent());
3204         deoptimization_requests_.push_back(req);
3205       }
3206       *counter = *counter + 1;
3207       break;
3208     }
3209     case DeoptimizationRequest::kUnregisterForEvent: {
3210       DCHECK_NE(req.InstrumentationEvent(), 0u);
3211       size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
3212       CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
3213                                                 req.InstrumentationEvent());
3214       *counter = *counter - 1;
3215       if (*counter == 0) {
3216         VLOG(jdwp) << StringPrintf("Queue request #%zd to stop listening to instrumentation event 0x%x",
3217                                    deoptimization_requests_.size(), req.InstrumentationEvent());
3218         deoptimization_requests_.push_back(req);
3219       }
3220       break;
3221     }
3222     case DeoptimizationRequest::kFullDeoptimization: {
3223       DCHECK(req.Method() == nullptr);
3224       if (full_deoptimization_event_count_ == 0) {
3225         VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3226                    << " for full deoptimization";
3227         deoptimization_requests_.push_back(req);
3228       }
3229       ++full_deoptimization_event_count_;
3230       break;
3231     }
3232     case DeoptimizationRequest::kFullUndeoptimization: {
3233       DCHECK(req.Method() == nullptr);
3234       DCHECK_GT(full_deoptimization_event_count_, 0U);
3235       --full_deoptimization_event_count_;
3236       if (full_deoptimization_event_count_ == 0) {
3237         VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3238                    << " for full undeoptimization";
3239         deoptimization_requests_.push_back(req);
3240       }
3241       break;
3242     }
3243     case DeoptimizationRequest::kSelectiveDeoptimization: {
3244       DCHECK(req.Method() != nullptr);
3245       VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3246                  << " for deoptimization of " << PrettyMethod(req.Method());
3247       deoptimization_requests_.push_back(req);
3248       break;
3249     }
3250     case DeoptimizationRequest::kSelectiveUndeoptimization: {
3251       DCHECK(req.Method() != nullptr);
3252       VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3253                  << " for undeoptimization of " << PrettyMethod(req.Method());
3254       deoptimization_requests_.push_back(req);
3255       break;
3256     }
3257     default: {
3258       LOG(FATAL) << "Unknown deoptimization request kind " << req.GetKind();
3259       break;
3260     }
3261   }
3262 }
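
// Example of the reference counting above (a sketch): two successive
// registrations for the same instrumentation event, say kDexPcMoved, queue a
// single kRegisterForEvent request (counter 0 -> 1); the second registration
// only bumps the counter. Symmetrically, only the unregistration that drops
// the counter back to 0 queues a kUnregisterForEvent request.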
3263 
3264 void Dbg::ManageDeoptimization() {
3265   Thread* const self = Thread::Current();
3266   {
3267     // Avoid suspend/resume if there is no pending request.
3268     MutexLock mu(self, *Locks::deoptimization_lock_);
3269     if (deoptimization_requests_.empty()) {
3270       return;
3271     }
3272   }
3273   CHECK_EQ(self->GetState(), kRunnable);
3274   self->TransitionFromRunnableToSuspended(kWaitingForDeoptimization);
3275   // We need to suspend mutator threads first.
3276   Runtime* const runtime = Runtime::Current();
3277   runtime->GetThreadList()->SuspendAll(__FUNCTION__);
3278   const ThreadState old_state = self->SetStateUnsafe(kRunnable);
3279   {
3280     MutexLock mu(self, *Locks::deoptimization_lock_);
3281     size_t req_index = 0;
3282     for (DeoptimizationRequest& request : deoptimization_requests_) {
3283       VLOG(jdwp) << "Process deoptimization request #" << req_index++;
3284       ProcessDeoptimizationRequest(request);
3285     }
3286     deoptimization_requests_.clear();
3287   }
3288   CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
3289   runtime->GetThreadList()->ResumeAll();
3290   self->TransitionFromSuspendedToRunnable();
3291 }
3292 
3293 static bool IsMethodPossiblyInlined(Thread* self, ArtMethod* m)
3294     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
3295   const DexFile::CodeItem* code_item = m->GetCodeItem();
3296   if (code_item == nullptr) {
3297     // TODO: We should not be asked to watch a location in a native or abstract method, so the
3298     // code item should never be null. We could just check that we never encounter this case.
3299     return false;
3300   }
3301   // Note: method verifier may cause thread suspension.
3302   self->AssertThreadSuspensionIsAllowable();
3303   StackHandleScope<2> hs(self);
3304   mirror::Class* declaring_class = m->GetDeclaringClass();
3305   Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
3306   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
3307   verifier::MethodVerifier verifier(self, dex_cache->GetDexFile(), dex_cache, class_loader,
3308                                     &m->GetClassDef(), code_item, m->GetDexMethodIndex(), m,
3309                                     m->GetAccessFlags(), false, true, false, true);
3310   // Note: we don't need to verify the method.
3311   return InlineMethodAnalyser::AnalyseMethodCode(&verifier, nullptr);
3312 }
3313 
3314 static const Breakpoint* FindFirstBreakpointForMethod(ArtMethod* m)
3315     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
3316   for (Breakpoint& breakpoint : gBreakpoints) {
3317     if (breakpoint.Method() == m) {
3318       return &breakpoint;
3319     }
3320   }
3321   return nullptr;
3322 }
3323 
3324 bool Dbg::MethodHasAnyBreakpoints(ArtMethod* method) {
3325   ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
3326   return FindFirstBreakpointForMethod(method) != nullptr;
3327 }
3328 
3329 // Sanity checks all existing breakpoints on the same method.
3330 static void SanityCheckExistingBreakpoints(ArtMethod* m,
3331                                            DeoptimizationRequest::Kind deoptimization_kind)
3332     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
3333   for (const Breakpoint& breakpoint : gBreakpoints) {
3334     if (breakpoint.Method() == m) {
3335       CHECK_EQ(deoptimization_kind, breakpoint.GetDeoptimizationKind());
3336     }
3337   }
3338   instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
3339   if (deoptimization_kind == DeoptimizationRequest::kFullDeoptimization) {
3340     // We should have deoptimized everything but not "selectively" deoptimized this method.
3341     CHECK(instrumentation->AreAllMethodsDeoptimized());
3342     CHECK(!instrumentation->IsDeoptimized(m));
3343   } else if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
3344     // We should have "selectively" deoptimized this method.
3345     // Note: while we have not deoptimized everything for this method, we may have done it for
3346     // another event.
3347     CHECK(instrumentation->IsDeoptimized(m));
3348   } else {
3349     // This method does not require deoptimization.
3350     CHECK_EQ(deoptimization_kind, DeoptimizationRequest::kNothing);
3351     CHECK(!instrumentation->IsDeoptimized(m));
3352   }
3353 }
3354 
3355 // Returns the deoptimization kind required to set a breakpoint in a method.
3356 // If a breakpoint has already been set, we also return the first breakpoint
3357 // through the given 'existing_brkpt' pointer.
3358 static DeoptimizationRequest::Kind GetRequiredDeoptimizationKind(Thread* self,
3359                                                                  ArtMethod* m,
3360                                                                  const Breakpoint** existing_brkpt)
3361     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
3362   if (!Dbg::RequiresDeoptimization()) {
3363     // We already run in interpreter-only mode so we don't need to deoptimize anything.
3364     VLOG(jdwp) << "No need for deoptimization when fully running with interpreter for method "
3365                << PrettyMethod(m);
3366     return DeoptimizationRequest::kNothing;
3367   }
3368   const Breakpoint* first_breakpoint;
3369   {
3370     ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
3371     first_breakpoint = FindFirstBreakpointForMethod(m);
3372     *existing_brkpt = first_breakpoint;
3373   }
3374 
3375   if (first_breakpoint == nullptr) {
3376     // There is no breakpoint on this method yet: we need to deoptimize. If this method may be
3377     // inlined, we deoptimize everything; otherwise we deoptimize only this method.
3378     // Note: IsMethodPossiblyInlined goes into the method verifier and may cause thread suspension.
3379     // Therefore we must not hold any lock when we call it.
3380     bool need_full_deoptimization = IsMethodPossiblyInlined(self, m);
3381     if (need_full_deoptimization) {
3382       VLOG(jdwp) << "Need full deoptimization because of possible inlining of method "
3383                  << PrettyMethod(m);
3384       return DeoptimizationRequest::kFullDeoptimization;
3385     } else {
3386       // We don't need to deoptimize if the method has not been compiled.
3387       ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
3388       const bool is_compiled = class_linker->GetOatMethodQuickCodeFor(m) != nullptr;
3389       if (is_compiled) {
3390         // If the method may be called through its direct code pointer (without loading
3391         // its updated entrypoint), we need full deoptimization to not miss the breakpoint.
3392         if (class_linker->MayBeCalledWithDirectCodePointer(m)) {
3393           VLOG(jdwp) << "Need full deoptimization because of possible direct code call "
3394                      << "into image for compiled method " << PrettyMethod(m);
3395           return DeoptimizationRequest::kFullDeoptimization;
3396         } else {
3397           VLOG(jdwp) << "Need selective deoptimization for compiled method " << PrettyMethod(m);
3398           return DeoptimizationRequest::kSelectiveDeoptimization;
3399         }
3400       } else {
3401         // Method is not compiled: we don't need to deoptimize.
3402         VLOG(jdwp) << "No need for deoptimization for non-compiled method " << PrettyMethod(m);
3403         return DeoptimizationRequest::kNothing;
3404       }
3405     }
3406   } else {
3407     // There is at least one breakpoint for this method: we don't need to deoptimize.
3408     // Let's check that all breakpoints are configured the same way for deoptimization.
3409     VLOG(jdwp) << "Breakpoint already set: no deoptimization is required";
3410     DeoptimizationRequest::Kind deoptimization_kind = first_breakpoint->GetDeoptimizationKind();
3411     if (kIsDebugBuild) {
3412       ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
3413       SanityCheckExistingBreakpoints(m, deoptimization_kind);
3414     }
3415     return DeoptimizationRequest::kNothing;
3416   }
3417 }
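
// The decision tree implemented above, in condensed form:
//   runtime already interpreter-only ................ kNothing
//   no breakpoint on the method yet:
//     method possibly inlined ....................... kFullDeoptimization
//     compiled, callable via direct code pointer .... kFullDeoptimization
//     compiled otherwise ............................ kSelectiveDeoptimization
//     not compiled .................................. kNothing
//   breakpoint already present ...................... kNothing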
3418 
3419 // Installs a breakpoint at the specified location. Also indicates through the deoptimization
3420 // request if we need to deoptimize.
3421 void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
3422   Thread* const self = Thread::Current();
3423   ArtMethod* m = FromMethodId(location->method_id);
3424   DCHECK(m != nullptr) << "No method for method id " << location->method_id;
3425 
3426   const Breakpoint* existing_breakpoint = nullptr;
3427   const DeoptimizationRequest::Kind deoptimization_kind =
3428       GetRequiredDeoptimizationKind(self, m, &existing_breakpoint);
3429   req->SetKind(deoptimization_kind);
3430   if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
3431     req->SetMethod(m);
3432   } else {
3433     CHECK(deoptimization_kind == DeoptimizationRequest::kNothing ||
3434           deoptimization_kind == DeoptimizationRequest::kFullDeoptimization);
3435     req->SetMethod(nullptr);
3436   }
3437 
3438   {
3439     WriterMutexLock mu(self, *Locks::breakpoint_lock_);
3440     // If there is at least one existing breakpoint on the same method, the new breakpoint
3441     // must have the same deoptimization kind as the existing breakpoint(s).
3442     DeoptimizationRequest::Kind breakpoint_deoptimization_kind;
3443     if (existing_breakpoint != nullptr) {
3444       breakpoint_deoptimization_kind = existing_breakpoint->GetDeoptimizationKind();
3445     } else {
3446       breakpoint_deoptimization_kind = deoptimization_kind;
3447     }
3448     gBreakpoints.push_back(Breakpoint(m, location->dex_pc, breakpoint_deoptimization_kind));
3449     VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": "
3450                << gBreakpoints[gBreakpoints.size() - 1];
3451   }
3452 }
3453 
3454 // Uninstalls a breakpoint at the specified location. Also indicates through the deoptimization
3455 // request if we need to undeoptimize.
3456 void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
3457   WriterMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
3458   ArtMethod* m = FromMethodId(location->method_id);
3459   DCHECK(m != nullptr) << "No method for method id " << location->method_id;
3460   DeoptimizationRequest::Kind deoptimization_kind = DeoptimizationRequest::kNothing;
3461   for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
3462     if (gBreakpoints[i].DexPc() == location->dex_pc && gBreakpoints[i].Method() == m) {
3463       VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
3464       deoptimization_kind = gBreakpoints[i].GetDeoptimizationKind();
3465       DCHECK_EQ(deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization,
3466                 Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3467       gBreakpoints.erase(gBreakpoints.begin() + i);
3468       break;
3469     }
3470   }
3471   const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
3472   if (existing_breakpoint == nullptr) {
3473     // There is no more breakpoint on this method: we need to undeoptimize.
3474     if (deoptimization_kind == DeoptimizationRequest::kFullDeoptimization) {
3475       // This method required full deoptimization: we need to undeoptimize everything.
3476       req->SetKind(DeoptimizationRequest::kFullUndeoptimization);
3477       req->SetMethod(nullptr);
3478     } else if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
3479       // This method required selective deoptimization: we need to undeoptimize only that method.
3480       req->SetKind(DeoptimizationRequest::kSelectiveUndeoptimization);
3481       req->SetMethod(m);
3482     } else {
3483       // This method had no need for deoptimization: do nothing.
3484       CHECK_EQ(deoptimization_kind, DeoptimizationRequest::kNothing);
3485       req->SetKind(DeoptimizationRequest::kNothing);
3486       req->SetMethod(nullptr);
3487     }
3488   } else {
3489     // There is at least one breakpoint for this method: we don't need to undeoptimize.
3490     req->SetKind(DeoptimizationRequest::kNothing);
3491     req->SetMethod(nullptr);
3492     if (kIsDebugBuild) {
3493       SanityCheckExistingBreakpoints(m, deoptimization_kind);
3494     }
3495   }
3496 }
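
// Note that WatchLocation() and UnwatchLocation() above only describe the
// required (un)deoptimization in the DeoptimizationRequest they fill in; the
// request only takes effect once it is queued via RequestDeoptimization()
// and applied by ManageDeoptimization() with all mutator threads suspended
// (see above).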
3497 
3498 bool Dbg::IsForcedInterpreterNeededForCallingImpl(Thread* thread, ArtMethod* m) {
3499   const SingleStepControl* const ssc = thread->GetSingleStepControl();
3500   if (ssc == nullptr) {
3501     // If we are not single-stepping, then we don't have to force interpreter.
3502     return false;
3503   }
3504   if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
3505     // If we are in interpreter only mode, then we don't have to force interpreter.
3506     return false;
3507   }
3508 
3509   if (!m->IsNative() && !m->IsProxyMethod()) {
3510     // If we want to step into a method, then we have to force interpreter on that call.
3511     if (ssc->GetStepDepth() == JDWP::SD_INTO) {
3512       return true;
3513     }
3514   }
3515   return false;
3516 }
3517 
3518 bool Dbg::IsForcedInterpreterNeededForResolutionImpl(Thread* thread, ArtMethod* m) {
3519   instrumentation::Instrumentation* const instrumentation =
3520       Runtime::Current()->GetInstrumentation();
3521   // If we are in interpreter only mode, then we don't have to force interpreter.
3522   if (instrumentation->InterpretOnly()) {
3523     return false;
3524   }
3525   // We can only interpret pure Java methods.
3526   if (m->IsNative() || m->IsProxyMethod()) {
3527     return false;
3528   }
3529   const SingleStepControl* const ssc = thread->GetSingleStepControl();
3530   if (ssc != nullptr) {
3531     // If we want to step into a method, then we have to force interpreter on that call.
3532     if (ssc->GetStepDepth() == JDWP::SD_INTO) {
3533       return true;
3534     }
3535     // If we are stepping out of a static initializer (implicitly invoked by
3536     // calling a static method) by issuing a step-in or step-over, then we
3537     // need to step into that method. A stack depth lower than the one the
3538     // single step control recorded indicates that the step originated in
3539     // the static initializer.
3540     if (ssc->GetStepDepth() != JDWP::SD_OUT &&
3541         ssc->GetStackDepth() > GetStackDepth(thread)) {
3542       return true;
3543     }
3544   }
3545   // There are cases where we have to force the interpreter on deoptimized
3546   // methods, because the call may not be performed by invoking the entry
3547   // point that the deoptimization replaced, but instead by directly
3548   // invoking the compiled code of the method.
3549   return instrumentation->IsDeoptimized(m);
3550 }
3551 
3552 bool Dbg::IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, ArtMethod* m) {
3553   // The upcall can be null and in that case we don't need to do anything.
3554   if (m == nullptr) {
3555     return false;
3556   }
3557   instrumentation::Instrumentation* const instrumentation =
3558       Runtime::Current()->GetInstrumentation();
3559   // If we are in interpreter only mode, then we don't have to force interpreter.
3560   if (instrumentation->InterpretOnly()) {
3561     return false;
3562   }
3563   // We can only interpret pure Java methods.
3564   if (m->IsNative() || m->IsProxyMethod()) {
3565     return false;
3566   }
3567   const SingleStepControl* const ssc = thread->GetSingleStepControl();
3568   if (ssc != nullptr) {
3569     // If we are stepping out of a static initializer (implicitly invoked by
3570     // calling a static method) by issuing a step-out, then we need to step
3571     // into the caller of that method. A stack depth lower than the one the
3572     // single step control recorded indicates that the step originated in
3573     // the static initializer.
3574     if (ssc->GetStepDepth() == JDWP::SD_OUT &&
3575         ssc->GetStackDepth() > GetStackDepth(thread)) {
3576       return true;
3577     }
3578   }
3579   // If we are returning from a static initializer, that was implicitly
3580   // invoked by calling a static method and the caller is deoptimized,
3581   // then we have to deoptimize the stack without forcing interpreter
3582   // on the static method that was called originally. This problem can
3583   // be solved easily by forcing instrumentation on the called method,
3584   // because the instrumentation exit hook will recognise the need for
3585   // stack deoptimization by calling IsForcedInterpreterNeededForUpcall.
3586   return instrumentation->IsDeoptimized(m);
3587 }
3588 
3589 bool Dbg::IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m) {
3590   // The upcall can be null and in that case we don't need to do anything.
3591   if (m == nullptr) {
3592     return false;
3593   }
3594   instrumentation::Instrumentation* const instrumentation =
3595       Runtime::Current()->GetInstrumentation();
3596   // If we are in interpreter only mode, then we don't have to force interpreter.
3597   if (instrumentation->InterpretOnly()) {
3598     return false;
3599   }
3600   // We can only interpret pure Java methods.
3601   if (m->IsNative() || m->IsProxyMethod()) {
3602     return false;
3603   }
3604   const SingleStepControl* const ssc = thread->GetSingleStepControl();
3605   if (ssc != nullptr) {
3606     // The debugger is not interested in what is happening below the level
3607     // of the step, thus we only force the interpreter when we are at or
3608     // above the level of the step.
3609     if (ssc->GetStackDepth() >= GetStackDepth(thread)) {
3610       return true;
3611     }
3612   }
3613   // We have to require stack deoptimization if the upcall is deoptimized.
3614   return instrumentation->IsDeoptimized(m);
3615 }
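
// Taken together, the IsForced*Impl helpers above force the interpreter (or
// instrumentation) only where the debugger could otherwise miss events:
// around single-step boundaries and for methods whose entry points have been
// replaced by deoptimization.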
3616 
3617 // Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't
3618 // cause suspension if the thread is the current thread.
3619 class ScopedThreadSuspension {
3620  public:
3621   ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
3622       LOCKS_EXCLUDED(Locks::thread_list_lock_)
3623       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
3624       thread_(nullptr),
3625       error_(JDWP::ERR_NONE),
3626       self_suspend_(false),
3627       other_suspend_(false) {
3628     ScopedObjectAccessUnchecked soa(self);
3629     thread_ = DecodeThread(soa, thread_id, &error_);
3630     if (error_ == JDWP::ERR_NONE) {
3631       if (thread_ == soa.Self()) {
3632         self_suspend_ = true;
3633       } else {
3634         soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
3635         jobject thread_peer = Dbg::GetObjectRegistry()->GetJObject(thread_id);
3636         bool timed_out;
3637         ThreadList* thread_list = Runtime::Current()->GetThreadList();
3638         Thread* suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, true, true,
3639                                                                     &timed_out);
3640         CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kWaitingForDebuggerSuspension);
3641         if (suspended_thread == nullptr) {
3642           // Thread terminated from under us while suspending.
3643           error_ = JDWP::ERR_INVALID_THREAD;
3644         } else {
3645           CHECK_EQ(suspended_thread, thread_);
3646           other_suspend_ = true;
3647         }
3648       }
3649     }
3650   }
3651 
3652   Thread* GetThread() const {
3653     return thread_;
3654   }
3655 
3656   JDWP::JdwpError GetError() const {
3657     return error_;
3658   }
3659 
3660   ~ScopedThreadSuspension() {
3661     if (other_suspend_) {
3662       Runtime::Current()->GetThreadList()->Resume(thread_, true);
3663     }
3664   }
3665 
3666  private:
3667   Thread* thread_;
3668   JDWP::JdwpError error_;
3669   bool self_suspend_;
3670   bool other_suspend_;
3671 };
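
// Typical usage (as in ConfigureStep() below): construct the scope, bail out
// if GetError() != JDWP::ERR_NONE, then operate on GetThread(). The
// destructor resumes the thread only if this scope was the one that
// suspended it (other_suspend_); a self-suspension needs no resume.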
3672 
3673 JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize step_size,
3674                                    JDWP::JdwpStepDepth step_depth) {
3675   Thread* self = Thread::Current();
3676   ScopedThreadSuspension sts(self, thread_id);
3677   if (sts.GetError() != JDWP::ERR_NONE) {
3678     return sts.GetError();
3679   }
3680 
3681   // Work out what ArtMethod* we're in, the current line number, and how deep the stack currently
3682   // is for step-out.
3683   struct SingleStepStackVisitor : public StackVisitor {
3684     explicit SingleStepStackVisitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
3685         : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
3686           stack_depth(0),
3687           method(nullptr),
3688           line_number(-1) {}
3689 
3690     // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
3691     // annotalysis.
3692     bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
3693       ArtMethod* m = GetMethod();
3694       if (!m->IsRuntimeMethod()) {
3695         ++stack_depth;
3696         if (method == nullptr) {
3697           mirror::DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache();
3698           method = m;
3699           if (dex_cache != nullptr) {
3700             const DexFile& dex_file = *dex_cache->GetDexFile();
3701             line_number = dex_file.GetLineNumFromPC(m, GetDexPc());
3702           }
3703         }
3704       }
3705       return true;
3706     }
3707 
3708     int stack_depth;
3709     ArtMethod* method;
3710     int32_t line_number;
3711   };
3712 
3713   Thread* const thread = sts.GetThread();
3714   SingleStepStackVisitor visitor(thread);
3715   visitor.WalkStack();
3716 
3717   // Find the dex_pc values that correspond to the current line, for line-based single-stepping.
3718   struct DebugCallbackContext {
3719     explicit DebugCallbackContext(SingleStepControl* single_step_control_cb,
3720                                   int32_t line_number_cb, const DexFile::CodeItem* code_item)
3721       : single_step_control_(single_step_control_cb), line_number_(line_number_cb),
3722         code_item_(code_item), last_pc_valid(false), last_pc(0) {
3723     }
3724 
3725     static bool Callback(void* raw_context, uint32_t address, uint32_t line_number_cb) {
3726       DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context);
3727       if (static_cast<int32_t>(line_number_cb) == context->line_number_) {
3728         if (!context->last_pc_valid) {
3729           // Everything from this address until the next line change is ours.
3730           context->last_pc = address;
3731           context->last_pc_valid = true;
3732         }
3733         // Otherwise, if we're already in a valid range for this line,
3734         // just keep going (shouldn't really happen)...
3735       } else if (context->last_pc_valid) {  // and the line number is new
3736         // Add everything from the last entry up until here to the set
3737         for (uint32_t dex_pc = context->last_pc; dex_pc < address; ++dex_pc) {
3738           context->single_step_control_->AddDexPc(dex_pc);
3739         }
3740         context->last_pc_valid = false;
3741       }
3742       return false;  // There may be multiple entries for any given line.
3743     }
3744 
3745     ~DebugCallbackContext() {
3746       // If the line number was the last in the position table...
3747       if (last_pc_valid) {
3748         size_t end = code_item_->insns_size_in_code_units_;
3749         for (uint32_t dex_pc = last_pc; dex_pc < end; ++dex_pc) {
3750           single_step_control_->AddDexPc(dex_pc);
3751         }
3752       }
3753     }
3754 
3755     SingleStepControl* const single_step_control_;
3756     const int32_t line_number_;
3757     const DexFile::CodeItem* const code_item_;
3758     bool last_pc_valid;
3759     uint32_t last_pc;
3760   };
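
  // Illustrative example with a hypothetical line table: given the entries
  //   {pc 0x00 -> line 5, pc 0x0a -> line 6, pc 0x14 -> line 5}
  // and a thread stopped on line 5, the callback above collects the pc
  // ranges [0x00, 0x0a) and [0x14, insns_size_in_code_units_) into the
  // single-step control's dex pc set.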
3761 
3762   // Allocate single step.
3763   SingleStepControl* single_step_control =
3764       new (std::nothrow) SingleStepControl(step_size, step_depth,
3765                                            visitor.stack_depth, visitor.method);
3766   if (single_step_control == nullptr) {
3767     LOG(ERROR) << "Failed to allocate SingleStepControl";
3768     return JDWP::ERR_OUT_OF_MEMORY;
3769   }
3770 
3771   ArtMethod* m = single_step_control->GetMethod();
3772   const int32_t line_number = visitor.line_number;
3773   // Note: if the thread is not running Java code (pure native thread), there is no "current"
3774   // method on the stack (and no line number either).
3775   if (m != nullptr && !m->IsNative()) {
3776     const DexFile::CodeItem* const code_item = m->GetCodeItem();
3777     DebugCallbackContext context(single_step_control, line_number, code_item);
3778     m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
3779                                      DebugCallbackContext::Callback, nullptr, &context);
3780   }
3781 
3782   // Activate single-step in the thread.
3783   thread->ActivateSingleStepControl(single_step_control);
3784 
3785   if (VLOG_IS_ON(jdwp)) {
3786     VLOG(jdwp) << "Single-step thread: " << *thread;
3787     VLOG(jdwp) << "Single-step step size: " << single_step_control->GetStepSize();
3788     VLOG(jdwp) << "Single-step step depth: " << single_step_control->GetStepDepth();
3789     VLOG(jdwp) << "Single-step current method: " << PrettyMethod(single_step_control->GetMethod());
3790     VLOG(jdwp) << "Single-step current line: " << line_number;
3791     VLOG(jdwp) << "Single-step current stack depth: " << single_step_control->GetStackDepth();
3792     VLOG(jdwp) << "Single-step dex_pc values:";
3793     for (uint32_t dex_pc : single_step_control->GetDexPcs()) {
3794       VLOG(jdwp) << StringPrintf(" %#x", dex_pc);
3795     }
3796   }
3797 
3798   return JDWP::ERR_NONE;
3799 }
3800 
3801 void Dbg::UnconfigureStep(JDWP::ObjectId thread_id) {
3802   ScopedObjectAccessUnchecked soa(Thread::Current());
3803   JDWP::JdwpError error;
3804   Thread* thread = DecodeThread(soa, thread_id, &error);
3805   if (error == JDWP::ERR_NONE) {
3806     thread->DeactivateSingleStepControl();
3807   }
3808 }
3809 
3810 static char JdwpTagToShortyChar(JDWP::JdwpTag tag) {
3811   switch (tag) {
3812     default:
3813       LOG(FATAL) << "unknown JDWP tag: " << PrintableChar(tag);
3814       UNREACHABLE();
3815 
3816     // Primitives.
3817     case JDWP::JT_BYTE:    return 'B';
3818     case JDWP::JT_CHAR:    return 'C';
3819     case JDWP::JT_FLOAT:   return 'F';
3820     case JDWP::JT_DOUBLE:  return 'D';
3821     case JDWP::JT_INT:     return 'I';
3822     case JDWP::JT_LONG:    return 'J';
3823     case JDWP::JT_SHORT:   return 'S';
3824     case JDWP::JT_VOID:    return 'V';
3825     case JDWP::JT_BOOLEAN: return 'Z';
3826 
3827     // Reference types.
3828     case JDWP::JT_ARRAY:
3829     case JDWP::JT_OBJECT:
3830     case JDWP::JT_STRING:
3831     case JDWP::JT_THREAD:
3832     case JDWP::JT_THREAD_GROUP:
3833     case JDWP::JT_CLASS_LOADER:
3834     case JDWP::JT_CLASS_OBJECT:
3835       return 'L';
3836   }
3837 }
3838 
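// Illustrative note (not part of the original source): JdwpTagToShortyChar collapses every
// JDWP reference tag to 'L', matching dex shorty notation where shorty[0] is the return type
// and parameters start at shorty[1]. A sketch, assuming a hypothetical method
// String concat(String s, int n) whose shorty is "LLI":
//
//   JDWP::JdwpTag arg_types[] = { JDWP::JT_STRING, JDWP::JT_INT };
//   const char* shorty = "LLI";
//   CHECK_EQ(shorty[1], JdwpTagToShortyChar(arg_types[0]));  // JT_STRING -> 'L'.
//   CHECK_EQ(shorty[2], JdwpTagToShortyChar(arg_types[1]));  // JT_INT -> 'I'.
//
// PrepareInvokeMethod below performs this comparison for each debugger-supplied argument.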
3839 JDWP::JdwpError Dbg::PrepareInvokeMethod(uint32_t request_id, JDWP::ObjectId thread_id,
3840                                          JDWP::ObjectId object_id, JDWP::RefTypeId class_id,
3841                                          JDWP::MethodId method_id, uint32_t arg_count,
3842                                          uint64_t arg_values[], JDWP::JdwpTag* arg_types,
3843                                          uint32_t options) {
3844   Thread* const self = Thread::Current();
3845   CHECK_EQ(self, GetDebugThread()) << "This must be called by the JDWP thread";
3846 
3847   ThreadList* thread_list = Runtime::Current()->GetThreadList();
3848   Thread* targetThread = nullptr;
3849   {
3850     ScopedObjectAccessUnchecked soa(self);
3851     JDWP::JdwpError error;
3852     targetThread = DecodeThread(soa, thread_id, &error);
3853     if (error != JDWP::ERR_NONE) {
3854       LOG(ERROR) << "InvokeMethod request for invalid thread id " << thread_id;
3855       return error;
3856     }
3857     if (targetThread->GetInvokeReq() != nullptr) {
3858       // Thread is already invoking a method on behalf of the debugger.
3859       LOG(ERROR) << "InvokeMethod request for thread already invoking a method: " << *targetThread;
3860       return JDWP::ERR_ALREADY_INVOKING;
3861     }
3862     if (!targetThread->IsReadyForDebugInvoke()) {
3863       // Thread is not suspended by an event so it cannot invoke a method.
3864       LOG(ERROR) << "InvokeMethod request for thread not stopped by event: " << *targetThread;
3865       return JDWP::ERR_INVALID_THREAD;
3866     }
3867 
3868     /*
3869      * We currently have a bug where we don't successfully resume the
3870      * target thread if the suspend count is too deep.  We're expected to
3871      * require one "resume" for each "suspend", but when asked to execute
3872      * a method we have to resume fully and then re-suspend it back to the
3873      * same level.  (The easiest way to cause this is to type "suspend"
3874      * multiple times in jdb.)
3875      *
3876      * It's unclear what this means when the event specifies "resume all"
3877      * and some threads are suspended more deeply than others.  This is
3878      * a rare problem, so for now we just prevent it from hanging forever
3879      * by rejecting the method invocation request.  Without this, we will
3880      * be stuck waiting on a suspended thread.
3881      */
3882     int suspend_count;
3883     {
3884       MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
3885       suspend_count = targetThread->GetSuspendCount();
3886     }
3887     if (suspend_count > 1) {
3888       LOG(ERROR) << *targetThread << " suspend count too deep for method invocation: " << suspend_count;
3889       return JDWP::ERR_THREAD_SUSPENDED;  // Probably not expected here.
3890     }
3891 
3892     mirror::Object* receiver = gRegistry->Get<mirror::Object*>(object_id, &error);
3893     if (error != JDWP::ERR_NONE) {
3894       return JDWP::ERR_INVALID_OBJECT;
3895     }
3896 
3897     gRegistry->Get<mirror::Object*>(thread_id, &error);
3898     if (error != JDWP::ERR_NONE) {
3899       return JDWP::ERR_INVALID_OBJECT;
3900     }
3901 
3902     mirror::Class* c = DecodeClass(class_id, &error);
3903     if (c == nullptr) {
3904       return error;
3905     }
3906 
3907     ArtMethod* m = FromMethodId(method_id);
3908     if (m->IsStatic() != (receiver == nullptr)) {
3909       return JDWP::ERR_INVALID_METHODID;
3910     }
3911     if (m->IsStatic()) {
3912       if (m->GetDeclaringClass() != c) {
3913         return JDWP::ERR_INVALID_METHODID;
3914       }
3915     } else {
3916       if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
3917         return JDWP::ERR_INVALID_METHODID;
3918       }
3919     }
3920 
3921     // Check the argument list matches the method.
3922     uint32_t shorty_len = 0;
3923     const char* shorty = m->GetShorty(&shorty_len);
3924     if (shorty_len - 1 != arg_count) {
3925       return JDWP::ERR_ILLEGAL_ARGUMENT;
3926     }
3927 
3928     {
3929       StackHandleScope<2> hs(soa.Self());
3930       HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&receiver));
3931       HandleWrapper<mirror::Class> h_klass(hs.NewHandleWrapper(&c));
3932       const DexFile::TypeList* types = m->GetParameterTypeList();
3933       for (size_t i = 0; i < arg_count; ++i) {
3934         if (shorty[i + 1] != JdwpTagToShortyChar(arg_types[i])) {
3935           return JDWP::ERR_ILLEGAL_ARGUMENT;
3936         }
3937 
3938         if (shorty[i + 1] == 'L') {
3939           // Did we really get an argument of an appropriate reference type?
3940           mirror::Class* parameter_type =
3941               m->GetClassFromTypeIndex(types->GetTypeItem(i).type_idx_, true);
3942           mirror::Object* argument = gRegistry->Get<mirror::Object*>(arg_values[i], &error);
3943           if (error != JDWP::ERR_NONE) {
3944             return JDWP::ERR_INVALID_OBJECT;
3945           }
3946           if (argument != nullptr && !argument->InstanceOf(parameter_type)) {
3947             return JDWP::ERR_ILLEGAL_ARGUMENT;
3948           }
3949 
3950           // Turn the on-the-wire ObjectId into a jobject.
3951           jvalue& v = reinterpret_cast<jvalue&>(arg_values[i]);
3952           v.l = gRegistry->GetJObject(arg_values[i]);
3953         }
3954       }
3955     }
3956 
3957     // Allocates a DebugInvokeReq.
3958     DebugInvokeReq* req = new (std::nothrow) DebugInvokeReq(request_id, thread_id, receiver, c, m,
3959                                                             options, arg_values, arg_count);
3960     if (req == nullptr) {
3961       LOG(ERROR) << "Failed to allocate DebugInvokeReq";
3962       return JDWP::ERR_OUT_OF_MEMORY;
3963     }
3964 
3965     // Attaches the DebugInvokeReq to the target thread so it executes the method when
3966     // it is resumed. Once the invocation completes, the target thread will delete it before
3967     // suspending itself (see ThreadList::SuspendSelfForDebugger).
3968     targetThread->SetDebugInvokeReq(req);
3969   }
3970 
3971   // The fact that we've released the thread list lock is a bit risky --- if the thread goes
3972   // away we're sitting high and dry -- but we must release this before the UndoDebuggerSuspensions
3973   // call.
3974 
3975   if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
3976     VLOG(jdwp) << "      Resuming all threads";
3977     thread_list->UndoDebuggerSuspensions();
3978   } else {
3979     VLOG(jdwp) << "      Resuming event thread only";
3980     thread_list->Resume(targetThread, true);
3981   }
3982 
3983   return JDWP::ERR_NONE;
3984 }
3985 
3986 void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
3987   Thread* const self = Thread::Current();
3988   CHECK_NE(self, GetDebugThread()) << "This must be called by the event thread";
3989 
3990   ScopedObjectAccess soa(self);
3991 
3992   // We can be called while an exception is pending. We need
3993   // to preserve that across the method invocation.
3994   StackHandleScope<1> hs(soa.Self());
3995   Handle<mirror::Throwable> old_exception = hs.NewHandle(soa.Self()->GetException());
3996   soa.Self()->ClearException();
3997 
3998   // Execute the method, then send the reply to the debugger.
3999   ExecuteMethodWithoutPendingException(soa, pReq);
4000 
4001   // If an exception was pending before the invoke, restore it now.
4002   if (old_exception.Get() != nullptr) {
4003     soa.Self()->SetException(old_exception.Get());
4004   }
4005 }
4006 
4007 // Helper function: write a variable-width value into the output buffer.
4008 static void WriteValue(JDWP::ExpandBuf* pReply, int width, uint64_t value) {
4009   switch (width) {
4010     case 1:
4011       expandBufAdd1(pReply, value);
4012       break;
4013     case 2:
4014       expandBufAdd2BE(pReply, value);
4015       break;
4016     case 4:
4017       expandBufAdd4BE(pReply, value);
4018       break;
4019     case 8:
4020       expandBufAdd8BE(pReply, value);
4021       break;
4022     default:
4023       LOG(FATAL) << width;
4024       UNREACHABLE();
4025   }
4026 }
4027 
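// Illustrative use of WriteValue (not part of the original source): the width comes from
// GetTagWidth, so a JT_INT result is appended as 4 big-endian bytes and a JT_LONG as 8:
//
//   uint64_t result_value = 0x12345678;
//   WriteValue(pReply, GetTagWidth(JDWP::JT_INT), result_value);  // Appends 12 34 56 78.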
4028 void Dbg::ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInvokeReq* pReq) {
4029   soa.Self()->AssertNoPendingException();
4030 
4031   // Translate the method through the vtable, unless the debugger wants to suppress it.
4032   ArtMethod* m = pReq->method;
4033   size_t image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
4034   if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver.Read() != nullptr) {
4035     ArtMethod* actual_method =
4036         pReq->klass.Read()->FindVirtualMethodForVirtualOrInterface(m, image_pointer_size);
4037     if (actual_method != m) {
4038       VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m)
4039                  << " to " << PrettyMethod(actual_method);
4040       m = actual_method;
4041     }
4042   }
4043   VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m)
4044              << " receiver=" << pReq->receiver.Read()
4045              << " arg_count=" << pReq->arg_count;
4046   CHECK(m != nullptr);
4047 
4048   CHECK_EQ(sizeof(jvalue), sizeof(uint64_t));
4049 
4050   // Invoke the method.
4051   ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(pReq->receiver.Read()));
4052   JValue result = InvokeWithJValues(soa, ref.get(), soa.EncodeMethod(m),
4053                                     reinterpret_cast<jvalue*>(pReq->arg_values.get()));
4054 
4055   // Prepare JDWP ids for the reply.
4056   JDWP::JdwpTag result_tag = BasicTagFromDescriptor(m->GetShorty());
4057   const bool is_object_result = (result_tag == JDWP::JT_OBJECT);
4058   StackHandleScope<2> hs(soa.Self());
4059   Handle<mirror::Object> object_result = hs.NewHandle(is_object_result ? result.GetL() : nullptr);
4060   Handle<mirror::Throwable> exception = hs.NewHandle(soa.Self()->GetException());
4061   soa.Self()->ClearException();
4062 
4063   if (!IsDebuggerActive()) {
4064     // The debugger detached: we must not re-suspend threads. We also don't need to fill the reply
4065     // because it won't be sent either.
4066     return;
4067   }
4068 
4069   JDWP::ObjectId exceptionObjectId = gRegistry->Add(exception);
4070   uint64_t result_value = 0;
4071   if (exceptionObjectId != 0) {
4072     VLOG(jdwp) << "  JDWP invocation returning with exception=" << exception.Get()
4073                << " " << exception->Dump();
4074     result_value = 0;
4075   } else if (is_object_result) {
4076     /* if no exception was thrown, examine object result more closely */
4077     JDWP::JdwpTag new_tag = TagFromObject(soa, object_result.Get());
4078     if (new_tag != result_tag) {
4079       VLOG(jdwp) << "  JDWP promoted result from " << result_tag << " to " << new_tag;
4080       result_tag = new_tag;
4081     }
4082 
4083     // Register the object in the registry and reference its ObjectId. This ensures
4084     // GC safety and prevents accessing a stale reference if the object is moved.
4085     result_value = gRegistry->Add(object_result.Get());
4086   } else {
4087     // Primitive result.
4088     DCHECK(IsPrimitiveTag(result_tag));
4089     result_value = result.GetJ();
4090   }
4091   const bool is_constructor = m->IsConstructor() && !m->IsStatic();
4092   if (is_constructor) {
4093     // If we invoked a constructor (which actually returns void), return the receiver,
4094     // unless we threw, in which case we return null.
4095     result_tag = JDWP::JT_OBJECT;
4096     if (exceptionObjectId == 0) {
4097       // TODO we could keep the receiver ObjectId in the DebugInvokeReq to avoid looking into the
4098       // object registry.
4099       result_value = GetObjectRegistry()->Add(pReq->receiver.Read());
4100     } else {
4101       result_value = 0;
4102     }
4103   }
4104 
4105   // Suspend other threads if the invoke is not single-threaded.
4106   if ((pReq->options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
4107     soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
4108     VLOG(jdwp) << "      Suspending all threads";
4109     Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
4110     soa.Self()->TransitionFromSuspendedToRunnable();
4111   }
4112 
4113   VLOG(jdwp) << "  --> returned " << result_tag
4114              << StringPrintf(" %#" PRIx64 " (except=%#" PRIx64 ")", result_value,
4115                              exceptionObjectId);
4116 
4117   // Show detailed debug output.
4118   if (result_tag == JDWP::JT_STRING && exceptionObjectId == 0) {
4119     if (result_value != 0) {
4120       if (VLOG_IS_ON(jdwp)) {
4121         std::string result_string;
4122         JDWP::JdwpError error = Dbg::StringToUtf8(result_value, &result_string);
4123         CHECK_EQ(error, JDWP::ERR_NONE);
4124         VLOG(jdwp) << "      string '" << result_string << "'";
4125       }
4126     } else {
4127       VLOG(jdwp) << "      string (null)";
4128     }
4129   }
4130 
4131   // Attach the reply to DebugInvokeReq so it can be sent to the debugger when the event thread
4132   // is ready to suspend.
4133   BuildInvokeReply(pReq->reply, pReq->request_id, result_tag, result_value, exceptionObjectId);
4134 }
4135 
4136 void Dbg::BuildInvokeReply(JDWP::ExpandBuf* pReply, uint32_t request_id, JDWP::JdwpTag result_tag,
4137                            uint64_t result_value, JDWP::ObjectId exception) {
4138   // Make room for the JDWP header since we do not know the size of the reply yet.
4139   JDWP::expandBufAddSpace(pReply, kJDWPHeaderLen);
4140 
4141   size_t width = GetTagWidth(result_tag);
4142   JDWP::expandBufAdd1(pReply, result_tag);
4143   if (width != 0) {
4144     WriteValue(pReply, width, result_value);
4145   }
4146   JDWP::expandBufAdd1(pReply, JDWP::JT_OBJECT);
4147   JDWP::expandBufAddObjectId(pReply, exception);
4148 
4149   // Now we know the size, we can complete the JDWP header.
4150   uint8_t* buf = expandBufGetBuffer(pReply);
4151   JDWP::Set4BE(buf + kJDWPHeaderSizeOffset, expandBufGetLength(pReply));
4152   JDWP::Set4BE(buf + kJDWPHeaderIdOffset, request_id);
4153   JDWP::Set1(buf + kJDWPHeaderFlagsOffset, kJDWPFlagReply);  // flags
4154   JDWP::Set2BE(buf + kJDWPHeaderErrorCodeOffset, JDWP::ERR_NONE);
4155 }
4156 
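// Sketch of the reply packet assembled above, assuming the standard 11-byte JDWP header
// (all multi-byte values big-endian):
//
//   [u4] total packet length   <- patched via kJDWPHeaderSizeOffset once the body is known
//   [u4] request id            <- patched via kJDWPHeaderIdOffset
//   [u1] flags = kJDWPFlagReply
//   [u2] error code = ERR_NONE
//   [u1] result tag, then GetTagWidth(result_tag) bytes of result value
//   [u1] JT_OBJECT tag, then the exception ObjectId (0 if no exception was thrown)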
4157 void Dbg::FinishInvokeMethod(DebugInvokeReq* pReq) {
4158   CHECK_NE(Thread::Current(), GetDebugThread()) << "This must be called by the event thread";
4159 
4160   JDWP::ExpandBuf* const pReply = pReq->reply;
4161   CHECK(pReply != nullptr) << "No reply attached to DebugInvokeReq";
4162 
4163   // We need to prevent other threads (including the JDWP thread) from interacting with the
4164   // debugger while we send the reply but are not yet suspended. The JDWP token will be released
4165   // just before we suspend ourselves again (see ThreadList::SuspendSelfForDebugger).
4166   gJdwpState->AcquireJdwpTokenForEvent(pReq->thread_id);
4167 
4168   // Send the reply unless the debugger detached before the completion of the method.
4169   if (IsDebuggerActive()) {
4170     const size_t replyDataLength = expandBufGetLength(pReply) - kJDWPHeaderLen;
4171     VLOG(jdwp) << StringPrintf("REPLY INVOKE id=0x%06x (length=%zu)",
4172                                pReq->request_id, replyDataLength);
4173 
4174     gJdwpState->SendRequest(pReply);
4175   } else {
4176     VLOG(jdwp) << "Not sending invoke reply because debugger detached";
4177   }
4178 }
4179 
4180 /*
4181  * "request" contains a full JDWP packet, possibly with multiple chunks.  We
4182  * need to process each, accumulate the replies, and ship the whole thing
4183  * back.
4184  *
4185  * Returns "true" if we have a reply.  The reply buffer is newly allocated,
4186  * and includes the chunk type/length, followed by the data.
4187  *
4188  * OLD-TODO: we currently assume that the request and reply include a single
4189  * chunk.  If this becomes inconvenient we will need to adapt.
4190  */
4191 bool Dbg::DdmHandlePacket(JDWP::Request* request, uint8_t** pReplyBuf, int* pReplyLen) {
4192   Thread* self = Thread::Current();
4193   JNIEnv* env = self->GetJniEnv();
4194 
4195   uint32_t type = request->ReadUnsigned32("type");
4196   uint32_t length = request->ReadUnsigned32("length");
4197 
4198   // Create a byte[] corresponding to 'request'.
4199   size_t request_length = request->size();
4200   ScopedLocalRef<jbyteArray> dataArray(env, env->NewByteArray(request_length));
4201   if (dataArray.get() == nullptr) {
4202     LOG(WARNING) << "byte[] allocation failed: " << request_length;
4203     env->ExceptionClear();
4204     return false;
4205   }
4206   env->SetByteArrayRegion(dataArray.get(), 0, request_length,
4207                           reinterpret_cast<const jbyte*>(request->data()));
4208   request->Skip(request_length);
4209 
4210   // Run through and find all chunks.  [Currently just find the first.]
4211   ScopedByteArrayRO contents(env, dataArray.get());
4212   if (length != request_length) {
4213     LOG(WARNING) << StringPrintf("bad chunk found (len=%u pktLen=%zd)", length, request_length);
4214     return false;
4215   }
4216 
4217   // Call "private static Chunk dispatch(int type, byte[] data, int offset, int length)".
4218   ScopedLocalRef<jobject> chunk(env, env->CallStaticObjectMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
4219                                                                  WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_dispatch,
4220                                                                  type, dataArray.get(), 0, length));
4221   if (env->ExceptionCheck()) {
4222     LOG(INFO) << StringPrintf("Exception thrown by dispatcher for 0x%08x", type);
4223     env->ExceptionDescribe();
4224     env->ExceptionClear();
4225     return false;
4226   }
4227 
4228   if (chunk.get() == nullptr) {
4229     return false;
4230   }
4231 
4232   /*
4233    * Pull the pieces out of the chunk.  We copy the results into a
4234    * newly-allocated buffer that the caller can free.  We don't want to
4235    * continue using the Chunk object because nothing has a reference to it.
4236    *
4237    * We could avoid this by returning type/data/offset/length and having
4238    * the caller be aware of the object lifetime issues, but that
4239    * integrates the JDWP code more tightly into the rest of the runtime, and doesn't work
4240    * if we have responses for multiple chunks.
4241    *
4242    * So we're pretty much stuck with copying data around multiple times.
4243    */
4244   ScopedLocalRef<jbyteArray> replyData(env, reinterpret_cast<jbyteArray>(env->GetObjectField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data)));
4245   jint offset = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_offset);
4246   length = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_length);
4247   type = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type);
4248 
4249   VLOG(jdwp) << StringPrintf("DDM reply: type=0x%08x data=%p offset=%d length=%d", type, replyData.get(), offset, length);
4250   if (length == 0 || replyData.get() == nullptr) {
4251     return false;
4252   }
4253 
4254   const int kChunkHdrLen = 8;
4255   uint8_t* reply = new (std::nothrow) uint8_t[length + kChunkHdrLen];
4256   if (reply == nullptr) {
4257     LOG(WARNING) << "allocation failed: " << (length + kChunkHdrLen);
4258     return false;
4259   }
4260   JDWP::Set4BE(reply + 0, type);
4261   JDWP::Set4BE(reply + 4, length);
4262   env->GetByteArrayRegion(replyData.get(), offset, length, reinterpret_cast<jbyte*>(reply + kChunkHdrLen));
4263 
4264   *pReplyBuf = reply;
4265   *pReplyLen = length + kChunkHdrLen;
4266 
4267   VLOG(jdwp) << StringPrintf("dvmHandleDdm returning type=%.4s %p len=%d", reinterpret_cast<char*>(reply), reply, length);
4268   return true;
4269 }
4270 
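// Illustrative framing note (not part of the original source): a DDM chunk is an 8-byte
// header (big-endian type, then big-endian payload length) followed by the payload, which is
// what the kChunkHdrLen arithmetic above encodes. For a hypothetical 5-byte payload:
//
//   uint8_t reply[8 + 5];
//   JDWP::Set4BE(reply + 0, CHUNK_TYPE("HELO"));  // Four ASCII chars packed big-endian.
//   JDWP::Set4BE(reply + 4, 5);                   // Payload length.
//   // reply[8..13) holds the payload bytes.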
4271 void Dbg::DdmBroadcast(bool connect) {
4272   VLOG(jdwp) << "Broadcasting DDM " << (connect ? "connect" : "disconnect") << "...";
4273 
4274   Thread* self = Thread::Current();
4275   if (self->GetState() != kRunnable) {
4276     LOG(ERROR) << "DDM broadcast in thread state " << self->GetState();
4277     /* try anyway? */
4278   }
4279 
4280   JNIEnv* env = self->GetJniEnv();
4281   jint event = connect ? 1 /*DdmServer.CONNECTED*/ : 2 /*DdmServer.DISCONNECTED*/;
4282   env->CallStaticVoidMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
4283                             WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_broadcast,
4284                             event);
4285   if (env->ExceptionCheck()) {
4286     LOG(ERROR) << "DdmServer.broadcast " << event << " failed";
4287     env->ExceptionDescribe();
4288     env->ExceptionClear();
4289   }
4290 }
4291 
4292 void Dbg::DdmConnected() {
4293   Dbg::DdmBroadcast(true);
4294 }
4295 
4296 void Dbg::DdmDisconnected() {
4297   Dbg::DdmBroadcast(false);
4298   gDdmThreadNotification = false;
4299 }
4300 
4301 /*
4302  * Send a notification when a thread starts, stops, or changes its name.
4303  *
4304  * Because we broadcast the full set of threads when the notifications are
4305  * first enabled, it's possible for "thread" to be actively executing.
4306  */
4307 void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) {
4308   if (!gDdmThreadNotification) {
4309     return;
4310   }
4311 
4312   if (type == CHUNK_TYPE("THDE")) {
4313     uint8_t buf[4];
4314     JDWP::Set4BE(&buf[0], t->GetThreadId());
4315     Dbg::DdmSendChunk(CHUNK_TYPE("THDE"), 4, buf);
4316   } else {
4317     CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
4318     ScopedObjectAccessUnchecked soa(Thread::Current());
4319     StackHandleScope<1> hs(soa.Self());
4320     Handle<mirror::String> name(hs.NewHandle(t->GetThreadName(soa)));
4321     size_t char_count = (name.Get() != nullptr) ? name->GetLength() : 0;
4322     const jchar* chars = (name.Get() != nullptr) ? name->GetValue() : nullptr;
4323 
4324     std::vector<uint8_t> bytes;
4325     JDWP::Append4BE(bytes, t->GetThreadId());
4326     JDWP::AppendUtf16BE(bytes, chars, char_count);
4327     CHECK_EQ(bytes.size(), char_count*2 + sizeof(uint32_t)*2);
4328     Dbg::DdmSendChunk(type, bytes);
4329   }
4330 }
4331 
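// Illustrative layout of the thread notification chunks built above (big-endian):
//
//   THDE:       [u4] thread id                                          -> always 4 bytes
//   THCR/THNM:  [u4] thread id, [u4] name length in chars, [u2]*n UTF-16 name
//
// e.g. a THCR for thread id 5 named "main" is 4 + 4 + 2*4 = 16 bytes, which is exactly what
// the CHECK_EQ on bytes.size() verifies.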
4332 void Dbg::DdmSetThreadNotification(bool enable) {
4333   // Enable/disable thread notifications.
4334   gDdmThreadNotification = enable;
4335   if (enable) {
4336     // Suspend the VM then post thread start notifications for all threads. Threads attaching will
4337     // see a suspension in progress and block until that ends. They then post their own start
4338     // notification.
4339     SuspendVM();
4340     std::list<Thread*> threads;
4341     Thread* self = Thread::Current();
4342     {
4343       MutexLock mu(self, *Locks::thread_list_lock_);
4344       threads = Runtime::Current()->GetThreadList()->GetList();
4345     }
4346     {
4347       ScopedObjectAccess soa(self);
4348       for (Thread* thread : threads) {
4349         Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR"));
4350       }
4351     }
4352     ResumeVM();
4353   }
4354 }
4355 
4356 void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) {
4357   if (IsDebuggerActive()) {
4358     gJdwpState->PostThreadChange(t, type == CHUNK_TYPE("THCR"));
4359   }
4360   Dbg::DdmSendThreadNotification(t, type);
4361 }
4362 
4363 void Dbg::PostThreadStart(Thread* t) {
4364   Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THCR"));
4365 }
4366 
4367 void Dbg::PostThreadDeath(Thread* t) {
4368   Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE"));
4369 }
4370 
4371 void Dbg::DdmSendChunk(uint32_t type, size_t byte_count, const uint8_t* buf) {
4372   CHECK(buf != nullptr);
4373   iovec vec[1];
4374   vec[0].iov_base = reinterpret_cast<void*>(const_cast<uint8_t*>(buf));
4375   vec[0].iov_len = byte_count;
4376   Dbg::DdmSendChunkV(type, vec, 1);
4377 }
4378 
4379 void Dbg::DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes) {
4380   DdmSendChunk(type, bytes.size(), &bytes[0]);
4381 }
4382 
4383 void Dbg::DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) {
4384   if (gJdwpState == nullptr) {
4385     VLOG(jdwp) << "Debugger thread not active, ignoring DDM send: " << type;
4386   } else {
4387     gJdwpState->DdmSendChunkV(type, iov, iov_count);
4388   }
4389 }
4390 
4391 JDWP::JdwpState* Dbg::GetJdwpState() {
4392   return gJdwpState;
4393 }
4394 
4395 int Dbg::DdmHandleHpifChunk(HpifWhen when) {
4396   if (when == HPIF_WHEN_NOW) {
4397     DdmSendHeapInfo(when);
4398     return true;
4399   }
4400 
4401   if (when != HPIF_WHEN_NEVER && when != HPIF_WHEN_NEXT_GC && when != HPIF_WHEN_EVERY_GC) {
4402     LOG(ERROR) << "invalid HpifWhen value: " << static_cast<int>(when);
4403     return false;
4404   }
4405 
4406   gDdmHpifWhen = when;
4407   return true;
4408 }
4409 
4410 bool Dbg::DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when, Dbg::HpsgWhat what, bool native) {
4411   if (when != HPSG_WHEN_NEVER && when != HPSG_WHEN_EVERY_GC) {
4412     LOG(ERROR) << "invalid HpsgWhen value: " << static_cast<int>(when);
4413     return false;
4414   }
4415 
4416   if (what != HPSG_WHAT_MERGED_OBJECTS && what != HPSG_WHAT_DISTINCT_OBJECTS) {
4417     LOG(ERROR) << "invalid HpsgWhat value: " << static_cast<int>(what);
4418     return false;
4419   }
4420 
4421   if (native) {
4422     gDdmNhsgWhen = when;
4423     gDdmNhsgWhat = what;
4424   } else {
4425     gDdmHpsgWhen = when;
4426     gDdmHpsgWhat = what;
4427   }
4428   return true;
4429 }
4430 
4431 void Dbg::DdmSendHeapInfo(HpifWhen reason) {
4432   // If there's a one-shot 'when', reset it.
4433   if (reason == gDdmHpifWhen) {
4434     if (gDdmHpifWhen == HPIF_WHEN_NEXT_GC) {
4435       gDdmHpifWhen = HPIF_WHEN_NEVER;
4436     }
4437   }
4438 
4439   /*
4440    * Chunk HPIF (client --> server)
4441    *
4442    * Heap Info. General information about the heap,
4443    * suitable for a summary display.
4444    *
4445    *   [u4]: number of heaps
4446    *
4447    *   For each heap:
4448    *     [u4]: heap ID
4449    *     [u8]: timestamp in ms since Unix epoch
4450    *     [u1]: capture reason (same as 'when' value from server)
4451    *     [u4]: max heap size in bytes (-Xmx)
4452    *     [u4]: current heap size in bytes
4453    *     [u4]: current number of bytes allocated
4454    *     [u4]: current number of objects allocated
4455    */
4456   uint8_t heap_count = 1;
4457   gc::Heap* heap = Runtime::Current()->GetHeap();
4458   std::vector<uint8_t> bytes;
4459   JDWP::Append4BE(bytes, heap_count);
4460   JDWP::Append4BE(bytes, 1);  // Heap id (bogus; we only have one heap).
4461   JDWP::Append8BE(bytes, MilliTime());
4462   JDWP::Append1BE(bytes, reason);
4463   JDWP::Append4BE(bytes, heap->GetMaxMemory());  // Max allowed heap size in bytes.
4464   JDWP::Append4BE(bytes, heap->GetTotalMemory());  // Current heap size in bytes.
4465   JDWP::Append4BE(bytes, heap->GetBytesAllocated());
4466   JDWP::Append4BE(bytes, heap->GetObjectsAllocated());
4467   CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4)));
4468   Dbg::DdmSendChunk(CHUNK_TYPE("HPIF"), bytes);
4469 }
4470 
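// Worked size check for the HPIF chunk above: with heap_count == 1 the payload is
// 4 (heap count) + 4 (heap id) + 8 (timestamp) + 1 (reason) + 4 + 4 + 4 + 4 (sizes/counts)
// = 33 bytes, matching the CHECK_EQ on bytes.size().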
4471 enum HpsgSolidity {
4472   SOLIDITY_FREE = 0,
4473   SOLIDITY_HARD = 1,
4474   SOLIDITY_SOFT = 2,
4475   SOLIDITY_WEAK = 3,
4476   SOLIDITY_PHANTOM = 4,
4477   SOLIDITY_FINALIZABLE = 5,
4478   SOLIDITY_SWEEP = 6,
4479 };
4480 
4481 enum HpsgKind {
4482   KIND_OBJECT = 0,
4483   KIND_CLASS_OBJECT = 1,
4484   KIND_ARRAY_1 = 2,
4485   KIND_ARRAY_2 = 3,
4486   KIND_ARRAY_4 = 4,
4487   KIND_ARRAY_8 = 5,
4488   KIND_UNKNOWN = 6,
4489   KIND_NATIVE = 7,
4490 };
4491 
4492 #define HPSG_PARTIAL (1<<7)
4493 #define HPSG_STATE(solidity, kind) ((uint8_t)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))
4494 
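// Worked example of the state byte: HPSG_STATE packs the kind into bits 3-5 and the solidity
// into bits 0-2, leaving bit 7 for HPSG_PARTIAL. A live int[] array chunk is therefore
//   HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4) == (4 << 3) | 1 == 0x21,
// and a continuation record for the same chunk is 0x21 | HPSG_PARTIAL == 0xa1.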
4495 class HeapChunkContext {
4496  public:
4497   // Maximum chunk size.  Obtain this from the formula:
4498   // (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2
4499   HeapChunkContext(bool merge, bool native)
4500       : buf_(16384 - 16),
4501         type_(0),
4502         chunk_overhead_(0) {
4503     Reset();
4504     if (native) {
4505       type_ = CHUNK_TYPE("NHSG");
4506     } else {
4507       type_ = merge ? CHUNK_TYPE("HPSG") : CHUNK_TYPE("HPSO");
4508     }
4509   }
4510 
4511   ~HeapChunkContext() {
4512     if (p_ > &buf_[0]) {
4513       Flush();
4514     }
4515   }
4516 
4517   void SetChunkOverhead(size_t chunk_overhead) {
4518     chunk_overhead_ = chunk_overhead;
4519   }
4520 
4521   void ResetStartOfNextChunk() {
4522     startOfNextMemoryChunk_ = nullptr;
4523   }
4524 
4525   void EnsureHeader(const void* chunk_ptr) {
4526     if (!needHeader_) {
4527       return;
4528     }
4529 
4530     // Start a new HPSx chunk.
4531     JDWP::Write4BE(&p_, 1);  // Heap id (bogus; we only have one heap).
4532     JDWP::Write1BE(&p_, 8);  // Size of allocation unit, in bytes.
4533 
4534     JDWP::Write4BE(&p_, reinterpret_cast<uintptr_t>(chunk_ptr));  // virtual address of segment start.
4535     JDWP::Write4BE(&p_, 0);  // offset of this piece (relative to the virtual address).
4536     // [u4]: length of piece, in allocation units
4537     // We won't know this until we're done, so save the offset and stuff in a dummy value.
4538     pieceLenField_ = p_;
4539     JDWP::Write4BE(&p_, 0x55555555);
4540     needHeader_ = false;
4541   }
4542 
4543   void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4544     if (pieceLenField_ == nullptr) {
4545       // Flush immediately post Reset (maybe back-to-back Flush). Ignore.
4546       CHECK(needHeader_);
4547       return;
4548     }
4549     // Patch the "length of piece" field.
4550     CHECK_LE(&buf_[0], pieceLenField_);
4551     CHECK_LE(pieceLenField_, p_);
4552     JDWP::Set4BE(pieceLenField_, totalAllocationUnits_);
4553 
4554     Dbg::DdmSendChunk(type_, p_ - &buf_[0], &buf_[0]);
4555     Reset();
4556   }
4557 
4558   static void HeapChunkJavaCallback(void* start, void* end, size_t used_bytes, void* arg)
4559       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
4560                             Locks::mutator_lock_) {
4561     reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkJavaCallback(start, end, used_bytes);
4562   }
4563 
4564   static void HeapChunkNativeCallback(void* start, void* end, size_t used_bytes, void* arg)
4565       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4566     reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkNativeCallback(start, end, used_bytes);
4567   }
4568 
4569  private:
4570   enum { ALLOCATION_UNIT_SIZE = 8 };
4571 
4572   void Reset() {
4573     p_ = &buf_[0];
4574     ResetStartOfNextChunk();
4575     totalAllocationUnits_ = 0;
4576     needHeader_ = true;
4577     pieceLenField_ = nullptr;
4578   }
4579 
4580   bool IsNative() const {
4581     return type_ == CHUNK_TYPE("NHSG");
4582   }
4583 
4584   // Returns true if the object is not an empty chunk.
4585   bool ProcessRecord(void* start, size_t used_bytes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4586     // Note: heap callbacks cannot manipulate the heap upon which they are crawling. Care is
4587     // taken in the following code not to allocate memory, by ensuring buf_ is of the correct size.
4588     if (used_bytes == 0) {
4589       if (start == nullptr) {
4590         // Reset for start of new heap.
4591         startOfNextMemoryChunk_ = nullptr;
4592         Flush();
4593       }
4594       // Only process in-use memory so that free region information
4595       // also includes dlmalloc bookkeeping.
4596       return false;
4597     }
4598     if (startOfNextMemoryChunk_ != nullptr) {
4599       // Transmit any pending free memory. Native free memory over kMaxFreeLen is likely due to
4600       // the use of mmaps, so don't report it. If the memory is not free, start a new segment.
4601       bool flush = true;
4602       if (start > startOfNextMemoryChunk_) {
4603         const size_t kMaxFreeLen = 2 * kPageSize;
4604         void* free_start = startOfNextMemoryChunk_;
4605         void* free_end = start;
4606         const size_t free_len =
4607             reinterpret_cast<uintptr_t>(free_end) - reinterpret_cast<uintptr_t>(free_start);
4608         if (!IsNative() || free_len < kMaxFreeLen) {
4609           AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), free_start, free_len, IsNative());
4610           flush = false;
4611         }
4612       }
4613       if (flush) {
4614         startOfNextMemoryChunk_ = nullptr;
4615         Flush();
4616       }
4617     }
4618     return true;
4619   }
4620 
4621   void HeapChunkNativeCallback(void* start, void* /*end*/, size_t used_bytes)
4622       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4623     if (ProcessRecord(start, used_bytes)) {
4624       uint8_t state = ExamineNativeObject(start);
4625       AppendChunk(state, start, used_bytes + chunk_overhead_, true /*is_native*/);
4626       startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
4627     }
4628   }
4629 
4630   void HeapChunkJavaCallback(void* start, void* /*end*/, size_t used_bytes)
4631       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
4632     if (ProcessRecord(start, used_bytes)) {
4633       // Determine the type of this chunk.
4634       // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
4635       // If it's the same, we should combine them.
4636       uint8_t state = ExamineJavaObject(reinterpret_cast<mirror::Object*>(start));
4637       AppendChunk(state, start, used_bytes + chunk_overhead_, false /*is_native*/);
4638       startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
4639     }
4640   }
4641 
4642   void AppendChunk(uint8_t state, void* ptr, size_t length, bool is_native)
4643       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4644     // Make sure there's enough room left in the buffer.
4645     // We need to use two bytes for every fractional 256 allocation units used by the chunk plus
4646     // 17 bytes for any header.
4647     const size_t needed = ((RoundUp(length / ALLOCATION_UNIT_SIZE, 256) / 256) * 2) + 17;
4648     size_t byte_left = &buf_.back() - p_;
4649     if (byte_left < needed) {
4650       if (is_native) {
4651         // Cannot trigger memory allocation while walking native heap.
4652         return;
4653       }
4654       Flush();
4655     }
4656 
4657     byte_left = &buf_.back() - p_;
4658     if (byte_left < needed) {
4659       LOG(WARNING) << "Chunk is too big to transmit (chunk_len=" << length << ", "
4660           << needed << " bytes)";
4661       return;
4662     }
4663     EnsureHeader(ptr);
4664     // Write out the chunk description.
4665     length /= ALLOCATION_UNIT_SIZE;   // Convert to allocation units.
4666     totalAllocationUnits_ += length;
4667     while (length > 256) {
4668       *p_++ = state | HPSG_PARTIAL;
4669       *p_++ = 255;     // length - 1
4670       length -= 256;
4671     }
4672     *p_++ = state;
4673     *p_++ = length - 1;
4674   }
4675 
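// Worked example of the run-length encoding above (not part of the original source): a chunk
// of 600 allocation units (4800 bytes at ALLOCATION_UNIT_SIZE == 8) is emitted as three
// (state, length - 1) pairs:
//
//   state | HPSG_PARTIAL, 255   // first 256 units
//   state | HPSG_PARTIAL, 255   // next 256 units
//   state,               87     // remaining 88 units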
4676   uint8_t ExamineNativeObject(const void* p) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4677     return p == nullptr ? HPSG_STATE(SOLIDITY_FREE, 0) : HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
4678   }
4679 
4680   uint8_t ExamineJavaObject(mirror::Object* o)
4681       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
4682     if (o == nullptr) {
4683       return HPSG_STATE(SOLIDITY_FREE, 0);
4684     }
4685     // It's an allocated chunk. Figure out what it is.
4686     gc::Heap* heap = Runtime::Current()->GetHeap();
4687     if (!heap->IsLiveObjectLocked(o)) {
4688       LOG(ERROR) << "Invalid object in managed heap: " << o;
4689       return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
4690     }
4691     mirror::Class* c = o->GetClass();
4692     if (c == nullptr) {
4693       // The object was probably just created but hasn't been initialized yet.
4694       return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
4695     }
4696     if (!heap->IsValidObjectAddress(c)) {
4697       LOG(ERROR) << "Invalid class for managed heap object: " << o << " " << c;
4698       return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
4699     }
4700     if (c->GetClass() == nullptr) {
4701       LOG(ERROR) << "Null class of class " << c << " for object " << o;
4702       return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
4703     }
4704     if (c->IsClassClass()) {
4705       return HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
4706     }
4707     if (c->IsArrayClass()) {
4708       switch (c->GetComponentSize()) {
4709       case 1: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
4710       case 2: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
4711       case 4: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
4712       case 8: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
4713       }
4714     }
4715     return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
4716   }
4717 
4718   std::vector<uint8_t> buf_;
4719   uint8_t* p_;
4720   uint8_t* pieceLenField_;
4721   void* startOfNextMemoryChunk_;
4722   size_t totalAllocationUnits_;
4723   uint32_t type_;
4724   bool needHeader_;
4725   size_t chunk_overhead_;
4726 
4727   DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
4728 };
4729 
4730 static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
4731     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
4732   const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
4733   HeapChunkContext::HeapChunkJavaCallback(
4734       obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
4735 }
4736 
4737 void Dbg::DdmSendHeapSegments(bool native) {
4738   Dbg::HpsgWhen when = native ? gDdmNhsgWhen : gDdmHpsgWhen;
4739   Dbg::HpsgWhat what = native ? gDdmNhsgWhat : gDdmHpsgWhat;
4740   if (when == HPSG_WHEN_NEVER) {
4741     return;
4742   }
4743   // Figure out what kind of chunks we'll be sending.
4744   CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS)
4745       << static_cast<int>(what);
4746 
4747   // First, send a heap start chunk.
4748   uint8_t heap_id[4];
4749   JDWP::Set4BE(&heap_id[0], 1);  // Heap id (bogus; we only have one heap).
4750   Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"), sizeof(heap_id), heap_id);
4751   Thread* self = Thread::Current();
4752   Locks::mutator_lock_->AssertSharedHeld(self);
4753 
4754   // Send a series of heap segment chunks.
4755   HeapChunkContext context(what == HPSG_WHAT_MERGED_OBJECTS, native);
4756   if (native) {
4757 #if defined(HAVE_ANDROID_OS) && defined(USE_DLMALLOC)
4758     dlmalloc_inspect_all(HeapChunkContext::HeapChunkNativeCallback, &context);
4759     HeapChunkContext::HeapChunkNativeCallback(nullptr, nullptr, 0, &context);  // Indicate end of a space.
4760 #else
4761     UNIMPLEMENTED(WARNING) << "Native heap inspection is only supported with dlmalloc";
4762 #endif
4763   } else {
4764     gc::Heap* heap = Runtime::Current()->GetHeap();
4765     for (const auto& space : heap->GetContinuousSpaces()) {
4766       if (space->IsDlMallocSpace()) {
4767         ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
4768         // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
4769         // allocation then the first sizeof(size_t) may belong to it.
4770         context.SetChunkOverhead(sizeof(size_t));
4771         space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
4772       } else if (space->IsRosAllocSpace()) {
4773         context.SetChunkOverhead(0);
4774         // Need to acquire the mutator lock before the heap bitmap lock with exclusive access since
4775         // RosAlloc's internal logic doesn't know to release and reacquire the heap bitmap lock.
4776         self->TransitionFromRunnableToSuspended(kSuspended);
4777         ThreadList* tl = Runtime::Current()->GetThreadList();
4778         tl->SuspendAll(__FUNCTION__);
4779         {
4780           ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
4781           space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
4782         }
4783         tl->ResumeAll();
4784         self->TransitionFromSuspendedToRunnable();
4785       } else if (space->IsBumpPointerSpace()) {
4786         ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
4787         context.SetChunkOverhead(0);
4788         space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context);
4789         HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
4790       } else if (space->IsRegionSpace()) {
4791         heap->IncrementDisableMovingGC(self);
4792         self->TransitionFromRunnableToSuspended(kSuspended);
4793         ThreadList* tl = Runtime::Current()->GetThreadList();
4794         tl->SuspendAll(__FUNCTION__);
4795         ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
4796         context.SetChunkOverhead(0);
4797         space->AsRegionSpace()->Walk(BumpPointerSpaceCallback, &context);
4798         HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
4799         tl->ResumeAll();
4800         self->TransitionFromSuspendedToRunnable();
4801         heap->DecrementDisableMovingGC(self);
4802       } else {
4803         UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
4804       }
4805       context.ResetStartOfNextChunk();
4806     }
4807     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
4808     // Walk the large objects, these are not in the AllocSpace.
4809     context.SetChunkOverhead(0);
4810     heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
4811   }
4812 
4813   // Finally, send a heap end chunk.
4814   Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id);
4815 }
4816 
4817 static size_t GetAllocTrackerMax() {
4818 #ifdef HAVE_ANDROID_OS
4819   // Check whether there's a system property overriding the number of records.
4820   const char* propertyName = "dalvik.vm.allocTrackerMax";
4821   char allocRecordMaxString[PROPERTY_VALUE_MAX];
4822   if (property_get(propertyName, allocRecordMaxString, "") > 0) {
4823     char* end;
4824     size_t value = strtoul(allocRecordMaxString, &end, 10);
4825     if (*end != '\0') {
4826       LOG(ERROR) << "Ignoring  " << propertyName << " '" << allocRecordMaxString
4827                  << "' --- invalid";
4828       return kDefaultNumAllocRecords;
4829     }
4830     if (!IsPowerOfTwo(value)) {
4831       LOG(ERROR) << "Ignoring  " << propertyName << " '" << allocRecordMaxString
4832                  << "' --- not power of two";
4833       return kDefaultNumAllocRecords;
4834     }
4835     return value;
4836   }
4837 #endif
4838   return kDefaultNumAllocRecords;
4839 }
4840 
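// Usage note (illustrative): on device builds the record count can be overridden with a
// system property; the value must parse fully and be a power of two or it is ignored:
//
//   adb shell setprop dalvik.vm.allocTrackerMax 16384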
4841 void Dbg::SetAllocTrackingEnabled(bool enable) {
4842   Thread* self = Thread::Current();
4843   if (enable) {
4844     {
4845       MutexLock mu(self, *Locks::alloc_tracker_lock_);
4846       if (recent_allocation_records_ != nullptr) {
4847         return;  // Already enabled, bail.
4848       }
4849       alloc_record_max_ = GetAllocTrackerMax();
4850       LOG(INFO) << "Enabling alloc tracker (" << alloc_record_max_ << " entries of "
4851                 << kMaxAllocRecordStackDepth << " frames, taking "
4852                 << PrettySize(sizeof(AllocRecord) * alloc_record_max_) << ")";
4853       DCHECK_EQ(alloc_record_head_, 0U);
4854       DCHECK_EQ(alloc_record_count_, 0U);
4855       recent_allocation_records_ = new AllocRecord[alloc_record_max_];
4856       CHECK(recent_allocation_records_ != nullptr);
4857     }
4858     Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
4859   } else {
4860     {
4861       ScopedObjectAccess soa(self);  // For type_cache_.Clear();
4862       MutexLock mu(self, *Locks::alloc_tracker_lock_);
4863       if (recent_allocation_records_ == nullptr) {
4864         return;  // Already disabled, bail.
4865       }
4866       LOG(INFO) << "Disabling alloc tracker";
4867       delete[] recent_allocation_records_;
4868       recent_allocation_records_ = nullptr;
4869       alloc_record_head_ = 0;
4870       alloc_record_count_ = 0;
4871       type_cache_.Clear();
4872     }
4873     // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
4874     Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
4875   }
4876 }
4877 
4878 struct AllocRecordStackVisitor : public StackVisitor {
4879   AllocRecordStackVisitor(Thread* thread, AllocRecord* record_in)
4880       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
4881       : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
4882         record(record_in),
4883         depth(0) {}
4884 
4885   // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
4886   // annotalysis.
4887   bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
4888     if (depth >= kMaxAllocRecordStackDepth) {
4889       return false;
4890     }
4891     ArtMethod* m = GetMethod();
4892     if (!m->IsRuntimeMethod()) {
4893       record->StackElement(depth)->SetMethod(m);
4894       record->StackElement(depth)->SetDexPc(GetDexPc());
4895       ++depth;
4896     }
4897     return true;
4898   }
4899 
4900   ~AllocRecordStackVisitor() {
4901     // Clear out any unused stack trace elements.
4902     for (; depth < kMaxAllocRecordStackDepth; ++depth) {
4903       record->StackElement(depth)->SetMethod(nullptr);
4904       record->StackElement(depth)->SetDexPc(0);
4905     }
4906   }
4907 
4908   AllocRecord* record;
4909   size_t depth;
4910 };
4911 
4912 void Dbg::RecordAllocation(Thread* self, mirror::Class* type, size_t byte_count) {
4913   MutexLock mu(self, *Locks::alloc_tracker_lock_);
4914   if (recent_allocation_records_ == nullptr) {
4915     // In the process of shutting down recording, bail.
4916     return;
4917   }
4918 
4919   // Advance and clip.
4920   if (++alloc_record_head_ == alloc_record_max_) {
4921     alloc_record_head_ = 0;
4922   }
4923 
4924   // Fill in the basics.
4925   AllocRecord* record = &recent_allocation_records_[alloc_record_head_];
4926   record->SetType(type);
4927   record->SetByteCount(byte_count);
4928   record->SetThinLockId(self->GetThreadId());
4929 
4930   // Fill in the stack trace.
4931   AllocRecordStackVisitor visitor(self, record);
4932   visitor.WalkStack();
4933 
4934   if (alloc_record_count_ < alloc_record_max_) {
4935     ++alloc_record_count_;
4936   }
4937 }
4938 
4939 // Returns the index of the head element.
4940 //
4941 // We point at the most-recently-written record, so if alloc_record_count_ is 1
4942 // we want to use the current element.  Take "head+1" and subtract count
4943 // from it.
4944 //
4945 // We need to handle underflow in our circular buffer, so we add
4946 // alloc_record_max_ and then mask it back down.
4947 size_t Dbg::HeadIndex() {
4948   return (Dbg::alloc_record_head_ + 1 + Dbg::alloc_record_max_ - Dbg::alloc_record_count_) &
4949       (Dbg::alloc_record_max_ - 1);
4950 }
4951 
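// Worked example: with alloc_record_max_ = 8, alloc_record_head_ = 2 (most recent write) and
// alloc_record_count_ = 4, HeadIndex() returns (2 + 1 + 8 - 4) & 7 == 7, i.e. the live
// records occupy indices 7, 0, 1, 2 in write order, oldest first.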
4952 void Dbg::DumpRecentAllocations() {
4953   ScopedObjectAccess soa(Thread::Current());
4954   MutexLock mu(soa.Self(), *Locks::alloc_tracker_lock_);
4955   if (recent_allocation_records_ == nullptr) {
4956     LOG(INFO) << "Not recording tracked allocations";
4957     return;
4958   }
4959 
4960   // "i" is the head of the list.  We want to start at the end of the
4961   // list and move forward to the tail.
4962   size_t i = HeadIndex();
4963   const uint16_t capped_count = CappedAllocRecordCount(Dbg::alloc_record_count_);
4964   uint16_t count = capped_count;
4965 
4966   LOG(INFO) << "Tracked allocations, (head=" << alloc_record_head_ << " count=" << count << ")";
4967   while (count--) {
4968     AllocRecord* record = &recent_allocation_records_[i];
4969 
4970     LOG(INFO) << StringPrintf(" Thread %-2d %6zd bytes ", record->ThinLockId(), record->ByteCount())
4971               << PrettyClass(record->Type());
4972 
4973     for (size_t stack_frame = 0; stack_frame < kMaxAllocRecordStackDepth; ++stack_frame) {
4974       AllocRecordStackTraceElement* stack_element = record->StackElement(stack_frame);
4975       ArtMethod* m = stack_element->Method();
4976       if (m == nullptr) {
4977         break;
4978       }
4979       LOG(INFO) << "    " << PrettyMethod(m) << " line " << stack_element->LineNumber();
4980     }
4981 
4982     // Pause periodically to help logcat catch up.
4983     if ((count % 5) == 0) {
4984       usleep(40000);
4985     }
4986 
4987     i = (i + 1) & (alloc_record_max_ - 1);
4988   }
4989 }
4990 
4991 class StringTable {
4992  public:
4993   StringTable() {
4994   }
4995 
4996   void Add(const std::string& str) {
4997     table_.insert(str);
4998   }
4999 
5000   void Add(const char* str) {
5001     table_.insert(str);
5002   }
5003 
5004   size_t IndexOf(const char* s) const {
5005     auto it = table_.find(s);
5006     if (it == table_.end()) {
5007       LOG(FATAL) << "IndexOf(\"" << s << "\") failed";
5008     }
5009     return std::distance(table_.begin(), it);
5010   }
5011 
5012   size_t Size() const {
5013     return table_.size();
5014   }
5015 
5016   void WriteTo(std::vector<uint8_t>& bytes) const {
5017     for (const std::string& str : table_) {
5018       const char* s = str.c_str();
5019       size_t s_len = CountModifiedUtf8Chars(s);
5020       std::unique_ptr<uint16_t[]> s_utf16(new uint16_t[s_len]);
5021       ConvertModifiedUtf8ToUtf16(s_utf16.get(), s);
5022       JDWP::AppendUtf16BE(bytes, s_utf16.get(), s_len);
5023     }
5024   }
5025 
5026  private:
5027   std::set<std::string> table_;
5028   DISALLOW_COPY_AND_ASSIGN(StringTable);
5029 };
5030 
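// Usage note (illustrative): table_ is a sorted std::set, so IndexOf is only stable once all
// strings have been added; a later Add can shift earlier indices. GetRecentAllocations below
// therefore adds every string in a first pass before looking up any index:
//
//   StringTable t;
//   t.Add("b");
//   t.Add("a");                    // "a" sorts before "b"...
//   CHECK_EQ(t.IndexOf("b"), 1u);  // ...so "b" ends up at index 1.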
5031 static const char* GetMethodSourceFile(ArtMethod* method)
5032     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
5033   DCHECK(method != nullptr);
5034   const char* source_file = method->GetDeclaringClassSourceFile();
5035   return (source_file != nullptr) ? source_file : "";
5036 }
5037 
5038 /*
5039  * The data we send to DDMS contains everything we have recorded.
5040  *
5041  * Message header (all values big-endian):
5042  * (1b) message header len (to allow future expansion); includes itself
5043  * (1b) entry header len
5044  * (1b) stack frame len
5045  * (2b) number of entries
5046  * (4b) offset to string table from start of message
5047  * (2b) number of class name strings
5048  * (2b) number of method name strings
5049  * (2b) number of source file name strings
5050  * For each entry:
5051  *   (4b) total allocation size
5052  *   (2b) thread id
5053  *   (2b) allocated object's class name index
5054  *   (1b) stack depth
5055  *   For each stack frame:
5056  *     (2b) method's class name
5057  *     (2b) method name
5058  *     (2b) method source file
5059  *     (2b) line number, clipped to 32767; -2 if native; -1 if no source
5060  * (xb) class name strings
5061  * (xb) method name strings
5062  * (xb) source file strings
5063  *
5064  * As with other DDM traffic, strings are sent as a 4-byte length
5065  * followed by UTF-16 data.
5066  *
5067  * We send up 16-bit unsigned indexes into string tables.  In theory there
5068  * can be (kMaxAllocRecordStackDepth * alloc_record_max_) unique strings in
5069  * each table, but in practice there should be far fewer.
5070  *
5071  * The chief reason for using a string table here is to keep the size of
5072  * the DDMS message to a minimum.  This is partly to make the protocol
5073  * efficient, but also because we have to form the whole thing up all at
5074  * once in a memory buffer.
5075  *
5076  * We use separate string tables for class names, method names, and source
5077  * files to keep the indexes small.  There will generally be no overlap
5078  * between the contents of these tables.
5079  */
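// Worked check of the fixed-size headers described above: the message header is
// 1 + 1 + 1 + 2 + 4 + 2 + 2 + 2 = 15 bytes (kMessageHeaderLen), each entry header is
// 4 + 2 + 2 + 1 = 9 bytes (kEntryHeaderLen), and each stack frame record is
// 2 + 2 + 2 + 2 = 8 bytes (kStackFrameLen).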
5080 jbyteArray Dbg::GetRecentAllocations() {
5081   if ((false)) {
5082     DumpRecentAllocations();
5083   }
5084 
5085   Thread* self = Thread::Current();
5086   std::vector<uint8_t> bytes;
5087   {
5088     MutexLock mu(self, *Locks::alloc_tracker_lock_);
5089     //
5090     // Part 1: generate string tables.
5091     //
5092     StringTable class_names;
5093     StringTable method_names;
5094     StringTable filenames;
5095 
5096     const uint16_t capped_count = CappedAllocRecordCount(Dbg::alloc_record_count_);
5097     uint16_t count = capped_count;
5098     size_t idx = HeadIndex();
5099     while (count--) {
5100       AllocRecord* record = &recent_allocation_records_[idx];
5101       std::string temp;
5102       class_names.Add(record->Type()->GetDescriptor(&temp));
5103       for (size_t i = 0; i < kMaxAllocRecordStackDepth; i++) {
5104         ArtMethod* m = record->StackElement(i)->Method();
5105         if (m != nullptr) {
5106           class_names.Add(m->GetDeclaringClassDescriptor());
5107           method_names.Add(m->GetName());
5108           filenames.Add(GetMethodSourceFile(m));
5109         }
5110       }
5111 
5112       idx = (idx + 1) & (alloc_record_max_ - 1);
5113     }
5114 
5115     LOG(INFO) << "allocation records: " << capped_count;
5116 
5117     //
5118     // Part 2: Generate the output and store it in the buffer.
5119     //
5120 
5121     // (1b) message header len (to allow future expansion); includes itself
5122     // (1b) entry header len
5123     // (1b) stack frame len
5124     const int kMessageHeaderLen = 15;
5125     const int kEntryHeaderLen = 9;
5126     const int kStackFrameLen = 8;
5127     JDWP::Append1BE(bytes, kMessageHeaderLen);
5128     JDWP::Append1BE(bytes, kEntryHeaderLen);
5129     JDWP::Append1BE(bytes, kStackFrameLen);
5130 
5131     // (2b) number of entries
5132     // (4b) offset to string table from start of message
5133     // (2b) number of class name strings
5134     // (2b) number of method name strings
5135     // (2b) number of source file name strings
5136     JDWP::Append2BE(bytes, capped_count);
5137     size_t string_table_offset = bytes.size();
5138     JDWP::Append4BE(bytes, 0);  // We'll patch this later...
5139     JDWP::Append2BE(bytes, class_names.Size());
5140     JDWP::Append2BE(bytes, method_names.Size());
5141     JDWP::Append2BE(bytes, filenames.Size());
5142 
5143     idx = HeadIndex();
5144     std::string temp;
5145     for (count = capped_count; count != 0; --count) {
5146       // For each entry:
5147       // (4b) total allocation size
5148       // (2b) thread id
5149       // (2b) allocated object's class name index
5150       // (1b) stack depth
5151       AllocRecord* record = &recent_allocation_records_[idx];
5152       size_t stack_depth = record->GetDepth();
5153       size_t allocated_object_class_name_index =
5154           class_names.IndexOf(record->Type()->GetDescriptor(&temp));
5155       JDWP::Append4BE(bytes, record->ByteCount());
5156       JDWP::Append2BE(bytes, record->ThinLockId());
5157       JDWP::Append2BE(bytes, allocated_object_class_name_index);
5158       JDWP::Append1BE(bytes, stack_depth);
5159 
5160       for (size_t stack_frame = 0; stack_frame < stack_depth; ++stack_frame) {
5161         // For each stack frame:
5162         // (2b) method's class name
5163         // (2b) method name
5164         // (2b) method source file
5165         // (2b) line number, clipped to 32767; -2 if native; -1 if no source
5166         ArtMethod* m = record->StackElement(stack_frame)->Method();
5167         size_t class_name_index = class_names.IndexOf(m->GetDeclaringClassDescriptor());
5168         size_t method_name_index = method_names.IndexOf(m->GetName());
5169         size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(m));
5170         JDWP::Append2BE(bytes, class_name_index);
5171         JDWP::Append2BE(bytes, method_name_index);
5172         JDWP::Append2BE(bytes, file_name_index);
5173         JDWP::Append2BE(bytes, record->StackElement(stack_frame)->LineNumber());
5174       }
5175       idx = (idx + 1) & (alloc_record_max_ - 1);
5176     }
5177 
5178     // (xb) class name strings
5179     // (xb) method name strings
5180     // (xb) source file strings
5181     JDWP::Set4BE(&bytes[string_table_offset], bytes.size());
5182     class_names.WriteTo(bytes);
5183     method_names.WriteTo(bytes);
5184     filenames.WriteTo(bytes);
5185   }
5186   JNIEnv* env = self->GetJniEnv();
5187   jbyteArray result = env->NewByteArray(bytes.size());
5188   if (result != nullptr) {
5189     env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0]));
5190   }
5191   return result;
5192 }
5193 
5194 ArtMethod* DeoptimizationRequest::Method() const {
5195   ScopedObjectAccessUnchecked soa(Thread::Current());
5196   return soa.DecodeMethod(method_);
5197 }
5198 
5199 void DeoptimizationRequest::SetMethod(ArtMethod* m) {
5200   ScopedObjectAccessUnchecked soa(Thread::Current());
5201   method_ = soa.EncodeMethod(m);
5202 }
5203 
5204 }  // namespace art
5205