/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "debugger.h"

#include <sys/uio.h>

#include <set>

#include "arch/context.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/time_utils.h"
#include "class_linker.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
#include "dex_instruction.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/allocation_record.h"
#include "gc/scoped_gc_critical_section.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "handle_scope.h"
#include "jdwp/jdwp_priv.h"
#include "jdwp/object_registry.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
#include "reflection.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "ScopedPrimitiveArray.h"
#include "handle_scope-inl.h"
#include "thread_list.h"
#include "utf.h"
#include "well_known_classes.h"

namespace art {

// The key identifying the debugger to update instrumentation.
static constexpr const char* kDbgInstrumentationKey = "Debugger";

// Limit alloc_record_count to the 2BE value (64k-1) that is the limit of the current protocol.
static uint16_t CappedAllocRecordCount(size_t alloc_record_count) {
  const size_t cap = 0xffff;
  if (alloc_record_count > cap) {
    return cap;
  }
  return alloc_record_count;
}
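// For example, if 100,000 allocation records are live, the capped count reported
// to the debugger is 0xffff (65,535), because the reply encodes the record count
// as an unsigned two-byte big-endian value.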

// Takes a method and returns a 'canonical' one if the method is default (and therefore potentially
// copied from some other class). This ensures that the debugger does not get confused as to which
// method we are in.
static ArtMethod* GetCanonicalMethod(ArtMethod* m)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  if (LIKELY(!m->IsDefault())) {
    return m;
  } else {
    mirror::Class* declaring_class = m->GetDeclaringClass();
    return declaring_class->FindDeclaredVirtualMethod(declaring_class->GetDexCache(),
                                                      m->GetDexMethodIndex(),
                                                      sizeof(void*));
  }
}

class Breakpoint : public ValueObject {
 public:
  Breakpoint(ArtMethod* method, uint32_t dex_pc, DeoptimizationRequest::Kind deoptimization_kind)
    : method_(GetCanonicalMethod(method)),
      dex_pc_(dex_pc),
      deoptimization_kind_(deoptimization_kind) {
    CHECK(deoptimization_kind_ == DeoptimizationRequest::kNothing ||
          deoptimization_kind_ == DeoptimizationRequest::kSelectiveDeoptimization ||
          deoptimization_kind_ == DeoptimizationRequest::kFullDeoptimization);
  }

  Breakpoint(const Breakpoint& other) SHARED_REQUIRES(Locks::mutator_lock_)
    : method_(other.method_),
      dex_pc_(other.dex_pc_),
      deoptimization_kind_(other.deoptimization_kind_) {}

  // Method() is called from root visiting, do not use ScopedObjectAccess here or it can cause
  // GC to deadlock if another thread tries to call SuspendAll while the GC is in a runnable state.
  ArtMethod* Method() const {
    return method_;
  }

  uint32_t DexPc() const {
    return dex_pc_;
  }

  DeoptimizationRequest::Kind GetDeoptimizationKind() const {
    return deoptimization_kind_;
  }

  // Returns true if the method of this breakpoint and the passed in method should be considered the
  // same. That is, they are either the same method or they are copied from the same method.
  bool IsInMethod(ArtMethod* m) const SHARED_REQUIRES(Locks::mutator_lock_) {
    return method_ == GetCanonicalMethod(m);
  }

 private:
  // The location of this breakpoint.
  ArtMethod* method_;
  uint32_t dex_pc_;

  // Indicates whether breakpoint needs full deoptimization or selective deoptimization.
  DeoptimizationRequest::Kind deoptimization_kind_;
};

static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc());
  return os;
}
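// A hypothetical breakpoint at dex pc 0x4 of java.lang.Object.wait() would
// stream as "Breakpoint[void java.lang.Object.wait() @0x4]" (illustrative only;
// the exact method string comes from PrettyMethod).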

class DebugInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
 public:
  DebugInstrumentationListener() {}
  virtual ~DebugInstrumentationListener() {}

  void MethodEntered(Thread* thread, mirror::Object* this_object, ArtMethod* method,
                     uint32_t dex_pc)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    if (method->IsNative()) {
      // TODO: post location events is a suspension point and native method entry stubs aren't.
      return;
    }
    if (IsListeningToDexPcMoved()) {
      // We also listen to kDexPcMoved instrumentation event so we know the DexPcMoved method is
      // going to be called right after us. To avoid sending JDWP events twice for this location,
      // we report the event in DexPcMoved. However, we must remember this is a method entry so we
      // send the METHOD_ENTRY event. And we can also group it with other events for this location
      // like BREAKPOINT or SINGLE_STEP (or even METHOD_EXIT if this is a RETURN instruction).
      thread->SetDebugMethodEntry();
    } else if (IsListeningToMethodExit() && IsReturn(method, dex_pc)) {
      // We also listen to kMethodExited instrumentation event and the current instruction is a
      // RETURN so we know the MethodExited method is going to be called right after us. To avoid
      // sending JDWP events twice for this location, we report the event(s) in MethodExited.
      // However, we must remember this is a method entry so we send the METHOD_ENTRY event. And we
      // can also group it with other events for this location like BREAKPOINT or SINGLE_STEP.
      thread->SetDebugMethodEntry();
    } else {
      Dbg::UpdateDebugger(thread, this_object, method, 0, Dbg::kMethodEntry, nullptr);
    }
  }

  void MethodExited(Thread* thread, mirror::Object* this_object, ArtMethod* method,
                    uint32_t dex_pc, const JValue& return_value)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    if (method->IsNative()) {
      // TODO: post location events is a suspension point and native method entry stubs aren't.
      return;
    }
    uint32_t events = Dbg::kMethodExit;
    if (thread->IsDebugMethodEntry()) {
      // It is also the method entry.
      DCHECK(IsReturn(method, dex_pc));
      events |= Dbg::kMethodEntry;
      thread->ClearDebugMethodEntry();
    }
    Dbg::UpdateDebugger(thread, this_object, method, dex_pc, events, &return_value);
  }

  void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object ATTRIBUTE_UNUSED,
                    ArtMethod* method, uint32_t dex_pc)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    // We're not registered to listen to this kind of event, so complain.
    LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
               << " " << dex_pc;
  }

  void DexPcMoved(Thread* thread, mirror::Object* this_object, ArtMethod* method,
                  uint32_t new_dex_pc)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    if (IsListeningToMethodExit() && IsReturn(method, new_dex_pc)) {
      // We also listen to kMethodExited instrumentation event and the current instruction is a
      // RETURN so we know the MethodExited method is going to be called right after us. Like in
      // MethodEntered, we delegate event reporting to MethodExited.
      // Besides, if this RETURN instruction is the only one in the method, we can send multiple
      // JDWP events in the same packet: METHOD_ENTRY, METHOD_EXIT, BREAKPOINT and/or SINGLE_STEP.
      // Therefore, we must not clear the debug method entry flag here.
    } else {
      uint32_t events = 0;
      if (thread->IsDebugMethodEntry()) {
        // It is also the method entry.
        events = Dbg::kMethodEntry;
        thread->ClearDebugMethodEntry();
      }
      Dbg::UpdateDebugger(thread, this_object, method, new_dex_pc, events, nullptr);
    }
  }

  void FieldRead(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
                 ArtMethod* method, uint32_t dex_pc, ArtField* field)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
  }

  void FieldWritten(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
                    ArtMethod* method, uint32_t dex_pc, ArtField* field,
                    const JValue& field_value)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
  }

  void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED, mirror::Throwable* exception_object)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    Dbg::PostException(exception_object);
  }

  // We only care about branches in the Jit.
  void Branch(Thread* /*thread*/, ArtMethod* method, uint32_t dex_pc, int32_t dex_pc_offset)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    LOG(ERROR) << "Unexpected branch event in debugger " << PrettyMethod(method)
               << " " << dex_pc << ", " << dex_pc_offset;
  }

  // We only care about invokes in the Jit.
  void InvokeVirtualOrInterface(Thread* thread ATTRIBUTE_UNUSED,
                                mirror::Object*,
                                ArtMethod* method,
                                uint32_t dex_pc,
                                ArtMethod*)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    LOG(ERROR) << "Unexpected invoke event in debugger " << PrettyMethod(method)
               << " " << dex_pc;
  }

 private:
  static bool IsReturn(ArtMethod* method, uint32_t dex_pc)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    const DexFile::CodeItem* code_item = method->GetCodeItem();
    const Instruction* instruction = Instruction::At(&code_item->insns_[dex_pc]);
    return instruction->IsReturn();
  }

  static bool IsListeningToDexPcMoved() SHARED_REQUIRES(Locks::mutator_lock_) {
    return IsListeningTo(instrumentation::Instrumentation::kDexPcMoved);
  }

  static bool IsListeningToMethodExit() SHARED_REQUIRES(Locks::mutator_lock_) {
    return IsListeningTo(instrumentation::Instrumentation::kMethodExited);
  }

  static bool IsListeningTo(instrumentation::Instrumentation::InstrumentationEvent event)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    return (Dbg::GetInstrumentationEvents() & event) != 0;
  }

  DISALLOW_COPY_AND_ASSIGN(DebugInstrumentationListener);
} gDebugInstrumentationListener;

// JDWP is allowed unless the Zygote forbids it.
static bool gJdwpAllowed = true;

// Was there a -Xrunjdwp or -agentlib:jdwp= argument on the command line?
static bool gJdwpConfigured = false;

// JDWP options for debugging. Only valid if IsJdwpConfigured() is true.
static JDWP::JdwpOptions gJdwpOptions;

// Runtime JDWP state.
static JDWP::JdwpState* gJdwpState = nullptr;
static bool gDebuggerConnected;  // debugger or DDMS is connected.

static bool gDdmThreadNotification = false;

// DDMS GC-related settings.
static Dbg::HpifWhen gDdmHpifWhen = Dbg::HPIF_WHEN_NEVER;
static Dbg::HpsgWhen gDdmHpsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmHpsgWhat;
static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmNhsgWhat;

bool Dbg::gDebuggerActive = false;
bool Dbg::gDisposed = false;
ObjectRegistry* Dbg::gRegistry = nullptr;

// Deoptimization support.
std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_;
size_t Dbg::full_deoptimization_event_count_ = 0;

// Instrumentation event reference counters.
size_t Dbg::dex_pc_change_event_ref_count_ = 0;
size_t Dbg::method_enter_event_ref_count_ = 0;
size_t Dbg::method_exit_event_ref_count_ = 0;
size_t Dbg::field_read_event_ref_count_ = 0;
size_t Dbg::field_write_event_ref_count_ = 0;
size_t Dbg::exception_catch_event_ref_count_ = 0;
uint32_t Dbg::instrumentation_events_ = 0;

// Breakpoints.
static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);

void DebugInvokeReq::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
  receiver.VisitRootIfNonNull(visitor, root_info);  // null for static method call.
  klass.VisitRoot(visitor, root_info);
}

void SingleStepControl::AddDexPc(uint32_t dex_pc) {
  dex_pcs_.insert(dex_pc);
}

bool SingleStepControl::ContainsDexPc(uint32_t dex_pc) const {
  return dex_pcs_.find(dex_pc) != dex_pcs_.end();
}

static bool IsBreakpoint(ArtMethod* m, uint32_t dex_pc)
    REQUIRES(!Locks::breakpoint_lock_)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
    if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].IsInMethod(m)) {
      VLOG(jdwp) << "Hit breakpoint #" << i << ": " << gBreakpoints[i];
      return true;
    }
  }
  return false;
}

static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread)
    REQUIRES(!Locks::thread_suspend_count_lock_) {
  MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
  // A thread may be suspended for GC; in this code, we really want to know whether
  // there's a debugger suspension active.
  return thread->IsSuspended() && thread->GetDebugSuspendCount() > 0;
}

static mirror::Array* DecodeNonNullArray(JDWP::RefTypeId id, JDWP::JdwpError* error)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
  if (o == nullptr) {
    *error = JDWP::ERR_INVALID_OBJECT;
    return nullptr;
  }
  if (!o->IsArrayInstance()) {
    *error = JDWP::ERR_INVALID_ARRAY;
    return nullptr;
  }
  *error = JDWP::ERR_NONE;
  return o->AsArray();
}

static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError* error)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
  if (o == nullptr) {
    *error = JDWP::ERR_INVALID_OBJECT;
    return nullptr;
  }
  if (!o->IsClass()) {
    *error = JDWP::ERR_INVALID_CLASS;
    return nullptr;
  }
  *error = JDWP::ERR_NONE;
  return o->AsClass();
}

static Thread* DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id,
                            JDWP::JdwpError* error)
    SHARED_REQUIRES(Locks::mutator_lock_)
    REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) {
  mirror::Object* thread_peer = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_id, error);
  if (thread_peer == nullptr) {
    // This isn't even an object.
    *error = JDWP::ERR_INVALID_OBJECT;
    return nullptr;
  }

  mirror::Class* java_lang_Thread = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
  if (!java_lang_Thread->IsAssignableFrom(thread_peer->GetClass())) {
    // This isn't a thread.
    *error = JDWP::ERR_INVALID_THREAD;
    return nullptr;
  }

  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
  Thread* thread = Thread::FromManagedThread(soa, thread_peer);
  // If thread is null then this is a java.lang.Thread without a Thread*. It must be an unstarted
  // or a zombie thread.
  *error = (thread == nullptr) ? JDWP::ERR_THREAD_NOT_ALIVE : JDWP::ERR_NONE;
  return thread;
}

static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) {
  // JDWP deliberately uses the descriptor characters' ASCII values for its enum.
  // Note that by "basic" we mean that we don't get more specific than JT_OBJECT.
  return static_cast<JDWP::JdwpTag>(descriptor[0]);
}
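// For instance, the descriptor "I" maps to JDWP::JT_INT and "[I" maps to
// JDWP::JT_ARRAY, since the tag enum values are the descriptor characters
// 'I' and '[' themselves.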

static JDWP::JdwpTag BasicTagFromClass(mirror::Class* klass)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  std::string temp;
  const char* descriptor = klass->GetDescriptor(&temp);
  return BasicTagFromDescriptor(descriptor);
}

static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  CHECK(c != nullptr);
  if (c->IsArrayClass()) {
    return JDWP::JT_ARRAY;
  }
  if (c->IsStringClass()) {
    return JDWP::JT_STRING;
  }
  if (c->IsClassClass()) {
    return JDWP::JT_CLASS_OBJECT;
  }
  {
    mirror::Class* thread_class = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
    if (thread_class->IsAssignableFrom(c)) {
      return JDWP::JT_THREAD;
    }
  }
  {
    mirror::Class* thread_group_class =
        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
    if (thread_group_class->IsAssignableFrom(c)) {
      return JDWP::JT_THREAD_GROUP;
    }
  }
  {
    mirror::Class* class_loader_class =
        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader);
    if (class_loader_class->IsAssignableFrom(c)) {
      return JDWP::JT_CLASS_LOADER;
    }
  }
  return JDWP::JT_OBJECT;
}

/*
 * Objects declared to hold Object might actually hold a more specific
 * type.  The debugger may take a special interest in these (e.g. it
 * wants to display the contents of Strings), so we want to return an
 * appropriate tag.
 *
 * Null objects are tagged JT_OBJECT.
 */
JDWP::JdwpTag Dbg::TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o) {
  return (o == nullptr) ? JDWP::JT_OBJECT : TagFromClass(soa, o->GetClass());
}
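// Example: a mirror::Object* that is actually a java.lang.String yields
// JDWP::JT_STRING rather than JT_OBJECT, letting the debugger display the
// string contents directly.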

static bool IsPrimitiveTag(JDWP::JdwpTag tag) {
  switch (tag) {
  case JDWP::JT_BOOLEAN:
  case JDWP::JT_BYTE:
  case JDWP::JT_CHAR:
  case JDWP::JT_FLOAT:
  case JDWP::JT_DOUBLE:
  case JDWP::JT_INT:
  case JDWP::JT_LONG:
  case JDWP::JT_SHORT:
  case JDWP::JT_VOID:
    return true;
  default:
    return false;
  }
}
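// Note that JT_VOID counts as primitive here, e.g. IsPrimitiveTag(JDWP::JT_VOID)
// and IsPrimitiveTag(JDWP::JT_INT) are true while IsPrimitiveTag(JDWP::JT_STRING)
// is false; callers below use this to decide between writing a raw value and
// writing an object id.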

void Dbg::StartJdwp() {
  if (!gJdwpAllowed || !IsJdwpConfigured()) {
    // No JDWP for you!
    return;
  }

  CHECK(gRegistry == nullptr);
  gRegistry = new ObjectRegistry;

  // Init JDWP if the debugger is enabled. This may connect out to a
  // debugger, passively listen for a debugger, or block waiting for a
  // debugger.
  gJdwpState = JDWP::JdwpState::Create(&gJdwpOptions);
  if (gJdwpState == nullptr) {
    // We probably failed because some other process has the port already, which means that
    // if we don't abort the user is likely to think they're talking to us when they're actually
    // talking to that other process.
    LOG(FATAL) << "Debugger thread failed to initialize";
  }

  // If a debugger has already attached, send the "welcome" message.
  // This may cause us to suspend all threads.
  if (gJdwpState->IsActive()) {
    ScopedObjectAccess soa(Thread::Current());
    gJdwpState->PostVMStart();
  }
}

void Dbg::StopJdwp() {
  // Post VM_DEATH event before the JDWP connection is closed (either by the JDWP thread or the
  // destruction of gJdwpState).
  if (gJdwpState != nullptr && gJdwpState->IsActive()) {
    gJdwpState->PostVMDeath();
  }
  // Prevent the JDWP thread from processing JDWP incoming packets after we close the connection.
  Dispose();
  delete gJdwpState;
  gJdwpState = nullptr;
  delete gRegistry;
  gRegistry = nullptr;
}

void Dbg::GcDidFinish() {
  if (gDdmHpifWhen != HPIF_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Sending heap info to DDM";
    DdmSendHeapInfo(gDdmHpifWhen);
  }
  if (gDdmHpsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping heap to DDM";
    DdmSendHeapSegments(false);
  }
  if (gDdmNhsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping native heap to DDM";
    DdmSendHeapSegments(true);
  }
}

void Dbg::SetJdwpAllowed(bool allowed) {
  gJdwpAllowed = allowed;
}

DebugInvokeReq* Dbg::GetInvokeReq() {
  return Thread::Current()->GetInvokeReq();
}

Thread* Dbg::GetDebugThread() {
  return (gJdwpState != nullptr) ? gJdwpState->GetDebugThread() : nullptr;
}

void Dbg::ClearWaitForEventThread() {
  gJdwpState->ReleaseJdwpTokenForEvent();
}

void Dbg::Connected() {
  CHECK(!gDebuggerConnected);
  VLOG(jdwp) << "JDWP has attached";
  gDebuggerConnected = true;
  gDisposed = false;
}

bool Dbg::RequiresDeoptimization() {
  // We don't need deoptimization if everything runs with the interpreter after
  // enabling -Xint mode.
  return !Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly();
}

// Used to patch boot image method entry point to interpreter bridge.
class UpdateEntryPointsClassVisitor : public ClassVisitor {
 public:
  explicit UpdateEntryPointsClassVisitor(instrumentation::Instrumentation* instrumentation)
      : instrumentation_(instrumentation) {}

  bool operator()(mirror::Class* klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
    auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
    for (auto& m : klass->GetMethods(pointer_size)) {
      const void* code = m.GetEntryPointFromQuickCompiledCode();
      if (Runtime::Current()->GetHeap()->IsInBootImageOatFile(code) &&
          !m.IsNative() &&
          !m.IsProxyMethod()) {
        instrumentation_->UpdateMethodsCodeFromDebugger(&m, GetQuickToInterpreterBridge());
      }
    }
    return true;
  }

 private:
  instrumentation::Instrumentation* const instrumentation_;
};

void Dbg::GoActive() {
  // Enable all debugging features, including scans for breakpoints.
  // This is a no-op if we're already active.
  // Only called from the JDWP handler thread.
  if (IsDebuggerActive()) {
    return;
  }

  Thread* const self = Thread::Current();
  {
    // TODO: dalvik only warned if there were breakpoints left over. clear in Dbg::Disconnected?
    ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
    CHECK_EQ(gBreakpoints.size(), 0U);
  }

  {
    MutexLock mu(self, *Locks::deoptimization_lock_);
    CHECK_EQ(deoptimization_requests_.size(), 0U);
    CHECK_EQ(full_deoptimization_event_count_, 0U);
    CHECK_EQ(dex_pc_change_event_ref_count_, 0U);
    CHECK_EQ(method_enter_event_ref_count_, 0U);
    CHECK_EQ(method_exit_event_ref_count_, 0U);
    CHECK_EQ(field_read_event_ref_count_, 0U);
    CHECK_EQ(field_write_event_ref_count_, 0U);
    CHECK_EQ(exception_catch_event_ref_count_, 0U);
  }

  Runtime* runtime = Runtime::Current();
  // Since boot image code may be AOT compiled as not debuggable, we need to patch
  // entry points of methods in boot image to interpreter bridge.
  // However, the performance cost of this is non-negligible during native-debugging due to the
  // forced JIT, so we keep the AOT code in that case in exchange for limited native debugging.
  if (!runtime->GetInstrumentation()->IsForcedInterpretOnly() && !runtime->IsNativeDebuggable()) {
    ScopedObjectAccess soa(self);
    UpdateEntryPointsClassVisitor visitor(runtime->GetInstrumentation());
    runtime->GetClassLinker()->VisitClasses(&visitor);
  }

  ScopedSuspendAll ssa(__FUNCTION__);
  if (RequiresDeoptimization()) {
    runtime->GetInstrumentation()->EnableDeoptimization();
  }
  instrumentation_events_ = 0;
  gDebuggerActive = true;
  LOG(INFO) << "Debugger is active";
}

void Dbg::Disconnected() {
  CHECK(gDebuggerConnected);

  LOG(INFO) << "Debugger is no longer active";

  // Suspend all threads and exclusively acquire the mutator lock. Set the state of the thread
  // to kRunnable to avoid scoped object access transitions. Remove the debugger as a listener
  // and clear the object registry.
  Runtime* runtime = Runtime::Current();
  Thread* self = Thread::Current();
  {
    // Required for DisableDeoptimization.
    gc::ScopedGCCriticalSection gcs(self,
                                    gc::kGcCauseInstrumentation,
                                    gc::kCollectorTypeInstrumentation);
    ScopedSuspendAll ssa(__FUNCTION__);
    ThreadState old_state = self->SetStateUnsafe(kRunnable);
    // Debugger may not be active at this point.
    if (IsDebuggerActive()) {
      {
        // Since we're going to disable deoptimization, we clear the deoptimization requests queue.
        // This prevents us from having any pending deoptimization request when the debugger
        // attaches to us again while no event has been requested yet.
        MutexLock mu(self, *Locks::deoptimization_lock_);
        deoptimization_requests_.clear();
        full_deoptimization_event_count_ = 0U;
      }
      if (instrumentation_events_ != 0) {
        runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
                                                      instrumentation_events_);
        instrumentation_events_ = 0;
      }
      if (RequiresDeoptimization()) {
        runtime->GetInstrumentation()->DisableDeoptimization(kDbgInstrumentationKey);
      }
      gDebuggerActive = false;
    }
    CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
  }

  {
    ScopedObjectAccess soa(self);
    gRegistry->Clear();
  }

  gDebuggerConnected = false;
}

void Dbg::ConfigureJdwp(const JDWP::JdwpOptions& jdwp_options) {
  CHECK_NE(jdwp_options.transport, JDWP::kJdwpTransportUnknown);
  gJdwpOptions = jdwp_options;
  gJdwpConfigured = true;
}

bool Dbg::IsJdwpConfigured() {
  return gJdwpConfigured;
}

int64_t Dbg::LastDebuggerActivity() {
  return gJdwpState->LastDebuggerActivity();
}

void Dbg::UndoDebuggerSuspensions() {
  Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
}

std::string Dbg::GetClassName(JDWP::RefTypeId class_id) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(class_id, &error);
  if (o == nullptr) {
    if (error == JDWP::ERR_NONE) {
      return "null";
    } else {
      return StringPrintf("invalid object %p", reinterpret_cast<void*>(class_id));
    }
  }
  if (!o->IsClass()) {
    return StringPrintf("non-class %p", o);  // This is only used for debugging output anyway.
  }
  return GetClassName(o->AsClass());
}

std::string Dbg::GetClassName(mirror::Class* klass) {
  if (klass == nullptr) {
    return "null";
  }
  std::string temp;
  return DescriptorToName(klass->GetDescriptor(&temp));
}

JDWP::JdwpError Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId* class_object_id) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, &status);
  if (c == nullptr) {
    *class_object_id = 0;
    return status;
  }
  *class_object_id = gRegistry->Add(c);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId* superclass_id) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, &status);
  if (c == nullptr) {
    *superclass_id = 0;
    return status;
  }
  if (c->IsInterface()) {
    // http://code.google.com/p/android/issues/detail?id=20856
    *superclass_id = 0;
  } else {
    *superclass_id = gRegistry->Add(c->GetSuperClass());
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(id, &error);
  if (c == nullptr) {
    return error;
  }
  expandBufAddObjectId(pReply, gRegistry->Add(c->GetClassLoader()));
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(id, &error);
  if (c == nullptr) {
    return error;
  }

  uint32_t access_flags = c->GetAccessFlags() & kAccJavaFlagsMask;

  // Set ACC_SUPER. Dex files don't contain this flag but only classes are supposed to have it set,
  // not interfaces.
  // Class.getModifiers doesn't return it, but JDWP does, so we set it here.
  if ((access_flags & kAccInterface) == 0) {
    access_flags |= kAccSuper;
  }

  expandBufAdd4BE(pReply, access_flags);

  return JDWP::ERR_NONE;
}
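// As an illustration, a public final class would be reported with
// ACC_PUBLIC | ACC_FINAL | ACC_SUPER (0x0031), even though ACC_SUPER appears
// neither in the dex access flags nor in Class.getModifiers().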

JDWP::JdwpError Dbg::GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }

  // Ensure all threads are suspended while we read objects' lock words.
  Thread* self = Thread::Current();
  CHECK_EQ(self->GetState(), kRunnable);

  MonitorInfo monitor_info;
  {
    ScopedThreadSuspension sts(self, kSuspended);
    ScopedSuspendAll ssa(__FUNCTION__);
    monitor_info = MonitorInfo(o);
  }
  if (monitor_info.owner_ != nullptr) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.owner_->GetPeer()));
  } else {
    expandBufAddObjectId(reply, gRegistry->Add(nullptr));
  }
  expandBufAdd4BE(reply, monitor_info.entry_count_);
  expandBufAdd4BE(reply, monitor_info.waiters_.size());
  for (size_t i = 0; i < monitor_info.waiters_.size(); ++i) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.waiters_[i]->GetPeer()));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id,
                                      std::vector<JDWP::ObjectId>* monitors,
                                      std::vector<uint32_t>* stack_depths) {
  struct OwnedMonitorVisitor : public StackVisitor {
    OwnedMonitorVisitor(Thread* thread, Context* context,
                        std::vector<JDWP::ObjectId>* monitor_vector,
                        std::vector<uint32_t>* stack_depth_vector)
        SHARED_REQUIRES(Locks::mutator_lock_)
      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        current_stack_depth(0),
        monitors(monitor_vector),
        stack_depths(stack_depth_vector) {}

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      if (!GetMethod()->IsRuntimeMethod()) {
        Monitor::VisitLocks(this, AppendOwnedMonitors, this);
        ++current_stack_depth;
      }
      return true;
    }

    static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg)
        SHARED_REQUIRES(Locks::mutator_lock_) {
      OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg);
      visitor->monitors->push_back(gRegistry->Add(owned_monitor));
      visitor->stack_depths->push_back(visitor->current_stack_depth);
    }

    size_t current_stack_depth;
    std::vector<JDWP::ObjectId>* const monitors;
    std::vector<uint32_t>* const stack_depths;
  };

  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (thread == nullptr) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  std::unique_ptr<Context> context(Context::Create());
  OwnedMonitorVisitor visitor(thread, context.get(), monitors, stack_depths);
  visitor.WalkStack();
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id,
                                         JDWP::ObjectId* contended_monitor) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  *contended_monitor = 0;
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (thread == nullptr) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  mirror::Object* contended_monitor_obj = Monitor::GetContendedMonitor(thread);
  // Add() requires the thread_list_lock_ not held to avoid the lock
  // level violation.
  *contended_monitor = gRegistry->Add(contended_monitor_obj);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
                                       std::vector<uint64_t>* counts) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  heap->CollectGarbage(false);
  std::vector<mirror::Class*> classes;
  counts->clear();
  for (size_t i = 0; i < class_ids.size(); ++i) {
    JDWP::JdwpError error;
    mirror::Class* c = DecodeClass(class_ids[i], &error);
    if (c == nullptr) {
      return error;
    }
    classes.push_back(c);
    counts->push_back(0);
  }
  heap->CountInstances(classes, false, &(*counts)[0]);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count,
                                  std::vector<JDWP::ObjectId>* instances) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  // We only want reachable instances, so do a GC.
  heap->CollectGarbage(false);
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }
  std::vector<mirror::Object*> raw_instances;
  Runtime::Current()->GetHeap()->GetInstances(c, max_count, raw_instances);
  for (size_t i = 0; i < raw_instances.size(); ++i) {
    instances->push_back(gRegistry->Add(raw_instances[i]));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
                                         std::vector<JDWP::ObjectId>* referring_objects) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  heap->CollectGarbage(false);
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  std::vector<mirror::Object*> raw_instances;
  heap->GetReferringObjects(o, max_count, raw_instances);
  for (size_t i = 0; i < raw_instances.size(); ++i) {
    referring_objects->push_back(gRegistry->Add(raw_instances[i]));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::DisableCollection(JDWP::ObjectId object_id) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  gRegistry->DisableCollection(object_id);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::EnableCollection(JDWP::ObjectId object_id) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  // Unlike DisableCollection, JDWP specs do not state an invalid object causes an error. The RI
  // also ignores these cases and never returns an error. However, it's not obvious why this
  // command should behave differently from DisableCollection and IsCollected commands. So let's
  // be more strict and return an error if this happens.
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  gRegistry->EnableCollection(object_id);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::IsCollected(JDWP::ObjectId object_id, bool* is_collected) {
  *is_collected = true;
  if (object_id == 0) {
    // Null object id is invalid.
    return JDWP::ERR_INVALID_OBJECT;
  }
  // JDWP specs state an INVALID_OBJECT error is returned if the object ID is not valid. However
  // the RI seems to ignore this and assume the object has been collected.
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o != nullptr) {
    *is_collected = gRegistry->IsCollected(object_id);
  }
  return JDWP::ERR_NONE;
}

void Dbg::DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count) {
  gRegistry->DisposeObject(object_id, reference_count);
}

JDWP::JdwpTypeTag Dbg::GetTypeTag(mirror::Class* klass) {
  DCHECK(klass != nullptr);
  if (klass->IsArrayClass()) {
    return JDWP::TT_ARRAY;
  } else if (klass->IsInterface()) {
    return JDWP::TT_INTERFACE;
  } else {
    return JDWP::TT_CLASS;
  }
}
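// For example, int[] maps to TT_ARRAY, java.lang.Runnable to TT_INTERFACE,
// and java.lang.String to TT_CLASS.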

JDWP::JdwpError Dbg::GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }

  JDWP::JdwpTypeTag type_tag = GetTypeTag(c);
  expandBufAdd1(pReply, type_tag);
  expandBufAddRefTypeId(pReply, class_id);
  return JDWP::ERR_NONE;
}

// Get the complete list of reference classes (i.e. all classes except
// the primitive types).
// Returns a newly-allocated buffer full of RefTypeId values.
class ClassListCreator : public ClassVisitor {
 public:
  explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes) : classes_(classes) {}

  bool operator()(mirror::Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!c->IsPrimitive()) {
      classes_->push_back(Dbg::GetObjectRegistry()->AddRefType(c));
    }
    return true;
  }

 private:
  std::vector<JDWP::RefTypeId>* const classes_;
};

void Dbg::GetClassList(std::vector<JDWP::RefTypeId>* classes) {
  ClassListCreator clc(classes);
  Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(&clc);
}

JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
                                  uint32_t* pStatus, std::string* pDescriptor) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }

  if (c->IsArrayClass()) {
    *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
    *pTypeTag = JDWP::TT_ARRAY;
  } else {
    if (c->IsErroneous()) {
      *pStatus = JDWP::CS_ERROR;
    } else {
      *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED | JDWP::CS_INITIALIZED;
    }
    *pTypeTag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
  }

  if (pDescriptor != nullptr) {
    std::string temp;
    *pDescriptor = c->GetDescriptor(&temp);
  }
  return JDWP::ERR_NONE;
}

void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>* ids) {
  std::vector<mirror::Class*> classes;
  Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, classes);
  ids->clear();
  for (size_t i = 0; i < classes.size(); ++i) {
    ids->push_back(gRegistry->Add(classes[i]));
  }
}

JDWP::JdwpError Dbg::GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }

  JDWP::JdwpTypeTag type_tag = GetTypeTag(o->GetClass());
  JDWP::RefTypeId type_id = gRegistry->AddRefType(o->GetClass());

  expandBufAdd1(pReply, type_tag);
  expandBufAddRefTypeId(pReply, type_id);

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string* signature) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }
  std::string temp;
  *signature = c->GetDescriptor(&temp);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSourceFile(JDWP::RefTypeId class_id, std::string* result) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }
  const char* source_file = c->GetSourceFile();
  if (source_file == nullptr) {
    return JDWP::ERR_ABSENT_INFORMATION;
  }
  *result = source_file;
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetObjectTag(JDWP::ObjectId object_id, uint8_t* tag) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (error != JDWP::ERR_NONE) {
    *tag = JDWP::JT_VOID;
    return error;
  }
  *tag = TagFromObject(soa, o);
  return JDWP::ERR_NONE;
}

size_t Dbg::GetTagWidth(JDWP::JdwpTag tag) {
  switch (tag) {
  case JDWP::JT_VOID:
    return 0;
  case JDWP::JT_BYTE:
  case JDWP::JT_BOOLEAN:
    return 1;
  case JDWP::JT_CHAR:
  case JDWP::JT_SHORT:
    return 2;
  case JDWP::JT_FLOAT:
  case JDWP::JT_INT:
    return 4;
  case JDWP::JT_ARRAY:
  case JDWP::JT_OBJECT:
  case JDWP::JT_STRING:
  case JDWP::JT_THREAD:
  case JDWP::JT_THREAD_GROUP:
  case JDWP::JT_CLASS_LOADER:
  case JDWP::JT_CLASS_OBJECT:
    return sizeof(JDWP::ObjectId);
  case JDWP::JT_DOUBLE:
  case JDWP::JT_LONG:
    return 8;
  default:
    LOG(FATAL) << "Unknown tag " << tag;
    return -1;
  }
}
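// For example, GetTagWidth(JDWP::JT_LONG) is 8 and GetTagWidth(JDWP::JT_CHAR)
// is 2, while every reference tag reports sizeof(JDWP::ObjectId) so the wire
// format stays consistent with how object ids are written elsewhere.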

JDWP::JdwpError Dbg::GetArrayLength(JDWP::ObjectId array_id, int32_t* length) {
  JDWP::JdwpError error;
  mirror::Array* a = DecodeNonNullArray(array_id, &error);
  if (a == nullptr) {
    return error;
  }
  *length = a->GetLength();
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId array_id, int offset, int count,
                                 JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Array* a = DecodeNonNullArray(array_id, &error);
  if (a == nullptr) {
    return error;
  }

  if (offset < 0 || count < 0 || offset > a->GetLength() || a->GetLength() - offset < count) {
    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset
                 << "; count=" << count;
    return JDWP::ERR_INVALID_LENGTH;
  }
  JDWP::JdwpTag element_tag = BasicTagFromClass(a->GetClass()->GetComponentType());
  expandBufAdd1(pReply, element_tag);
  expandBufAdd4BE(pReply, count);

  if (IsPrimitiveTag(element_tag)) {
    size_t width = GetTagWidth(element_tag);
    uint8_t* dst = expandBufAddSpace(pReply, count * width);
    if (width == 8) {
      const uint64_t* src8 = reinterpret_cast<uint64_t*>(a->GetRawData(sizeof(uint64_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write8BE(&dst, src8[offset + i]);
    } else if (width == 4) {
      const uint32_t* src4 = reinterpret_cast<uint32_t*>(a->GetRawData(sizeof(uint32_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write4BE(&dst, src4[offset + i]);
    } else if (width == 2) {
      const uint16_t* src2 = reinterpret_cast<uint16_t*>(a->GetRawData(sizeof(uint16_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write2BE(&dst, src2[offset + i]);
    } else {
      const uint8_t* src = reinterpret_cast<uint8_t*>(a->GetRawData(sizeof(uint8_t), 0));
      memcpy(dst, &src[offset * width], count * width);
    }
  } else {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    mirror::ObjectArray<mirror::Object>* oa = a->AsObjectArray<mirror::Object>();
    for (int i = 0; i < count; ++i) {
      mirror::Object* element = oa->Get(offset + i);
      JDWP::JdwpTag specific_tag = (element != nullptr) ? TagFromObject(soa, element)
                                                        : element_tag;
      expandBufAdd1(pReply, specific_tag);
      expandBufAddObjectId(pReply, gRegistry->Add(element));
    }
  }

  return JDWP::ERR_NONE;
}
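// A sketch of the resulting reply for a byte[3] {1, 2, 3} with offset=0 and
// count=3: one tag byte 'B' (JT_BYTE), a 4-byte big-endian count of 3, then
// the raw bytes 01 02 03. Primitive elements carry no per-element tags;
// object arrays instead emit a tag byte plus object id per element.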

template <typename T>
static void CopyArrayData(mirror::Array* a, JDWP::Request* src, int offset, int count)
    NO_THREAD_SAFETY_ANALYSIS {
  // TODO: fix when annotalysis correctly handles non-member functions.
  DCHECK(a->GetClass()->IsPrimitiveArray());

  T* dst = reinterpret_cast<T*>(a->GetRawData(sizeof(T), offset));
  for (int i = 0; i < count; ++i) {
    *dst++ = src->ReadValue(sizeof(T));
  }
}

JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int count,
                                      JDWP::Request* request) {
  JDWP::JdwpError error;
  mirror::Array* dst = DecodeNonNullArray(array_id, &error);
  if (dst == nullptr) {
    return error;
  }

  if (offset < 0 || count < 0 || offset > dst->GetLength() || dst->GetLength() - offset < count) {
    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset
                 << "; count=" << count;
    return JDWP::ERR_INVALID_LENGTH;
  }
  JDWP::JdwpTag element_tag = BasicTagFromClass(dst->GetClass()->GetComponentType());

  if (IsPrimitiveTag(element_tag)) {
    size_t width = GetTagWidth(element_tag);
    if (width == 8) {
      CopyArrayData<uint64_t>(dst, request, offset, count);
    } else if (width == 4) {
      CopyArrayData<uint32_t>(dst, request, offset, count);
    } else if (width == 2) {
      CopyArrayData<uint16_t>(dst, request, offset, count);
    } else {
      CopyArrayData<uint8_t>(dst, request, offset, count);
    }
  } else {
    mirror::ObjectArray<mirror::Object>* oa = dst->AsObjectArray<mirror::Object>();
    for (int i = 0; i < count; ++i) {
      JDWP::ObjectId id = request->ReadObjectId();
      mirror::Object* o = gRegistry->Get<mirror::Object*>(id, &error);
      if (error != JDWP::ERR_NONE) {
        return error;
      }
      // Check if the object's type is compatible with the array's type.
      if (o != nullptr && !o->InstanceOf(oa->GetClass()->GetComponentType())) {
        return JDWP::ERR_TYPE_MISMATCH;
      }
      oa->Set<false>(offset + i, o);
    }
  }

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::CreateString(const std::string& str, JDWP::ObjectId* new_string_id) {
  Thread* self = Thread::Current();
  mirror::String* new_string = mirror::String::AllocFromModifiedUtf8(self, str.c_str());
  if (new_string == nullptr) {
    DCHECK(self->IsExceptionPending());
    self->ClearException();
    LOG(ERROR) << "Could not allocate string";
    *new_string_id = 0;
    return JDWP::ERR_OUT_OF_MEMORY;
  }
  *new_string_id = gRegistry->Add(new_string);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId* new_object_id) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    *new_object_id = 0;
    return error;
  }
  Thread* self = Thread::Current();
  mirror::Object* new_object;
  if (c->IsStringClass()) {
    // Special case for java.lang.String.
    gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
    mirror::SetStringCountVisitor visitor(0);
    new_object = mirror::String::Alloc<true>(self, 0, allocator_type, visitor);
  } else {
    new_object = c->AllocObject(self);
  }
  if (new_object == nullptr) {
    DCHECK(self->IsExceptionPending());
    self->ClearException();
    LOG(ERROR) << "Could not allocate object of type " << PrettyDescriptor(c);
    *new_object_id = 0;
    return JDWP::ERR_OUT_OF_MEMORY;
  }
  *new_object_id = gRegistry->Add(new_object);
  return JDWP::ERR_NONE;
}

/*
 * Used by Eclipse's "Display" view to evaluate "new byte[5]" to get "(byte[]) [0, 0, 0, 0, 0]".
 */
JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length,
                                       JDWP::ObjectId* new_array_id) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(array_class_id, &error);
  if (c == nullptr) {
    *new_array_id = 0;
    return error;
  }
  Thread* self = Thread::Current();
  gc::Heap* heap = Runtime::Current()->GetHeap();
  mirror::Array* new_array = mirror::Array::Alloc<true>(self, c, length,
                                                        c->GetComponentSizeShift(),
                                                        heap->GetCurrentAllocator());
  if (new_array == nullptr) {
    DCHECK(self->IsExceptionPending());
    self->ClearException();
    LOG(ERROR) << "Could not allocate array of type " << PrettyDescriptor(c);
    *new_array_id = 0;
    return JDWP::ERR_OUT_OF_MEMORY;
  }
  *new_array_id = gRegistry->Add(new_array);
  return JDWP::ERR_NONE;
}
1333 
1334 JDWP::FieldId Dbg::ToFieldId(const ArtField* f) {
1335   return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f));
1336 }
1337 
1338 static JDWP::MethodId ToMethodId(ArtMethod* m)
1339     SHARED_REQUIRES(Locks::mutator_lock_) {
1340   return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(GetCanonicalMethod(m)));
1341 }
1342 
1343 static ArtField* FromFieldId(JDWP::FieldId fid)
1344     SHARED_REQUIRES(Locks::mutator_lock_) {
1345   return reinterpret_cast<ArtField*>(static_cast<uintptr_t>(fid));
1346 }
1347 
1348 static ArtMethod* FromMethodId(JDWP::MethodId mid)
1349     SHARED_REQUIRES(Locks::mutator_lock_) {
1350   return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(mid));
1351 }
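// Field and method ids are just the ArtField*/ArtMethod* pointer value widened to the
// JDWP id type, so the conversions round-trip with no lookup table. A minimal sketch of
// the invariant (illustrative, not a runtime check):
//
//   DCHECK_EQ(FromFieldId(ToFieldId(f)), f);
//   DCHECK_EQ(FromMethodId(ToMethodId(m)), GetCanonicalMethod(m));
//
// The method round-trip lands on the canonical method because ToMethodId canonicalizes
// copied default methods first.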
1352 
1353 bool Dbg::MatchThread(JDWP::ObjectId expected_thread_id, Thread* event_thread) {
1354   CHECK(event_thread != nullptr);
1355   JDWP::JdwpError error;
1356   mirror::Object* expected_thread_peer = gRegistry->Get<mirror::Object*>(
1357       expected_thread_id, &error);
1358   return expected_thread_peer == event_thread->GetPeer();
1359 }
1360 
1361 bool Dbg::MatchLocation(const JDWP::JdwpLocation& expected_location,
1362                         const JDWP::EventLocation& event_location) {
1363   if (expected_location.dex_pc != event_location.dex_pc) {
1364     return false;
1365   }
1366   ArtMethod* m = FromMethodId(expected_location.method_id);
1367   return m == event_location.method;
1368 }
1369 
1370 bool Dbg::MatchType(mirror::Class* event_class, JDWP::RefTypeId class_id) {
1371   if (event_class == nullptr) {
1372     return false;
1373   }
1374   JDWP::JdwpError error;
1375   mirror::Class* expected_class = DecodeClass(class_id, &error);
1376   CHECK(expected_class != nullptr);
1377   return expected_class->IsAssignableFrom(event_class);
1378 }
1379 
1380 bool Dbg::MatchField(JDWP::RefTypeId expected_type_id, JDWP::FieldId expected_field_id,
1381                      ArtField* event_field) {
1382   ArtField* expected_field = FromFieldId(expected_field_id);
1383   if (expected_field != event_field) {
1384     return false;
1385   }
1386   return Dbg::MatchType(event_field->GetDeclaringClass(), expected_type_id);
1387 }
1388 
1389 bool Dbg::MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* event_instance) {
1390   JDWP::JdwpError error;
1391   mirror::Object* modifier_instance = gRegistry->Get<mirror::Object*>(expected_instance_id, &error);
1392   return modifier_instance == event_instance;
1393 }
1394 
1395 void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc) {
1396   if (m == nullptr) {
1397     memset(location, 0, sizeof(*location));
1398   } else {
1399     mirror::Class* c = m->GetDeclaringClass();
1400     location->type_tag = GetTypeTag(c);
1401     location->class_id = gRegistry->AddRefType(c);
1402     location->method_id = ToMethodId(m);
1403     location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint64_t>(-1) : dex_pc;
1404   }
1405 }
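// Encoding note (illustrative): native and proxy methods have no meaningful dex pc, so
// their locations are sent with dex_pc == static_cast<uint64_t>(-1), which debuggers
// display as an unknown location; every other method reports its real dex pc unchanged.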
1406 
1407 std::string Dbg::GetMethodName(JDWP::MethodId method_id) {
1408   ArtMethod* m = FromMethodId(method_id);
1409   if (m == nullptr) {
1410     return "null";
1411   }
1412   return m->GetInterfaceMethodIfProxy(sizeof(void*))->GetName();
1413 }
1414 
1415 std::string Dbg::GetFieldName(JDWP::FieldId field_id) {
1416   ArtField* f = FromFieldId(field_id);
1417   if (f == nullptr) {
1418     return "null";
1419   }
1420   return f->GetName();
1421 }
1422 
1423 /*
1424  * Augment the access flags for synthetic methods and fields by setting
1425  * the "0xf0000000 bit" described by the JDWP spec. Also, strip out any
1426  * flags not specified by the Java programming language.
1427  */
1428 static uint32_t MangleAccessFlags(uint32_t accessFlags) {
1429   accessFlags &= kAccJavaFlagsMask;
1430   if ((accessFlags & kAccSynthetic) != 0) {
1431     accessFlags |= 0xf0000000;
1432   }
1433   return accessFlags;
1434 }
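// Worked example (illustrative; flag values as in art's modifiers): a synthetic public
// method with flags 0x1001 (kAccPublic | kAccSynthetic) is reported as 0xf0001001,
// while a plain public method (0x0001) passes through unchanged; runtime-internal bits
// above the Java mask are stripped before the synthetic marker is added.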
1435 
1436 /*
1437  * Circularly shifts registers so that arguments come first. Debuggers
1438  * expect slots to begin with arguments, but dex code places them at
1439  * the end.
1440  */
1441 static uint16_t MangleSlot(uint16_t slot, ArtMethod* m)
1442     SHARED_REQUIRES(Locks::mutator_lock_) {
1443   const DexFile::CodeItem* code_item = m->GetCodeItem();
1444   if (code_item == nullptr) {
1445     // We should not get here for a method without code (native, proxy or abstract). Log it and
1446     // return the slot as is since all registers are arguments.
1447     LOG(WARNING) << "Trying to mangle slot for method without code " << PrettyMethod(m);
1448     return slot;
1449   }
1450   uint16_t ins_size = code_item->ins_size_;
1451   uint16_t locals_size = code_item->registers_size_ - ins_size;
1452   if (slot >= locals_size) {
1453     return slot - locals_size;
1454   } else {
1455     return slot + ins_size;
1456   }
1457 }
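// Worked example (illustrative): for a code item with registers_size_ == 5 and
// ins_size_ == 2, dex stores locals in v0..v2 and the arguments in v3..v4. Debugger
// slots put arguments first, so:
//   MangleSlot(3, m) == 0   MangleSlot(4, m) == 1   // arguments move to the front
//   MangleSlot(0, m) == 2   MangleSlot(2, m) == 4   // locals shift up by ins_size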
1458 
1459 /*
1460  * Circularly shifts registers so that arguments come last. Reverts
1461  * slots to dex style argument placement.
1462  */
1463 static uint16_t DemangleSlot(uint16_t slot, ArtMethod* m, JDWP::JdwpError* error)
1464     SHARED_REQUIRES(Locks::mutator_lock_) {
1465   const DexFile::CodeItem* code_item = m->GetCodeItem();
1466   if (code_item == nullptr) {
1467     // We should not get here for a method without code (native, proxy or abstract). Log it and
1468     // return the slot as is since all registers are arguments.
1469     LOG(WARNING) << "Trying to demangle slot for method without code " << PrettyMethod(m);
1470     uint16_t vreg_count = ArtMethod::NumArgRegisters(m->GetShorty());
1471     if (slot < vreg_count) {
1472       *error = JDWP::ERR_NONE;
1473       return slot;
1474     }
1475   } else {
1476     if (slot < code_item->registers_size_) {
1477       uint16_t ins_size = code_item->ins_size_;
1478       uint16_t locals_size = code_item->registers_size_ - ins_size;
1479       *error = JDWP::ERR_NONE;
1480       return (slot < ins_size) ? slot + locals_size : slot - ins_size;
1481     }
1482   }
1483 
1484   // Slot is invalid in the method.
1485   LOG(ERROR) << "Invalid local slot " << slot << " for method " << PrettyMethod(m);
1486   *error = JDWP::ERR_INVALID_SLOT;
1487   return DexFile::kDexNoIndex16;
1488 }
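// With the same example shape (registers_size_ == 5, ins_size_ == 2), DemangleSlot
// inverts the shift: slots 0..1 map back to v3..v4 and slots 2..4 back to v0..v2, so
// DemangleSlot(MangleSlot(v, m), m, &error) == v for every v < registers_size_.
// (Illustrative claim; the local-variable code below relies on this round-trip.)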
1489 
1490 JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic,
1491                                           JDWP::ExpandBuf* pReply) {
1492   JDWP::JdwpError error;
1493   mirror::Class* c = DecodeClass(class_id, &error);
1494   if (c == nullptr) {
1495     return error;
1496   }
1497 
1498   size_t instance_field_count = c->NumInstanceFields();
1499   size_t static_field_count = c->NumStaticFields();
1500 
1501   expandBufAdd4BE(pReply, instance_field_count + static_field_count);
1502 
1503   for (size_t i = 0; i < instance_field_count + static_field_count; ++i) {
1504     ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) :
1505         c->GetStaticField(i - instance_field_count);
1506     expandBufAddFieldId(pReply, ToFieldId(f));
1507     expandBufAddUtf8String(pReply, f->GetName());
1508     expandBufAddUtf8String(pReply, f->GetTypeDescriptor());
1509     if (with_generic) {
1510       const char* generic_signature = "";
1511       expandBufAddUtf8String(pReply, generic_signature);
1512     }
1513     expandBufAdd4BE(pReply, MangleAccessFlags(f->GetAccessFlags()));
1514   }
1515   return JDWP::ERR_NONE;
1516 }
1517 
1518 JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_generic,
1519                                            JDWP::ExpandBuf* pReply) {
1520   JDWP::JdwpError error;
1521   mirror::Class* c = DecodeClass(class_id, &error);
1522   if (c == nullptr) {
1523     return error;
1524   }
1525 
1526   expandBufAdd4BE(pReply, c->NumMethods());
1527 
1528   auto* cl = Runtime::Current()->GetClassLinker();
1529   auto ptr_size = cl->GetImagePointerSize();
1530   for (ArtMethod& m : c->GetMethods(ptr_size)) {
1531     expandBufAddMethodId(pReply, ToMethodId(&m));
1532     expandBufAddUtf8String(pReply, m.GetInterfaceMethodIfProxy(sizeof(void*))->GetName());
1533     expandBufAddUtf8String(pReply,
1534                            m.GetInterfaceMethodIfProxy(sizeof(void*))->GetSignature().ToString());
1535     if (with_generic) {
1536       const char* generic_signature = "";
1537       expandBufAddUtf8String(pReply, generic_signature);
1538     }
1539     expandBufAdd4BE(pReply, MangleAccessFlags(m.GetAccessFlags()));
1540   }
1541   return JDWP::ERR_NONE;
1542 }
1543 
1544 JDWP::JdwpError Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
1545   JDWP::JdwpError error;
1546   Thread* self = Thread::Current();
1547   StackHandleScope<1> hs(self);
1548   Handle<mirror::Class> c(hs.NewHandle(DecodeClass(class_id, &error)));
1549   if (c.Get() == nullptr) {
1550     return error;
1551   }
1552   size_t interface_count = c->NumDirectInterfaces();
1553   expandBufAdd4BE(pReply, interface_count);
1554   for (size_t i = 0; i < interface_count; ++i) {
1555     expandBufAddRefTypeId(pReply,
1556                           gRegistry->AddRefType(mirror::Class::GetDirectInterface(self, c, i)));
1557   }
1558   return JDWP::ERR_NONE;
1559 }
1560 
1561 void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::ExpandBuf* pReply) {
1562   struct DebugCallbackContext {
1563     int numItems;
1564     JDWP::ExpandBuf* pReply;
1565 
1566     static bool Callback(void* context, const DexFile::PositionInfo& entry) {
1567       DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
1568       expandBufAdd8BE(pContext->pReply, entry.address_);
1569       expandBufAdd4BE(pContext->pReply, entry.line_);
1570       pContext->numItems++;
1571       return false;
1572     }
1573   };
1574   ArtMethod* m = FromMethodId(method_id);
1575   const DexFile::CodeItem* code_item = m->GetCodeItem();
1576   uint64_t start, end;
1577   if (code_item == nullptr) {
1578     DCHECK(m->IsNative() || m->IsProxyMethod());
1579     start = -1;
1580     end = -1;
1581   } else {
1582     start = 0;
1583     // Return the index of the last instruction.
1584     end = code_item->insns_size_in_code_units_ - 1;
1585   }
1586 
1587   expandBufAdd8BE(pReply, start);
1588   expandBufAdd8BE(pReply, end);
1589 
1590   // Add numLines later.
1591   size_t numLinesOffset = expandBufGetLength(pReply);
1592   expandBufAdd4BE(pReply, 0);
1593 
1594   DebugCallbackContext context;
1595   context.numItems = 0;
1596   context.pReply = pReply;
1597 
1598   if (code_item != nullptr) {
1599     m->GetDexFile()->DecodeDebugPositionInfo(code_item, DebugCallbackContext::Callback, &context);
1600   }
1601 
1602   JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, context.numItems);
1603 }
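// Shape of the reply assembled above (sketched from this code, not quoted from the
// JDWP spec):
//   int64 start   // lowest valid code index, or -1 for native/proxy methods
//   int64 end     // highest valid code index, or -1
//   int32 lines   // patched in at numLinesOffset once decoding finishes
//   lines * { int64 lineCodeIndex; int32 lineNumber; }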
1604 
1605 void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool with_generic,
1606                               JDWP::ExpandBuf* pReply) {
1607   struct DebugCallbackContext {
1608     ArtMethod* method;
1609     JDWP::ExpandBuf* pReply;
1610     size_t variable_count;
1611     bool with_generic;
1612 
1613     static void Callback(void* context, const DexFile::LocalInfo& entry)
1614         SHARED_REQUIRES(Locks::mutator_lock_) {
1615       DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
1616 
1617       uint16_t slot = entry.reg_;
1618       VLOG(jdwp) << StringPrintf("    %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d",
1619                                  pContext->variable_count, entry.start_address_,
1620                                  entry.end_address_ - entry.start_address_,
1621                                  entry.name_, entry.descriptor_, entry.signature_, slot,
1622                                  MangleSlot(slot, pContext->method));
1623 
1624       slot = MangleSlot(slot, pContext->method);
1625 
1626       expandBufAdd8BE(pContext->pReply, entry.start_address_);
1627       expandBufAddUtf8String(pContext->pReply, entry.name_);
1628       expandBufAddUtf8String(pContext->pReply, entry.descriptor_);
1629       if (pContext->with_generic) {
1630         expandBufAddUtf8String(pContext->pReply, entry.signature_);
1631       }
1632       expandBufAdd4BE(pContext->pReply, entry.end_address_ - entry.start_address_);
1633       expandBufAdd4BE(pContext->pReply, slot);
1634 
1635       ++pContext->variable_count;
1636     }
1637   };
1638   ArtMethod* m = FromMethodId(method_id);
1639 
1640   // arg_count considers doubles and longs to take 2 units.
1641   // variable_count considers everything to take 1 unit.
1642   std::string shorty(m->GetShorty());
1643   expandBufAdd4BE(pReply, ArtMethod::NumArgRegisters(shorty));
1644 
1645   // We don't know the total number of variables yet, so leave a blank and update it later.
1646   size_t variable_count_offset = expandBufGetLength(pReply);
1647   expandBufAdd4BE(pReply, 0);
1648 
1649   DebugCallbackContext context;
1650   context.method = m;
1651   context.pReply = pReply;
1652   context.variable_count = 0;
1653   context.with_generic = with_generic;
1654 
1655   const DexFile::CodeItem* code_item = m->GetCodeItem();
1656   if (code_item != nullptr) {
1657     m->GetDexFile()->DecodeDebugLocalInfo(
1658         code_item, m->IsStatic(), m->GetDexMethodIndex(), DebugCallbackContext::Callback,
1659         &context);
1660   }
1661 
1662   JDWP::Set4BE(expandBufGetBuffer(pReply) + variable_count_offset, context.variable_count);
1663 }
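// Shape of the reply assembled above (sketched from this code): an int32 argCnt
// (argument registers, longs and doubles counting twice), an int32 slot count patched
// in afterwards, then per variable:
//   int64 codeIndex; string name; string signature;
//   [string genericSignature;]   // only when with_generic
//   int32 length; int32 slot     // slot already mangled so arguments come first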
1664 
1665 void Dbg::OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value,
1666                                   JDWP::ExpandBuf* pReply) {
1667   ArtMethod* m = FromMethodId(method_id);
1668   JDWP::JdwpTag tag = BasicTagFromDescriptor(m->GetShorty());
1669   OutputJValue(tag, return_value, pReply);
1670 }
1671 
1672 void Dbg::OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value,
1673                            JDWP::ExpandBuf* pReply) {
1674   ArtField* f = FromFieldId(field_id);
1675   JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1676   OutputJValue(tag, field_value, pReply);
1677 }
1678 
1679 JDWP::JdwpError Dbg::GetBytecodes(JDWP::RefTypeId, JDWP::MethodId method_id,
1680                                   std::vector<uint8_t>* bytecodes) {
1681   ArtMethod* m = FromMethodId(method_id);
1682   if (m == nullptr) {
1683     return JDWP::ERR_INVALID_METHODID;
1684   }
1685   const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item == nullptr) {
    // Native and proxy methods have no code item to read bytecodes from; without this
    // guard the dereference below would crash. (Assumption: reporting absent info is
    // the appropriate JDWP answer for such methods.)
    return JDWP::ERR_ABSENT_INFORMATION;
  }
1686   size_t byte_count = code_item->insns_size_in_code_units_ * 2;
1687   const uint8_t* begin = reinterpret_cast<const uint8_t*>(code_item->insns_);
1688   const uint8_t* end = begin + byte_count;
1689   for (const uint8_t* p = begin; p != end; ++p) {
1690     bytecodes->push_back(*p);
1691   }
1692   return JDWP::ERR_NONE;
1693 }
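// Example (illustrative): a bare 'return-void' method has the single 16-bit code unit
// 0x000e, so insns_size_in_code_units_ == 1 and the reply carries its two raw bytes in
// target byte order (0x0e 0x00 on little-endian); the copy above does not re-encode
// the instruction stream.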
1694 
1695 JDWP::JdwpTag Dbg::GetFieldBasicTag(JDWP::FieldId field_id) {
1696   return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
1697 }
1698 
1699 JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId field_id) {
1700   return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
1701 }
1702 
1703 static JValue GetArtFieldValue(ArtField* f, mirror::Object* o)
1704     SHARED_REQUIRES(Locks::mutator_lock_) {
1705   Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
1706   JValue field_value;
1707   switch (fieldType) {
1708     case Primitive::kPrimBoolean:
1709       field_value.SetZ(f->GetBoolean(o));
1710       return field_value;
1711 
1712     case Primitive::kPrimByte:
1713       field_value.SetB(f->GetByte(o));
1714       return field_value;
1715 
1716     case Primitive::kPrimChar:
1717       field_value.SetC(f->GetChar(o));
1718       return field_value;
1719 
1720     case Primitive::kPrimShort:
1721       field_value.SetS(f->GetShort(o));
1722       return field_value;
1723 
1724     case Primitive::kPrimInt:
1725     case Primitive::kPrimFloat:
1726       // Int and Float must be treated as 32-bit values in JDWP.
1727       field_value.SetI(f->GetInt(o));
1728       return field_value;
1729 
1730     case Primitive::kPrimLong:
1731     case Primitive::kPrimDouble:
1732       // Long and Double must be treated as 64-bit values in JDWP.
1733       field_value.SetJ(f->GetLong(o));
1734       return field_value;
1735 
1736     case Primitive::kPrimNot:
1737       field_value.SetL(f->GetObject(o));
1738       return field_value;
1739 
1740     case Primitive::kPrimVoid:
1741       LOG(FATAL) << "Attempt to read from field of type 'void'";
1742       UNREACHABLE();
1743   }
1744   LOG(FATAL) << "Attempt to read from field of unknown type";
1745   UNREACHABLE();
1746 }
1747 
1748 static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id,
1749                                          JDWP::FieldId field_id, JDWP::ExpandBuf* pReply,
1750                                          bool is_static)
1751     SHARED_REQUIRES(Locks::mutator_lock_) {
1752   JDWP::JdwpError error;
1753   mirror::Class* c = DecodeClass(ref_type_id, &error);
1754   if (ref_type_id != 0 && c == nullptr) {
1755     return error;
1756   }
1757 
1758   Thread* self = Thread::Current();
1759   StackHandleScope<2> hs(self);
1760   MutableHandle<mirror::Object>
1761       o(hs.NewHandle(Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error)));
1762   if ((!is_static && o.Get() == nullptr) || error != JDWP::ERR_NONE) {
1763     return JDWP::ERR_INVALID_OBJECT;
1764   }
1765   ArtField* f = FromFieldId(field_id);
1766 
1767   mirror::Class* receiver_class = c;
1768   if (receiver_class == nullptr && o.Get() != nullptr) {
1769     receiver_class = o->GetClass();
1770   }
1771 
1772   // TODO: should we give up now if receiver_class is null?
1773   if (receiver_class != nullptr && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) {
1774     LOG(INFO) << "ERR_INVALID_FIELDID: " << PrettyField(f) << " " << PrettyClass(receiver_class);
1775     return JDWP::ERR_INVALID_FIELDID;
1776   }
1777 
1778   // Ensure the field's class is initialized.
1779   Handle<mirror::Class> klass(hs.NewHandle(f->GetDeclaringClass()));
1780   if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, klass, true, false)) {
1781     LOG(WARNING) << "Not able to initialize class for GetValues: " << PrettyClass(klass.Get());
1782   }
1783 
1784   // The RI only enforces the static/non-static mismatch in one direction.
1785   // TODO: should we change the tests and check both?
1786   if (is_static) {
1787     if (!f->IsStatic()) {
1788       return JDWP::ERR_INVALID_FIELDID;
1789     }
1790   } else {
1791     if (f->IsStatic()) {
1792       LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.GetValues"
1793                    << " on static field " << PrettyField(f);
1794     }
1795   }
1796   if (f->IsStatic()) {
1797     o.Assign(f->GetDeclaringClass());
1798   }
1799 
1800   JValue field_value(GetArtFieldValue(f, o.Get()));
1801   JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1802   Dbg::OutputJValue(tag, &field_value, pReply);
1803   return JDWP::ERR_NONE;
1804 }
1805 
1806 JDWP::JdwpError Dbg::GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
1807                                    JDWP::ExpandBuf* pReply) {
1808   return GetFieldValueImpl(0, object_id, field_id, pReply, false);
1809 }
1810 
1811 JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id,
1812                                          JDWP::ExpandBuf* pReply) {
1813   return GetFieldValueImpl(ref_type_id, 0, field_id, pReply, true);
1814 }
1815 
1816 static JDWP::JdwpError SetArtFieldValue(ArtField* f, mirror::Object* o, uint64_t value, int width)
1817     SHARED_REQUIRES(Locks::mutator_lock_) {
1818   Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
1819   // Debugging only happens at runtime so we know we are not running in a transaction.
1820   static constexpr bool kNoTransactionMode = false;
1821   switch (fieldType) {
1822     case Primitive::kPrimBoolean:
1823       CHECK_EQ(width, 1);
1824       f->SetBoolean<kNoTransactionMode>(o, static_cast<uint8_t>(value));
1825       return JDWP::ERR_NONE;
1826 
1827     case Primitive::kPrimByte:
1828       CHECK_EQ(width, 1);
1829       f->SetByte<kNoTransactionMode>(o, static_cast<uint8_t>(value));
1830       return JDWP::ERR_NONE;
1831 
1832     case Primitive::kPrimChar:
1833       CHECK_EQ(width, 2);
1834       f->SetChar<kNoTransactionMode>(o, static_cast<uint16_t>(value));
1835       return JDWP::ERR_NONE;
1836 
1837     case Primitive::kPrimShort:
1838       CHECK_EQ(width, 2);
1839       f->SetShort<kNoTransactionMode>(o, static_cast<int16_t>(value));
1840       return JDWP::ERR_NONE;
1841 
1842     case Primitive::kPrimInt:
1843     case Primitive::kPrimFloat:
1844       CHECK_EQ(width, 4);
1845       // Int and Float must be treated as 32-bit values in JDWP.
1846       f->SetInt<kNoTransactionMode>(o, static_cast<int32_t>(value));
1847       return JDWP::ERR_NONE;
1848 
1849     case Primitive::kPrimLong:
1850     case Primitive::kPrimDouble:
1851       CHECK_EQ(width, 8);
1852       // Long and Double must be treated as 64-bit values in JDWP.
1853       f->SetLong<kNoTransactionMode>(o, value);
1854       return JDWP::ERR_NONE;
1855 
1856     case Primitive::kPrimNot: {
1857       JDWP::JdwpError error;
1858       mirror::Object* v = Dbg::GetObjectRegistry()->Get<mirror::Object*>(value, &error);
1859       if (error != JDWP::ERR_NONE) {
1860         return JDWP::ERR_INVALID_OBJECT;
1861       }
1862       if (v != nullptr) {
1863         mirror::Class* field_type;
1864         {
1865           StackHandleScope<2> hs(Thread::Current());
1866           HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v));
1867           HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o));
1868           field_type = f->GetType<true>();
1869         }
1870         if (!field_type->IsAssignableFrom(v->GetClass())) {
1871           return JDWP::ERR_INVALID_OBJECT;
1872         }
1873       }
1874       f->SetObject<kNoTransactionMode>(o, v);
1875       return JDWP::ERR_NONE;
1876     }
1877 
1878     case Primitive::kPrimVoid:
1879       LOG(FATAL) << "Attempt to write to field of type 'void'";
1880       UNREACHABLE();
1881   }
1882   LOG(FATAL) << "Attempt to write to field of unknown type";
1883   UNREACHABLE();
1884 }
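// Width contract illustrated (wire widths as used by this file's JDWP handlers):
// booleans and bytes arrive as 1 byte, chars and shorts as 2, ints and floats as 4,
// longs and doubles as 8, and objects as an 8-byte id. The CHECK_EQ(width, ...) calls
// above fail fast if a debugger sends a width that disagrees with the field's type.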
1885 
1886 static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
1887                                          uint64_t value, int width, bool is_static)
1888     SHARED_REQUIRES(Locks::mutator_lock_) {
1889   JDWP::JdwpError error;
1890   Thread* self = Thread::Current();
1891   StackHandleScope<2> hs(self);
1892   MutableHandle<mirror::Object>
1893       o(hs.NewHandle(Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error)));
1894   if ((!is_static && o.Get() == nullptr) || error != JDWP::ERR_NONE) {
1895     return JDWP::ERR_INVALID_OBJECT;
1896   }
1897   ArtField* f = FromFieldId(field_id);
1898 
1899   // Ensure the field's class is initialized.
1900   Handle<mirror::Class> klass(hs.NewHandle(f->GetDeclaringClass()));
1901   if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, klass, true, false)) {
1902     LOG(WARNING) << "Not able to initialize class for SetValues: " << PrettyClass(klass.Get());
1903   }
1904 
1905   // The RI only enforces the static/non-static mismatch in one direction.
1906   // TODO: should we change the tests and check both?
1907   if (is_static) {
1908     if (!f->IsStatic()) {
1909       return JDWP::ERR_INVALID_FIELDID;
1910     }
1911   } else {
1912     if (f->IsStatic()) {
1913       LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.SetValues"
1914                    << " on static field " << PrettyField(f);
1915     }
1916   }
1917   if (f->IsStatic()) {
1918     o.Assign(f->GetDeclaringClass());
1919   }
1920   return SetArtFieldValue(f, o.Get(), value, width);
1921 }
1922 
1923 JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value,
1924                                    int width) {
1925   return SetFieldValueImpl(object_id, field_id, value, width, false);
1926 }
1927 
1928 JDWP::JdwpError Dbg::SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width) {
1929   return SetFieldValueImpl(0, field_id, value, width, true);
1930 }
1931 
1932 JDWP::JdwpError Dbg::StringToUtf8(JDWP::ObjectId string_id, std::string* str) {
1933   JDWP::JdwpError error;
1934   mirror::Object* obj = gRegistry->Get<mirror::Object*>(string_id, &error);
1935   if (error != JDWP::ERR_NONE) {
1936     return error;
1937   }
1938   if (obj == nullptr) {
1939     return JDWP::ERR_INVALID_OBJECT;
1940   }
1941   {
1942     ScopedObjectAccessUnchecked soa(Thread::Current());
1943     mirror::Class* java_lang_String = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_String);
1944     if (!java_lang_String->IsAssignableFrom(obj->GetClass())) {
1945       // This isn't a string.
1946       return JDWP::ERR_INVALID_STRING;
1947     }
1948   }
1949   *str = obj->AsString()->ToModifiedUtf8();
1950   return JDWP::ERR_NONE;
1951 }
1952 
1953 void Dbg::OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply) {
1954   if (IsPrimitiveTag(tag)) {
1955     expandBufAdd1(pReply, tag);
1956     if (tag == JDWP::JT_BOOLEAN || tag == JDWP::JT_BYTE) {
1957       expandBufAdd1(pReply, return_value->GetI());
1958     } else if (tag == JDWP::JT_CHAR || tag == JDWP::JT_SHORT) {
1959       expandBufAdd2BE(pReply, return_value->GetI());
1960     } else if (tag == JDWP::JT_FLOAT || tag == JDWP::JT_INT) {
1961       expandBufAdd4BE(pReply, return_value->GetI());
1962     } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1963       expandBufAdd8BE(pReply, return_value->GetJ());
1964     } else {
1965       CHECK_EQ(tag, JDWP::JT_VOID);
1966     }
1967   } else {
1968     ScopedObjectAccessUnchecked soa(Thread::Current());
1969     mirror::Object* value = return_value->GetL();
1970     expandBufAdd1(pReply, TagFromObject(soa, value));
1971     expandBufAddObjectId(pReply, gRegistry->Add(value));
1972   }
1973 }
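// Encoding example (illustrative): a method returning (int) 42 is written as the tag
// byte 'I' followed by the big-endian bytes 0x0000002A; an object result is written as
// its runtime tag (e.g. 's' for a String) followed by an 8-byte ObjectId freshly
// registered with gRegistry.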
1974 
1975 JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string* name) {
1976   ScopedObjectAccessUnchecked soa(Thread::Current());
1977   JDWP::JdwpError error;
1978   DecodeThread(soa, thread_id, &error);
1979   if (error != JDWP::ERR_NONE && error != JDWP::ERR_THREAD_NOT_ALIVE) {
1980     return error;
1981   }
1982 
1983   // We still need to report the zombie threads' names, so we can't just call Thread::GetThreadName.
1984   mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id, &error);
1985   CHECK(thread_object != nullptr) << error;
1986   ArtField* java_lang_Thread_name_field =
1987       soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
1988   mirror::String* s =
1989       reinterpret_cast<mirror::String*>(java_lang_Thread_name_field->GetObject(thread_object));
1990   if (s != nullptr) {
1991     *name = s->ToModifiedUtf8();
1992   }
1993   return JDWP::ERR_NONE;
1994 }
1995 
1996 JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
1997   ScopedObjectAccessUnchecked soa(Thread::Current());
1998   JDWP::JdwpError error;
1999   mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id, &error);
2000   if (error != JDWP::ERR_NONE) {
2001     return JDWP::ERR_INVALID_OBJECT;
2002   }
2003   ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroup");
2004   // Okay, so it's an object, but is it actually a thread?
2005   DecodeThread(soa, thread_id, &error);
2006   if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
2007     // Zombie threads are in the null group.
2008     expandBufAddObjectId(pReply, JDWP::ObjectId(0));
2009     error = JDWP::ERR_NONE;
2010   } else if (error == JDWP::ERR_NONE) {
2011     mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
2012     CHECK(c != nullptr);
2013     ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
2014     CHECK(f != nullptr);
2015     mirror::Object* group = f->GetObject(thread_object);
2016     CHECK(group != nullptr);
2017     JDWP::ObjectId thread_group_id = gRegistry->Add(group);
2018     expandBufAddObjectId(pReply, thread_group_id);
2019   }
2020   return error;
2021 }
2022 
2023 static mirror::Object* DecodeThreadGroup(ScopedObjectAccessUnchecked& soa,
2024                                          JDWP::ObjectId thread_group_id, JDWP::JdwpError* error)
2025     SHARED_REQUIRES(Locks::mutator_lock_) {
2026   mirror::Object* thread_group = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_group_id,
2027                                                                                 error);
2028   if (*error != JDWP::ERR_NONE) {
2029     return nullptr;
2030   }
2031   if (thread_group == nullptr) {
2032     *error = JDWP::ERR_INVALID_OBJECT;
2033     return nullptr;
2034   }
2035   mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
2036   CHECK(c != nullptr);
2037   if (!c->IsAssignableFrom(thread_group->GetClass())) {
2038     // This is not a java.lang.ThreadGroup.
2039     *error = JDWP::ERR_INVALID_THREAD_GROUP;
2040     return nullptr;
2041   }
2042   *error = JDWP::ERR_NONE;
2043   return thread_group;
2044 }
2045 
2046 JDWP::JdwpError Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) {
2047   ScopedObjectAccessUnchecked soa(Thread::Current());
2048   JDWP::JdwpError error;
2049   mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
2050   if (error != JDWP::ERR_NONE) {
2051     return error;
2052   }
2053   ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroupName");
2054   ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
2055   CHECK(f != nullptr);
2056   mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group));
2057 
2058   std::string thread_group_name(s->ToModifiedUtf8());
2059   expandBufAddUtf8String(pReply, thread_group_name);
2060   return JDWP::ERR_NONE;
2061 }
2062 
2063 JDWP::JdwpError Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) {
2064   ScopedObjectAccessUnchecked soa(Thread::Current());
2065   JDWP::JdwpError error;
2066   mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
2067   if (error != JDWP::ERR_NONE) {
2068     return error;
2069   }
2070   mirror::Object* parent;
2071   {
2072     ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroupParent");
2073     ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_parent);
2074     CHECK(f != nullptr);
2075     parent = f->GetObject(thread_group);
2076   }
2077   JDWP::ObjectId parent_group_id = gRegistry->Add(parent);
2078   expandBufAddObjectId(pReply, parent_group_id);
2079   return JDWP::ERR_NONE;
2080 }
2081 
2082 static void GetChildThreadGroups(ScopedObjectAccessUnchecked& soa, mirror::Object* thread_group,
2083                                  std::vector<JDWP::ObjectId>* child_thread_group_ids)
2084     SHARED_REQUIRES(Locks::mutator_lock_) {
2085   CHECK(thread_group != nullptr);
2086 
2087   // Get the int "ngroups" count of this thread group...
2088   ArtField* ngroups_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_ngroups);
2089   CHECK(ngroups_field != nullptr);
2090   const int32_t size = ngroups_field->GetInt(thread_group);
2091   if (size == 0) {
2092     return;
2093   }
2094 
2095   // Get the ThreadGroup[] "groups" out of this thread group...
2096   ArtField* groups_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_groups);
2097   mirror::Object* groups_array = groups_field->GetObject(thread_group);
2098 
2099   CHECK(groups_array != nullptr);
2100   CHECK(groups_array->IsObjectArray());
2101 
2102   mirror::ObjectArray<mirror::Object>* groups_array_as_array =
2103       groups_array->AsObjectArray<mirror::Object>();
2104 
2105   // Copy the first 'size' elements out of the array into the result.
2106   ObjectRegistry* registry = Dbg::GetObjectRegistry();
2107   for (int32_t i = 0; i < size; ++i) {
2108     child_thread_group_ids->push_back(registry->Add(groups_array_as_array->Get(i)));
2109   }
2110 }
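// The reflection above assumes the libcore java.lang.ThreadGroup layout, roughly:
//
//   class ThreadGroup {
//     int ngroups;            // live prefix length of 'groups'
//     ThreadGroup[] groups;   // may be longer than ngroups; only [0, ngroups) is live
//   }
//
// which is why only the first 'size' array slots are copied into the result.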
2111 
2112 JDWP::JdwpError Dbg::GetThreadGroupChildren(JDWP::ObjectId thread_group_id,
2113                                             JDWP::ExpandBuf* pReply) {
2114   ScopedObjectAccessUnchecked soa(Thread::Current());
2115   JDWP::JdwpError error;
2116   mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
2117   if (error != JDWP::ERR_NONE) {
2118     return error;
2119   }
2120 
2121   // Add child threads.
2122   {
2123     std::vector<JDWP::ObjectId> child_thread_ids;
2124     GetThreads(thread_group, &child_thread_ids);
2125     expandBufAdd4BE(pReply, child_thread_ids.size());
2126     for (JDWP::ObjectId child_thread_id : child_thread_ids) {
2127       expandBufAddObjectId(pReply, child_thread_id);
2128     }
2129   }
2130 
2131   // Add child thread groups.
2132   {
2133     std::vector<JDWP::ObjectId> child_thread_groups_ids;
2134     GetChildThreadGroups(soa, thread_group, &child_thread_groups_ids);
2135     expandBufAdd4BE(pReply, child_thread_groups_ids.size());
2136     for (JDWP::ObjectId child_thread_group_id : child_thread_groups_ids) {
2137       expandBufAddObjectId(pReply, child_thread_group_id);
2138     }
2139   }
2140 
2141   return JDWP::ERR_NONE;
2142 }
2143 
2144 JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
2145   ScopedObjectAccessUnchecked soa(Thread::Current());
2146   ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
2147   mirror::Object* group = f->GetObject(f->GetDeclaringClass());
2148   return gRegistry->Add(group);
2149 }
2150 
2151 JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) {
2152   switch (state) {
2153     case kBlocked:
2154       return JDWP::TS_MONITOR;
2155     case kNative:
2156     case kRunnable:
2157     case kSuspended:
2158       return JDWP::TS_RUNNING;
2159     case kSleeping:
2160       return JDWP::TS_SLEEPING;
2161     case kStarting:
2162     case kTerminated:
2163       return JDWP::TS_ZOMBIE;
2164     case kTimedWaiting:
2165     case kWaitingForCheckPointsToRun:
2166     case kWaitingForDebuggerSend:
2167     case kWaitingForDebuggerSuspension:
2168     case kWaitingForDebuggerToAttach:
2169     case kWaitingForDeoptimization:
2170     case kWaitingForGcToComplete:
2171     case kWaitingForGetObjectsAllocated:
2172     case kWaitingForJniOnLoad:
2173     case kWaitingForMethodTracingStart:
2174     case kWaitingForSignalCatcherOutput:
2175     case kWaitingForVisitObjects:
2176     case kWaitingInMainDebuggerLoop:
2177     case kWaitingInMainSignalCatcherLoop:
2178     case kWaitingPerformingGc:
2179     case kWaitingWeakGcRootRead:
2180     case kWaitingForGcThreadFlip:
2181     case kWaiting:
2182       return JDWP::TS_WAIT;
2183       // Don't add a 'default' here so the compiler can spot incompatible enum changes.
2184   }
2185   LOG(FATAL) << "Unknown thread state: " << state;
2186   return JDWP::TS_ZOMBIE;
2187 }
2188 
2189 JDWP::JdwpError Dbg::GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus,
2190                                      JDWP::JdwpSuspendStatus* pSuspendStatus) {
2191   ScopedObjectAccess soa(Thread::Current());
2192 
2193   *pSuspendStatus = JDWP::SUSPEND_STATUS_NOT_SUSPENDED;
2194 
2195   JDWP::JdwpError error;
2196   Thread* thread = DecodeThread(soa, thread_id, &error);
2197   if (error != JDWP::ERR_NONE) {
2198     if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
2199       *pThreadStatus = JDWP::TS_ZOMBIE;
2200       return JDWP::ERR_NONE;
2201     }
2202     return error;
2203   }
2204 
2205   if (IsSuspendedForDebugger(soa, thread)) {
2206     *pSuspendStatus = JDWP::SUSPEND_STATUS_SUSPENDED;
2207   }
2208 
2209   *pThreadStatus = ToJdwpThreadStatus(thread->GetState());
2210   return JDWP::ERR_NONE;
2211 }
2212 
2213 JDWP::JdwpError Dbg::GetThreadDebugSuspendCount(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
2214   ScopedObjectAccess soa(Thread::Current());
2215   JDWP::JdwpError error;
2216   Thread* thread = DecodeThread(soa, thread_id, &error);
2217   if (error != JDWP::ERR_NONE) {
2218     return error;
2219   }
2220   MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
2221   expandBufAdd4BE(pReply, thread->GetDebugSuspendCount());
2222   return JDWP::ERR_NONE;
2223 }
2224 
2225 JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) {
2226   ScopedObjectAccess soa(Thread::Current());
2227   JDWP::JdwpError error;
2228   Thread* thread = DecodeThread(soa, thread_id, &error);
2229   if (error != JDWP::ERR_NONE) {
2230     return error;
2231   }
2232   thread->Interrupt(soa.Self());
2233   return JDWP::ERR_NONE;
2234 }
2235 
2236 static bool IsInDesiredThreadGroup(ScopedObjectAccessUnchecked& soa,
2237                                    mirror::Object* desired_thread_group, mirror::Object* peer)
2238     SHARED_REQUIRES(Locks::mutator_lock_) {
2239   // Do we want threads from all thread groups?
2240   if (desired_thread_group == nullptr) {
2241     return true;
2242   }
2243   ArtField* thread_group_field = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
2244   DCHECK(thread_group_field != nullptr);
2245   mirror::Object* group = thread_group_field->GetObject(peer);
2246   return (group == desired_thread_group);
2247 }
2248 
2249 void Dbg::GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* thread_ids) {
2250   ScopedObjectAccessUnchecked soa(Thread::Current());
2251   std::list<Thread*> all_threads_list;
2252   {
2253     MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
2254     all_threads_list = Runtime::Current()->GetThreadList()->GetList();
2255   }
2256   for (Thread* t : all_threads_list) {
2257     if (t == Dbg::GetDebugThread()) {
2258       // Skip the JDWP thread. Some debuggers get bent out of shape when they can't suspend and
2259       // query all threads, so it's easier if we just don't tell them about this thread.
2260       continue;
2261     }
2262     if (t->IsStillStarting()) {
2263       // This thread is being started (and has been registered in the thread list). However, it is
2264       // not completely started yet so we must ignore it.
2265       continue;
2266     }
2267     mirror::Object* peer = t->GetPeer();
2268     if (peer == nullptr) {
2269       // peer might be null if the thread is still starting up. We can't tell the debugger about
2270       // this thread yet.
2271       // TODO: if we identified threads to the debugger by their Thread*
2272       // rather than their peer's mirror::Object*, we could fix this.
2273       // Doing so might help us report ZOMBIE threads too.
2274       continue;
2275     }
2276     if (IsInDesiredThreadGroup(soa, thread_group, peer)) {
2277       thread_ids->push_back(gRegistry->Add(peer));
2278     }
2279   }
2280 }
2281 
2282 static int GetStackDepth(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_) {
2283   struct CountStackDepthVisitor : public StackVisitor {
2284     explicit CountStackDepthVisitor(Thread* thread_in)
2285         : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
2286           depth(0) {}
2287 
2288     // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2289     // annotalysis.
2290     bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2291       if (!GetMethod()->IsRuntimeMethod()) {
2292         ++depth;
2293       }
2294       return true;
2295     }
2296     size_t depth;
2297   };
2298 
2299   CountStackDepthVisitor visitor(thread);
2300   visitor.WalkStack();
2301   return visitor.depth;
2302 }
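// Example (illustrative): for a suspended thread whose stack is
//   [runtime trampoline] -> Foo.bar() -> Foo.main()
// the visitor reports depth 2; runtime frames carry no method the debugger could use,
// so they are skipped here exactly as in GetThreadFrames below.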
2303 
2304 JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* result) {
2305   ScopedObjectAccess soa(Thread::Current());
2306   JDWP::JdwpError error;
2307   *result = 0;
2308   Thread* thread = DecodeThread(soa, thread_id, &error);
2309   if (error != JDWP::ERR_NONE) {
2310     return error;
2311   }
2312   if (!IsSuspendedForDebugger(soa, thread)) {
2313     return JDWP::ERR_THREAD_NOT_SUSPENDED;
2314   }
2315   *result = GetStackDepth(thread);
2316   return JDWP::ERR_NONE;
2317 }
2318 
2319 JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
2320                                      size_t frame_count, JDWP::ExpandBuf* buf) {
2321   class GetFrameVisitor : public StackVisitor {
2322    public:
2323     GetFrameVisitor(Thread* thread, size_t start_frame_in, size_t frame_count_in,
2324                     JDWP::ExpandBuf* buf_in)
2325         SHARED_REQUIRES(Locks::mutator_lock_)
2326         : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
2327           depth_(0),
2328           start_frame_(start_frame_in),
2329           frame_count_(frame_count_in),
2330           buf_(buf_in) {
2331       expandBufAdd4BE(buf_, frame_count_);
2332     }
2333 
2334     bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
2335       if (GetMethod()->IsRuntimeMethod()) {
2336         return true;  // The debugger can't do anything useful with a frame that has no Method*.
2337       }
2338       if (depth_ >= start_frame_ + frame_count_) {
2339         return false;
2340       }
2341       if (depth_ >= start_frame_) {
2342         JDWP::FrameId frame_id(GetFrameId());
2343         JDWP::JdwpLocation location;
2344         SetJdwpLocation(&location, GetMethod(), GetDexPc());
2345         VLOG(jdwp) << StringPrintf("    Frame %3zd: id=%3" PRIu64 " ", depth_, frame_id) << location;
2346         expandBufAdd8BE(buf_, frame_id);
2347         expandBufAddLocation(buf_, location);
2348       }
2349       ++depth_;
2350       return true;
2351     }
2352 
2353    private:
2354     size_t depth_;
2355     const size_t start_frame_;
2356     const size_t frame_count_;
2357     JDWP::ExpandBuf* buf_;
2358   };
2359 
2360   ScopedObjectAccessUnchecked soa(Thread::Current());
2361   JDWP::JdwpError error;
2362   Thread* thread = DecodeThread(soa, thread_id, &error);
2363   if (error != JDWP::ERR_NONE) {
2364     return error;
2365   }
2366   if (!IsSuspendedForDebugger(soa, thread)) {
2367     return JDWP::ERR_THREAD_NOT_SUSPENDED;
2368   }
2369   GetFrameVisitor visitor(thread, start_frame, frame_count, buf);
2370   visitor.WalkStack();
2371   return JDWP::ERR_NONE;
2372 }
2373 
2374 JDWP::ObjectId Dbg::GetThreadSelfId() {
2375   return GetThreadId(Thread::Current());
2376 }
2377 
2378 JDWP::ObjectId Dbg::GetThreadId(Thread* thread) {
2379   ScopedObjectAccessUnchecked soa(Thread::Current());
2380   return gRegistry->Add(thread->GetPeer());
2381 }
2382 
2383 void Dbg::SuspendVM() {
2384   Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
2385 }
2386 
2387 void Dbg::ResumeVM() {
2388   Runtime::Current()->GetThreadList()->ResumeAllForDebugger();
2389 }
2390 
2391 JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) {
2392   Thread* self = Thread::Current();
2393   ScopedLocalRef<jobject> peer(self->GetJniEnv(), nullptr);
2394   {
2395     ScopedObjectAccess soa(self);
2396     JDWP::JdwpError error;
2397     peer.reset(soa.AddLocalReference<jobject>(gRegistry->Get<mirror::Object*>(thread_id, &error)));
2398   }
2399   if (peer.get() == nullptr) {
2400     return JDWP::ERR_THREAD_NOT_ALIVE;
2401   }
2402   // Suspend thread to build stack trace.
2403   bool timed_out;
2404   ThreadList* thread_list = Runtime::Current()->GetThreadList();
2405   Thread* thread = thread_list->SuspendThreadByPeer(peer.get(), request_suspension, true,
2406                                                     &timed_out);
2407   if (thread != nullptr) {
2408     return JDWP::ERR_NONE;
2409   } else if (timed_out) {
2410     return JDWP::ERR_INTERNAL;
2411   } else {
2412     return JDWP::ERR_THREAD_NOT_ALIVE;
2413   }
2414 }
2415 
2416 void Dbg::ResumeThread(JDWP::ObjectId thread_id) {
2417   ScopedObjectAccessUnchecked soa(Thread::Current());
2418   JDWP::JdwpError error;
2419   mirror::Object* peer = gRegistry->Get<mirror::Object*>(thread_id, &error);
2420   CHECK(peer != nullptr) << error;
2421   Thread* thread;
2422   {
2423     MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2424     thread = Thread::FromManagedThread(soa, peer);
2425   }
2426   if (thread == nullptr) {
2427     LOG(WARNING) << "No such thread for resume: " << peer;
2428     return;
2429   }
2430   bool needs_resume;
2431   {
2432     MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
2433     needs_resume = thread->GetSuspendCount() > 0;
2434   }
2435   if (needs_resume) {
2436     Runtime::Current()->GetThreadList()->Resume(thread, true);
2437   }
2438 }
2439 
2440 void Dbg::SuspendSelf() {
2441   Runtime::Current()->GetThreadList()->SuspendSelfForDebugger();
2442 }
2443 
2444 struct GetThisVisitor : public StackVisitor {
2445   GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id_in)
2446       SHARED_REQUIRES(Locks::mutator_lock_)
2447       : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
2448         this_object(nullptr),
2449         frame_id(frame_id_in) {}
2450 
2451   // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2452   // annotalysis.
2453   virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2454     if (frame_id != GetFrameId()) {
2455       return true;  // continue
2456     } else {
2457       this_object = GetThisObject();
2458       return false;
2459     }
2460   }
2461 
2462   mirror::Object* this_object;
2463   JDWP::FrameId frame_id;
2464 };
2465 
GetThisObject(JDWP::ObjectId thread_id,JDWP::FrameId frame_id,JDWP::ObjectId * result)2466 JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
2467                                    JDWP::ObjectId* result) {
2468   ScopedObjectAccessUnchecked soa(Thread::Current());
2469   JDWP::JdwpError error;
2470   Thread* thread = DecodeThread(soa, thread_id, &error);
2471   if (error != JDWP::ERR_NONE) {
2472     return error;
2473   }
2474   if (!IsSuspendedForDebugger(soa, thread)) {
2475     return JDWP::ERR_THREAD_NOT_SUSPENDED;
2476   }
2477   std::unique_ptr<Context> context(Context::Create());
2478   GetThisVisitor visitor(thread, context.get(), frame_id);
2479   visitor.WalkStack();
2480   *result = gRegistry->Add(visitor.this_object);
2481   return JDWP::ERR_NONE;
2482 }
2483 
2484 // Walks the stack until we find the frame with the given FrameId.
2485 class FindFrameVisitor FINAL : public StackVisitor {
2486  public:
2487   FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
2488       SHARED_REQUIRES(Locks::mutator_lock_)
2489       : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
2490         frame_id_(frame_id),
2491         error_(JDWP::ERR_INVALID_FRAMEID) {}
2492 
2493   // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2494   // annotalysis.
2495   bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2496     if (GetFrameId() != frame_id_) {
2497       return true;  // Not our frame, carry on.
2498     }
2499     ArtMethod* m = GetMethod();
2500     if (m->IsNative()) {
2501       // We can't read/write local values from/into a native method.
2502       error_ = JDWP::ERR_OPAQUE_FRAME;
2503     } else {
2504       // We found our frame.
2505       error_ = JDWP::ERR_NONE;
2506     }
2507     return false;
2508   }
2509 
2510   JDWP::JdwpError GetError() const {
2511     return error_;
2512   }
2513 
2514  private:
2515   const JDWP::FrameId frame_id_;
2516   JDWP::JdwpError error_;
2517 
2518   DISALLOW_COPY_AND_ASSIGN(FindFrameVisitor);
2519 };
2520 
2521 JDWP::JdwpError Dbg::GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pReply) {
2522   JDWP::ObjectId thread_id = request->ReadThreadId();
2523   JDWP::FrameId frame_id = request->ReadFrameId();
2524 
2525   ScopedObjectAccessUnchecked soa(Thread::Current());
2526   JDWP::JdwpError error;
2527   Thread* thread = DecodeThread(soa, thread_id, &error);
2528   if (error != JDWP::ERR_NONE) {
2529     return error;
2530   }
2531   if (!IsSuspendedForDebugger(soa, thread)) {
2532     return JDWP::ERR_THREAD_NOT_SUSPENDED;
2533   }
2534   // Find the frame with the given frame_id.
2535   std::unique_ptr<Context> context(Context::Create());
2536   FindFrameVisitor visitor(thread, context.get(), frame_id);
2537   visitor.WalkStack();
2538   if (visitor.GetError() != JDWP::ERR_NONE) {
2539     return visitor.GetError();
2540   }
2541 
2542   // Read the values from visitor's context.
2543   int32_t slot_count = request->ReadSigned32("slot count");
2544   expandBufAdd4BE(pReply, slot_count);     /* "int values" */
2545   for (int32_t i = 0; i < slot_count; ++i) {
2546     uint32_t slot = request->ReadUnsigned32("slot");
2547     JDWP::JdwpTag reqSigByte = request->ReadTag();
2548 
2549     VLOG(jdwp) << "    --> slot " << slot << " " << reqSigByte;
2550 
2551     size_t width = Dbg::GetTagWidth(reqSigByte);
2552     uint8_t* ptr = expandBufAddSpace(pReply, width + 1);
2553     error = Dbg::GetLocalValue(visitor, soa, slot, reqSigByte, ptr, width);
2554     if (error != JDWP::ERR_NONE) {
2555       return error;
2556     }
2557   }
2558   return JDWP::ERR_NONE;
2559 }
2560 
2561 constexpr JDWP::JdwpError kStackFrameLocalAccessError = JDWP::ERR_ABSENT_INFORMATION;
2562 
2563 static std::string GetStackContextAsString(const StackVisitor& visitor)
2564     SHARED_REQUIRES(Locks::mutator_lock_) {
2565   return StringPrintf(" at DEX pc 0x%08x in method %s", visitor.GetDexPc(false),
2566                       PrettyMethod(visitor.GetMethod()).c_str());
2567 }
2568 
2569 static JDWP::JdwpError FailGetLocalValue(const StackVisitor& visitor, uint16_t vreg,
2570                                          JDWP::JdwpTag tag)
2571     SHARED_REQUIRES(Locks::mutator_lock_) {
2572   LOG(ERROR) << "Failed to read " << tag << " local from register v" << vreg
2573              << GetStackContextAsString(visitor);
2574   return kStackFrameLocalAccessError;
2575 }
2576 
2577 JDWP::JdwpError Dbg::GetLocalValue(const StackVisitor& visitor, ScopedObjectAccessUnchecked& soa,
2578                                    int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) {
2579   ArtMethod* m = visitor.GetMethod();
2580   JDWP::JdwpError error = JDWP::ERR_NONE;
2581   uint16_t vreg = DemangleSlot(slot, m, &error);
2582   if (error != JDWP::ERR_NONE) {
2583     return error;
2584   }
2585   // TODO: check that the tag is compatible with the actual type of the slot!
2586   switch (tag) {
2587     case JDWP::JT_BOOLEAN: {
2588       CHECK_EQ(width, 1U);
2589       uint32_t intVal;
2590       if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
2591         return FailGetLocalValue(visitor, vreg, tag);
2592       }
2593       VLOG(jdwp) << "get boolean local " << vreg << " = " << intVal;
2594       JDWP::Set1(buf + 1, intVal != 0);
2595       break;
2596     }
2597     case JDWP::JT_BYTE: {
2598       CHECK_EQ(width, 1U);
2599       uint32_t intVal;
2600       if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
2601         return FailGetLocalValue(visitor, vreg, tag);
2602       }
2603       VLOG(jdwp) << "get byte local " << vreg << " = " << intVal;
2604       JDWP::Set1(buf + 1, intVal);
2605       break;
2606     }
2607     case JDWP::JT_SHORT:
2608     case JDWP::JT_CHAR: {
2609       CHECK_EQ(width, 2U);
2610       uint32_t intVal;
2611       if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
2612         return FailGetLocalValue(visitor, vreg, tag);
2613       }
2614       VLOG(jdwp) << "get short/char local " << vreg << " = " << intVal;
2615       JDWP::Set2BE(buf + 1, intVal);
2616       break;
2617     }
2618     case JDWP::JT_INT: {
2619       CHECK_EQ(width, 4U);
2620       uint32_t intVal;
2621       if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
2622         return FailGetLocalValue(visitor, vreg, tag);
2623       }
2624       VLOG(jdwp) << "get int local " << vreg << " = " << intVal;
2625       JDWP::Set4BE(buf + 1, intVal);
2626       break;
2627     }
2628     case JDWP::JT_FLOAT: {
2629       CHECK_EQ(width, 4U);
2630       uint32_t intVal;
2631       if (!visitor.GetVReg(m, vreg, kFloatVReg, &intVal)) {
2632         return FailGetLocalValue(visitor, vreg, tag);
2633       }
2634       VLOG(jdwp) << "get float local " << vreg << " = " << intVal;
2635       JDWP::Set4BE(buf + 1, intVal);
2636       break;
2637     }
2638     case JDWP::JT_ARRAY:
2639     case JDWP::JT_CLASS_LOADER:
2640     case JDWP::JT_CLASS_OBJECT:
2641     case JDWP::JT_OBJECT:
2642     case JDWP::JT_STRING:
2643     case JDWP::JT_THREAD:
2644     case JDWP::JT_THREAD_GROUP: {
2645       CHECK_EQ(width, sizeof(JDWP::ObjectId));
2646       uint32_t intVal;
2647       if (!visitor.GetVReg(m, vreg, kReferenceVReg, &intVal)) {
2648         return FailGetLocalValue(visitor, vreg, tag);
2649       }
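      // References are stored in a 32-bit vreg; widen the raw bits back into a
      // heap pointer so it can be validated and registered below.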
2650       mirror::Object* o = reinterpret_cast<mirror::Object*>(intVal);
2651       VLOG(jdwp) << "get " << tag << " object local " << vreg << " = " << o;
2652       if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
2653         LOG(FATAL) << StringPrintf("Found invalid object %#" PRIxPTR " in register v%u",
2654                                    reinterpret_cast<uintptr_t>(o), vreg)
2655                                    << GetStackContextAsString(visitor);
2656         UNREACHABLE();
2657       }
2658       tag = TagFromObject(soa, o);
2659       JDWP::SetObjectId(buf + 1, gRegistry->Add(o));
2660       break;
2661     }
2662     case JDWP::JT_DOUBLE: {
2663       CHECK_EQ(width, 8U);
2664       uint64_t longVal;
2665       if (!visitor.GetVRegPair(m, vreg, kDoubleLoVReg, kDoubleHiVReg, &longVal)) {
2666         return FailGetLocalValue(visitor, vreg, tag);
2667       }
2668       VLOG(jdwp) << "get double local " << vreg << " = " << longVal;
2669       JDWP::Set8BE(buf + 1, longVal);
2670       break;
2671     }
2672     case JDWP::JT_LONG: {
2673       CHECK_EQ(width, 8U);
2674       uint64_t longVal;
2675       if (!visitor.GetVRegPair(m, vreg, kLongLoVReg, kLongHiVReg, &longVal)) {
2676         return FailGetLocalValue(visitor, vreg, tag);
2677       }
2678       VLOG(jdwp) << "get long local " << vreg << " = " << longVal;
2679       JDWP::Set8BE(buf + 1, longVal);
2680       break;
2681     }
2682     default:
2683       LOG(FATAL) << "Unknown tag " << tag;
2684       UNREACHABLE();
2685   }
2686 
2687   // Prepend tag, which may have been updated.
2688   JDWP::Set1(buf, tag);
2689   return JDWP::ERR_NONE;
2690 }
2691 
2692 JDWP::JdwpError Dbg::SetLocalValues(JDWP::Request* request) {
2693   JDWP::ObjectId thread_id = request->ReadThreadId();
2694   JDWP::FrameId frame_id = request->ReadFrameId();
2695 
2696   ScopedObjectAccessUnchecked soa(Thread::Current());
2697   JDWP::JdwpError error;
2698   Thread* thread = DecodeThread(soa, thread_id, &error);
2699   if (error != JDWP::ERR_NONE) {
2700     return error;
2701   }
2702   if (!IsSuspendedForDebugger(soa, thread)) {
2703     return JDWP::ERR_THREAD_NOT_SUSPENDED;
2704   }
2705   // Find the frame with the given frame_id.
2706   std::unique_ptr<Context> context(Context::Create());
2707   FindFrameVisitor visitor(thread, context.get(), frame_id);
2708   visitor.WalkStack();
2709   if (visitor.GetError() != JDWP::ERR_NONE) {
2710     return visitor.GetError();
2711   }
2712 
2713   // Write the values into the visitor's context.
2714   int32_t slot_count = request->ReadSigned32("slot count");
2715   for (int32_t i = 0; i < slot_count; ++i) {
2716     uint32_t slot = request->ReadUnsigned32("slot");
2717     JDWP::JdwpTag sigByte = request->ReadTag();
2718     size_t width = Dbg::GetTagWidth(sigByte);
2719     uint64_t value = request->ReadValue(width);
2720 
2721     VLOG(jdwp) << "    --> slot " << slot << " " << sigByte << " " << value;
2722     error = Dbg::SetLocalValue(thread, visitor, slot, sigByte, value, width);
2723     if (error != JDWP::ERR_NONE) {
2724       return error;
2725     }
2726   }
2727   return JDWP::ERR_NONE;
2728 }
2729 
2730 template<typename T>
2731 static JDWP::JdwpError FailSetLocalValue(const StackVisitor& visitor, uint16_t vreg,
2732                                          JDWP::JdwpTag tag, T value)
2733     SHARED_REQUIRES(Locks::mutator_lock_) {
2734   LOG(ERROR) << "Failed to write " << tag << " local " << value
2735              << " (0x" << std::hex << value << ") into register v" << vreg
2736              << GetStackContextAsString(visitor);
2737   return kStackFrameLocalAccessError;
2738 }
2739 
2740 JDWP::JdwpError Dbg::SetLocalValue(Thread* thread, StackVisitor& visitor, int slot,
2741                                    JDWP::JdwpTag tag, uint64_t value, size_t width) {
2742   ArtMethod* m = visitor.GetMethod();
2743   JDWP::JdwpError error = JDWP::ERR_NONE;
2744   uint16_t vreg = DemangleSlot(slot, m, &error);
2745   if (error != JDWP::ERR_NONE) {
2746     return error;
2747   }
2748   // TODO: check that the tag is compatible with the actual type of the slot!
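  // 'value' holds the raw JDWP-encoded payload; only its low 'width' bytes are
  // meaningful, so the primitive cases below narrow it before writing the vreg.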
2749   switch (tag) {
2750     case JDWP::JT_BOOLEAN:
2751     case JDWP::JT_BYTE:
2752       CHECK_EQ(width, 1U);
2753       if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kIntVReg)) {
2754         return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
2755       }
2756       break;
2757     case JDWP::JT_SHORT:
2758     case JDWP::JT_CHAR:
2759       CHECK_EQ(width, 2U);
2760       if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kIntVReg)) {
2761         return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
2762       }
2763       break;
2764     case JDWP::JT_INT:
2765       CHECK_EQ(width, 4U);
2766       if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kIntVReg)) {
2767         return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
2768       }
2769       break;
2770     case JDWP::JT_FLOAT:
2771       CHECK_EQ(width, 4U);
2772       if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kFloatVReg)) {
2773         return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
2774       }
2775       break;
2776     case JDWP::JT_ARRAY:
2777     case JDWP::JT_CLASS_LOADER:
2778     case JDWP::JT_CLASS_OBJECT:
2779     case JDWP::JT_OBJECT:
2780     case JDWP::JT_STRING:
2781     case JDWP::JT_THREAD:
2782     case JDWP::JT_THREAD_GROUP: {
2783       CHECK_EQ(width, sizeof(JDWP::ObjectId));
2784       mirror::Object* o = gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value),
2785                                                           &error);
2786       if (error != JDWP::ERR_NONE) {
2787         VLOG(jdwp) << tag << " object " << o << " is an invalid object";
2788         return JDWP::ERR_INVALID_OBJECT;
2789       }
2790       if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)),
2791                                  kReferenceVReg)) {
2792         return FailSetLocalValue(visitor, vreg, tag, reinterpret_cast<uintptr_t>(o));
2793       }
2794       break;
2795     }
2796     case JDWP::JT_DOUBLE: {
2797       CHECK_EQ(width, 8U);
2798       if (!visitor.SetVRegPair(m, vreg, value, kDoubleLoVReg, kDoubleHiVReg)) {
2799         return FailSetLocalValue(visitor, vreg, tag, value);
2800       }
2801       break;
2802     }
2803     case JDWP::JT_LONG: {
2804       CHECK_EQ(width, 8U);
2805       if (!visitor.SetVRegPair(m, vreg, value, kLongLoVReg, kLongHiVReg)) {
2806         return FailSetLocalValue(visitor, vreg, tag, value);
2807       }
2808       break;
2809     }
2810     default:
2811       LOG(FATAL) << "Unknown tag " << tag;
2812       UNREACHABLE();
2813   }
2814 
2815   // If we set the local variable in a compiled frame, we need to trigger a deoptimization of
2816   // the stack so we continue execution with the interpreter using the new value(s) of the updated
2817   // local variable(s). To achieve this, we install the instrumentation exit stub on each method of the
2818   // thread's stack. The stub will cause the deoptimization to happen.
2819   if (!visitor.IsShadowFrame() && thread->HasDebuggerShadowFrames()) {
2820     Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(thread);
2821   }
2822 
2823   return JDWP::ERR_NONE;
2824 }
2825 
2826 static void SetEventLocation(JDWP::EventLocation* location, ArtMethod* m, uint32_t dex_pc)
2827     SHARED_REQUIRES(Locks::mutator_lock_) {
2828   DCHECK(location != nullptr);
2829   if (m == nullptr) {
2830     memset(location, 0, sizeof(*location));
2831   } else {
2832     location->method = GetCanonicalMethod(m);
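    // Native and proxy methods have no dex code, so report dex pc -1 to mean
    // "unknown location".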
2833     location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint32_t>(-1) : dex_pc;
2834   }
2835 }
2836 
2837 void Dbg::PostLocationEvent(ArtMethod* m, int dex_pc, mirror::Object* this_object,
2838                             int event_flags, const JValue* return_value) {
2839   if (!IsDebuggerActive()) {
2840     return;
2841   }
2842   DCHECK(m != nullptr);
2843   DCHECK_EQ(m->IsStatic(), this_object == nullptr);
2844   JDWP::EventLocation location;
2845   SetEventLocation(&location, m, dex_pc);
2846 
2847   // We need to be sure no exception is pending when calling JdwpState::PostLocationEvent.
2848   // This is required to be able to call JNI functions to create JDWP ids. To achieve this,
2849   // we temporarily clear the current thread's exception (if any) and will restore it after
2850   // the call.
2851   // Note: the only way to get a pending exception here is to suspend on a move-exception
2852   // instruction.
2853   Thread* const self = Thread::Current();
2854   StackHandleScope<1> hs(self);
2855   Handle<mirror::Throwable> pending_exception(hs.NewHandle(self->GetException()));
2856   self->ClearException();
2857   if (kIsDebugBuild && pending_exception.Get() != nullptr) {
2858     const DexFile::CodeItem* code_item = location.method->GetCodeItem();
2859     const Instruction* instr = Instruction::At(&code_item->insns_[location.dex_pc]);
2860     CHECK_EQ(Instruction::MOVE_EXCEPTION, instr->Opcode());
2861   }
2862 
2863   gJdwpState->PostLocationEvent(&location, this_object, event_flags, return_value);
2864 
2865   if (pending_exception.Get() != nullptr) {
2866     self->SetException(pending_exception.Get());
2867   }
2868 }
2869 
2870 void Dbg::PostFieldAccessEvent(ArtMethod* m, int dex_pc,
2871                                mirror::Object* this_object, ArtField* f) {
2872   if (!IsDebuggerActive()) {
2873     return;
2874   }
2875   DCHECK(m != nullptr);
2876   DCHECK(f != nullptr);
2877   JDWP::EventLocation location;
2878   SetEventLocation(&location, m, dex_pc);
2879 
2880   gJdwpState->PostFieldEvent(&location, f, this_object, nullptr, false);
2881 }
2882 
2883 void Dbg::PostFieldModificationEvent(ArtMethod* m, int dex_pc,
2884                                      mirror::Object* this_object, ArtField* f,
2885                                      const JValue* field_value) {
2886   if (!IsDebuggerActive()) {
2887     return;
2888   }
2889   DCHECK(m != nullptr);
2890   DCHECK(f != nullptr);
2891   DCHECK(field_value != nullptr);
2892   JDWP::EventLocation location;
2893   SetEventLocation(&location, m, dex_pc);
2894 
2895   gJdwpState->PostFieldEvent(&location, f, this_object, field_value, true);
2896 }
2897 
2898 /**
2899  * Finds the location where this exception will be caught. We walk the stack until we find
2900  * a catch handler; if we reach the top frame without finding one, the exception is uncaught.
2901  */
2902 class CatchLocationFinder : public StackVisitor {
2903  public:
2904   CatchLocationFinder(Thread* self, const Handle<mirror::Throwable>& exception, Context* context)
2905       SHARED_REQUIRES(Locks::mutator_lock_)
2906     : StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
2907       exception_(exception),
2908       handle_scope_(self),
2909       this_at_throw_(handle_scope_.NewHandle<mirror::Object>(nullptr)),
2910       catch_method_(nullptr),
2911       throw_method_(nullptr),
2912       catch_dex_pc_(DexFile::kDexNoIndex),
2913       throw_dex_pc_(DexFile::kDexNoIndex) {
2914   }
2915 
2916   bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
2917     ArtMethod* method = GetMethod();
2918     DCHECK(method != nullptr);
2919     if (method->IsRuntimeMethod()) {
2920       // Ignore callee save method.
2921       DCHECK(method->IsCalleeSaveMethod());
2922       return true;
2923     }
2924 
2925     uint32_t dex_pc = GetDexPc();
2926     if (throw_method_ == nullptr) {
2927       // First Java method found. It is either the method that threw the exception,
2928       // or the Java native method that is reporting an exception thrown by
2929       // native code.
2930       this_at_throw_.Assign(GetThisObject());
2931       throw_method_ = method;
2932       throw_dex_pc_ = dex_pc;
2933     }
2934 
2935     if (dex_pc != DexFile::kDexNoIndex) {
2936       StackHandleScope<1> hs(GetThread());
2937       uint32_t found_dex_pc;
2938       Handle<mirror::Class> exception_class(hs.NewHandle(exception_->GetClass()));
2939       bool unused_clear_exception;
2940       found_dex_pc = method->FindCatchBlock(exception_class, dex_pc, &unused_clear_exception);
2941       if (found_dex_pc != DexFile::kDexNoIndex) {
2942         catch_method_ = method;
2943         catch_dex_pc_ = found_dex_pc;
2944         return false;  // End stack walk.
2945       }
2946     }
2947     return true;  // Continue stack walk.
2948   }
2949 
2950   ArtMethod* GetCatchMethod() SHARED_REQUIRES(Locks::mutator_lock_) {
2951     return catch_method_;
2952   }
2953 
2954   ArtMethod* GetThrowMethod() SHARED_REQUIRES(Locks::mutator_lock_) {
2955     return throw_method_;
2956   }
2957 
2958   mirror::Object* GetThisAtThrow() SHARED_REQUIRES(Locks::mutator_lock_) {
2959     return this_at_throw_.Get();
2960   }
2961 
2962   uint32_t GetCatchDexPc() const {
2963     return catch_dex_pc_;
2964   }
2965 
2966   uint32_t GetThrowDexPc() const {
2967     return throw_dex_pc_;
2968   }
2969 
2970  private:
2971   const Handle<mirror::Throwable>& exception_;
2972   StackHandleScope<1> handle_scope_;
2973   MutableHandle<mirror::Object> this_at_throw_;
2974   ArtMethod* catch_method_;
2975   ArtMethod* throw_method_;
2976   uint32_t catch_dex_pc_;
2977   uint32_t throw_dex_pc_;
2978 
2979   DISALLOW_COPY_AND_ASSIGN(CatchLocationFinder);
2980 };
2981 
2982 void Dbg::PostException(mirror::Throwable* exception_object) {
2983   if (!IsDebuggerActive()) {
2984     return;
2985   }
2986   Thread* const self = Thread::Current();
2987   StackHandleScope<1> handle_scope(self);
2988   Handle<mirror::Throwable> h_exception(handle_scope.NewHandle(exception_object));
2989   std::unique_ptr<Context> context(Context::Create());
2990   CatchLocationFinder clf(self, h_exception, context.get());
2991   clf.WalkStack(/* include_transitions */ false);
2992   JDWP::EventLocation exception_throw_location;
2993   SetEventLocation(&exception_throw_location, clf.GetThrowMethod(), clf.GetThrowDexPc());
2994   JDWP::EventLocation exception_catch_location;
2995   SetEventLocation(&exception_catch_location, clf.GetCatchMethod(), clf.GetCatchDexPc());
2996 
2997   gJdwpState->PostException(&exception_throw_location, h_exception.Get(), &exception_catch_location,
2998                             clf.GetThisAtThrow());
2999 }
3000 
3001 void Dbg::PostClassPrepare(mirror::Class* c) {
3002   if (!IsDebuggerActive()) {
3003     return;
3004   }
3005   gJdwpState->PostClassPrepare(c);
3006 }
3007 
3008 void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object,
3009                          ArtMethod* m, uint32_t dex_pc,
3010                          int event_flags, const JValue* return_value) {
3011   if (!IsDebuggerActive() || dex_pc == static_cast<uint32_t>(-2) /* fake method exit */) {
3012     return;
3013   }
3014 
3015   if (IsBreakpoint(m, dex_pc)) {
3016     event_flags |= kBreakpoint;
3017   }
3018 
3019   // If the debugger is single-stepping one of our threads, check to
3020   // see if we're that thread and we've reached a step point.
3021   const SingleStepControl* single_step_control = thread->GetSingleStepControl();
3022   if (single_step_control != nullptr) {
3023     CHECK(!m->IsNative());
3024     if (single_step_control->GetStepDepth() == JDWP::SD_INTO) {
3025       // Step into method calls.  We break when the line number
3026       // or method pointer changes.  If we're in SS_MIN mode, we
3027       // always stop.
3028       if (single_step_control->GetMethod() != m) {
3029         event_flags |= kSingleStep;
3030         VLOG(jdwp) << "SS new method";
3031       } else if (single_step_control->GetStepSize() == JDWP::SS_MIN) {
3032         event_flags |= kSingleStep;
3033         VLOG(jdwp) << "SS new instruction";
3034       } else if (single_step_control->ContainsDexPc(dex_pc)) {
3035         event_flags |= kSingleStep;
3036         VLOG(jdwp) << "SS new line";
3037       }
3038     } else if (single_step_control->GetStepDepth() == JDWP::SD_OVER) {
3039       // Step over method calls.  We break when the line number is
3040       // different and the frame depth is <= the original frame
3041       // depth.  (We can't just compare on the method, because we
3042       // might get unrolled past it by an exception, and it's tricky
3043       // to identify recursion.)
3044 
3045       int stack_depth = GetStackDepth(thread);
3046 
3047       if (stack_depth < single_step_control->GetStackDepth()) {
3048         // Popped up one or more frames, always trigger.
3049         event_flags |= kSingleStep;
3050         VLOG(jdwp) << "SS method pop";
3051       } else if (stack_depth == single_step_control->GetStackDepth()) {
3052         // Same depth, see if we moved.
3053         if (single_step_control->GetStepSize() == JDWP::SS_MIN) {
3054           event_flags |= kSingleStep;
3055           VLOG(jdwp) << "SS new instruction";
3056         } else if (single_step_control->ContainsDexPc(dex_pc)) {
3057           event_flags |= kSingleStep;
3058           VLOG(jdwp) << "SS new line";
3059         }
3060       }
3061     } else {
3062       CHECK_EQ(single_step_control->GetStepDepth(), JDWP::SD_OUT);
3063       // Return from the current method.  We break when the frame
3064       // depth pops up.
3065 
3066       // This differs from the "method exit" break in that it stops
3067       // with the PC at the next instruction in the returned-to
3068       // function, rather than the end of the returning function.
3069 
3070       int stack_depth = GetStackDepth(thread);
3071       if (stack_depth < single_step_control->GetStackDepth()) {
3072         event_flags |= kSingleStep;
3073         VLOG(jdwp) << "SS method pop";
3074       }
3075     }
3076   }
3077 
3078   // If there's something interesting going on, see if it matches one
3079   // of the debugger filters.
3080   if (event_flags != 0) {
3081     Dbg::PostLocationEvent(m, dex_pc, this_object, event_flags, return_value);
3082   }
3083 }
3084 
3085 size_t* Dbg::GetReferenceCounterForEvent(uint32_t instrumentation_event) {
3086   switch (instrumentation_event) {
3087     case instrumentation::Instrumentation::kMethodEntered:
3088       return &method_enter_event_ref_count_;
3089     case instrumentation::Instrumentation::kMethodExited:
3090       return &method_exit_event_ref_count_;
3091     case instrumentation::Instrumentation::kDexPcMoved:
3092       return &dex_pc_change_event_ref_count_;
3093     case instrumentation::Instrumentation::kFieldRead:
3094       return &field_read_event_ref_count_;
3095     case instrumentation::Instrumentation::kFieldWritten:
3096       return &field_write_event_ref_count_;
3097     case instrumentation::Instrumentation::kExceptionCaught:
3098       return &exception_catch_event_ref_count_;
3099     default:
3100       return nullptr;
3101   }
3102 }
3103 
3104 // Process request while all mutator threads are suspended.
3105 void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
3106   instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
3107   switch (request.GetKind()) {
3108     case DeoptimizationRequest::kNothing:
3109       LOG(WARNING) << "Ignoring empty deoptimization request.";
3110       break;
3111     case DeoptimizationRequest::kRegisterForEvent:
3112       VLOG(jdwp) << StringPrintf("Add debugger as listener for instrumentation event 0x%x",
3113                                  request.InstrumentationEvent());
3114       instrumentation->AddListener(&gDebugInstrumentationListener, request.InstrumentationEvent());
3115       instrumentation_events_ |= request.InstrumentationEvent();
3116       break;
3117     case DeoptimizationRequest::kUnregisterForEvent:
3118       VLOG(jdwp) << StringPrintf("Remove debugger as listener for instrumentation event 0x%x",
3119                                  request.InstrumentationEvent());
3120       instrumentation->RemoveListener(&gDebugInstrumentationListener,
3121                                       request.InstrumentationEvent());
3122       instrumentation_events_ &= ~request.InstrumentationEvent();
3123       break;
3124     case DeoptimizationRequest::kFullDeoptimization:
3125       VLOG(jdwp) << "Deoptimize the world ...";
3126       instrumentation->DeoptimizeEverything(kDbgInstrumentationKey);
3127       VLOG(jdwp) << "Deoptimize the world DONE";
3128       break;
3129     case DeoptimizationRequest::kFullUndeoptimization:
3130       VLOG(jdwp) << "Undeoptimize the world ...";
3131       instrumentation->UndeoptimizeEverything(kDbgInstrumentationKey);
3132       VLOG(jdwp) << "Undeoptimize the world DONE";
3133       break;
3134     case DeoptimizationRequest::kSelectiveDeoptimization:
3135       VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " ...";
3136       instrumentation->Deoptimize(request.Method());
3137       VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " DONE";
3138       break;
3139     case DeoptimizationRequest::kSelectiveUndeoptimization:
3140       VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " ...";
3141       instrumentation->Undeoptimize(request.Method());
3142       VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " DONE";
3143       break;
3144     default:
3145       LOG(FATAL) << "Unsupported deoptimization request kind " << request.GetKind();
3146       break;
3147   }
3148 }
3149 
3150 void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
3151   if (req.GetKind() == DeoptimizationRequest::kNothing) {
3152     // Nothing to do.
3153     return;
3154   }
3155   MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
3156   RequestDeoptimizationLocked(req);
3157 }
3158 
3159 void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
3160   switch (req.GetKind()) {
3161     case DeoptimizationRequest::kRegisterForEvent: {
3162       DCHECK_NE(req.InstrumentationEvent(), 0u);
3163       size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
3164       CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
3165                                                 req.InstrumentationEvent());
3166       if (*counter == 0) {
3167         VLOG(jdwp) << StringPrintf("Queue request #%zd to start listening to instrumentation event 0x%x",
3168                                    deoptimization_requests_.size(), req.InstrumentationEvent());
3169         deoptimization_requests_.push_back(req);
3170       }
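      // Events are reference-counted: only the 0 -> 1 transition queues an actual
      // instrumentation change; later registrations simply bump the count.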
3171       *counter = *counter + 1;
3172       break;
3173     }
3174     case DeoptimizationRequest::kUnregisterForEvent: {
3175       DCHECK_NE(req.InstrumentationEvent(), 0u);
3176       size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
3177       CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
3178                                                 req.InstrumentationEvent());
3179       *counter = *counter - 1;
3180       if (*counter == 0) {
3181         VLOG(jdwp) << StringPrintf("Queue request #%zd to stop listening to instrumentation event 0x%x",
3182                                    deoptimization_requests_.size(), req.InstrumentationEvent());
3183         deoptimization_requests_.push_back(req);
3184       }
3185       break;
3186     }
3187     case DeoptimizationRequest::kFullDeoptimization: {
3188       DCHECK(req.Method() == nullptr);
3189       if (full_deoptimization_event_count_ == 0) {
3190         VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3191                    << " for full deoptimization";
3192         deoptimization_requests_.push_back(req);
3193       }
3194       ++full_deoptimization_event_count_;
3195       break;
3196     }
3197     case DeoptimizationRequest::kFullUndeoptimization: {
3198       DCHECK(req.Method() == nullptr);
3199       DCHECK_GT(full_deoptimization_event_count_, 0U);
3200       --full_deoptimization_event_count_;
3201       if (full_deoptimization_event_count_ == 0) {
3202         VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3203                    << " for full undeoptimization";
3204         deoptimization_requests_.push_back(req);
3205       }
3206       break;
3207     }
3208     case DeoptimizationRequest::kSelectiveDeoptimization: {
3209       DCHECK(req.Method() != nullptr);
3210       VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3211                  << " for deoptimization of " << PrettyMethod(req.Method());
3212       deoptimization_requests_.push_back(req);
3213       break;
3214     }
3215     case DeoptimizationRequest::kSelectiveUndeoptimization: {
3216       DCHECK(req.Method() != nullptr);
3217       VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3218                  << " for undeoptimization of " << PrettyMethod(req.Method());
3219       deoptimization_requests_.push_back(req);
3220       break;
3221     }
3222     default: {
3223       LOG(FATAL) << "Unknown deoptimization request kind " << req.GetKind();
3224       break;
3225     }
3226   }
3227 }
3228 
3229 void Dbg::ManageDeoptimization() {
3230   Thread* const self = Thread::Current();
3231   {
3232     // Avoid suspend/resume if there is no pending request.
3233     MutexLock mu(self, *Locks::deoptimization_lock_);
3234     if (deoptimization_requests_.empty()) {
3235       return;
3236     }
3237   }
3238   CHECK_EQ(self->GetState(), kRunnable);
3239   ScopedThreadSuspension sts(self, kWaitingForDeoptimization);
3240   // Required for ProcessDeoptimizationRequest.
3241   gc::ScopedGCCriticalSection gcs(self,
3242                                   gc::kGcCauseInstrumentation,
3243                                   gc::kCollectorTypeInstrumentation);
3244   // We need to suspend mutator threads first.
3245   ScopedSuspendAll ssa(__FUNCTION__);
3246   const ThreadState old_state = self->SetStateUnsafe(kRunnable);
3247   {
3248     MutexLock mu(self, *Locks::deoptimization_lock_);
3249     size_t req_index = 0;
3250     for (DeoptimizationRequest& request : deoptimization_requests_) {
3251       VLOG(jdwp) << "Process deoptimization request #" << req_index++;
3252       ProcessDeoptimizationRequest(request);
3253     }
3254     deoptimization_requests_.clear();
3255   }
3256   CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
3257 }
3258 
3259 static const Breakpoint* FindFirstBreakpointForMethod(ArtMethod* m)
3260     SHARED_REQUIRES(Locks::mutator_lock_, Locks::breakpoint_lock_) {
3261   for (Breakpoint& breakpoint : gBreakpoints) {
3262     if (breakpoint.IsInMethod(m)) {
3263       return &breakpoint;
3264     }
3265   }
3266   return nullptr;
3267 }
3268 
3269 bool Dbg::MethodHasAnyBreakpoints(ArtMethod* method) {
3270   ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
3271   return FindFirstBreakpointForMethod(method) != nullptr;
3272 }
3273 
3274 // Sanity checks all existing breakpoints on the same method.
3275 static void SanityCheckExistingBreakpoints(ArtMethod* m,
3276                                            DeoptimizationRequest::Kind deoptimization_kind)
3277     SHARED_REQUIRES(Locks::mutator_lock_, Locks::breakpoint_lock_) {
3278   for (const Breakpoint& breakpoint : gBreakpoints) {
3279     if (breakpoint.IsInMethod(m)) {
3280       CHECK_EQ(deoptimization_kind, breakpoint.GetDeoptimizationKind());
3281     }
3282   }
3283   instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
3284   if (deoptimization_kind == DeoptimizationRequest::kFullDeoptimization) {
3285     // We should have deoptimized everything but not "selectively" deoptimized this method.
3286     CHECK(instrumentation->AreAllMethodsDeoptimized());
3287     CHECK(!instrumentation->IsDeoptimized(m));
3288   } else if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
3289     // We should have "selectively" deoptimized this method.
3290     // Note: while we have not deoptimized everything for this method, we may have done it for
3291     // another event.
3292     CHECK(instrumentation->IsDeoptimized(m));
3293   } else {
3294     // This method does not require deoptimization.
3295     CHECK_EQ(deoptimization_kind, DeoptimizationRequest::kNothing);
3296     CHECK(!instrumentation->IsDeoptimized(m));
3297   }
3298 }
3299 
3300 // Returns the deoptimization kind required to set a breakpoint in a method.
3301 // If a breakpoint has already been set, we also return the first breakpoint
3302 // through the given 'existing_brkpt' pointer.
3303 static DeoptimizationRequest::Kind GetRequiredDeoptimizationKind(Thread* self,
3304                                                                  ArtMethod* m,
3305                                                                  const Breakpoint** existing_brkpt)
3306     SHARED_REQUIRES(Locks::mutator_lock_) {
3307   if (!Dbg::RequiresDeoptimization()) {
3308     // We already run in interpreter-only mode so we don't need to deoptimize anything.
3309     VLOG(jdwp) << "No need for deoptimization when fully running with interpreter for method "
3310                << PrettyMethod(m);
3311     return DeoptimizationRequest::kNothing;
3312   }
3313   const Breakpoint* first_breakpoint;
3314   {
3315     ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
3316     first_breakpoint = FindFirstBreakpointForMethod(m);
3317     *existing_brkpt = first_breakpoint;
3318   }
3319 
3320   if (first_breakpoint == nullptr) {
3321     // There is no breakpoint on this method yet: we need to deoptimize. If this method is default,
3322     // we deoptimize everything; otherwise we deoptimize only this method. We deoptimize
3323     // everything for default methods because we do not know everywhere their copies are
3324     // used, so selective deoptimization could miss some of them.
3325     // TODO Deoptimizing on default methods might not be necessary in all cases.
3326     bool need_full_deoptimization = m->IsDefault();
3327     if (need_full_deoptimization) {
3328       VLOG(jdwp) << "Need full deoptimization because of copying of method "
3329                  << PrettyMethod(m);
3330       return DeoptimizationRequest::kFullDeoptimization;
3331     } else {
3332       // We don't need to deoptimize if the method has not been compiled.
3333       const bool is_compiled = m->HasAnyCompiledCode();
3334       if (is_compiled) {
3335         VLOG(jdwp) << "Need selective deoptimization for compiled method " << PrettyMethod(m);
3336         return DeoptimizationRequest::kSelectiveDeoptimization;
3337       } else {
3338         // Method is not compiled: we don't need to deoptimize.
3339         VLOG(jdwp) << "No need for deoptimization for non-compiled method " << PrettyMethod(m);
3340         return DeoptimizationRequest::kNothing;
3341       }
3342     }
3343   } else {
3344     // There is at least one breakpoint for this method: we don't need to deoptimize.
3345     // Let's check that all breakpoints are configured the same way for deoptimization.
3346     VLOG(jdwp) << "Breakpoint already set: no deoptimization is required";
3347     DeoptimizationRequest::Kind deoptimization_kind = first_breakpoint->GetDeoptimizationKind();
3348     if (kIsDebugBuild) {
3349       ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
3350       SanityCheckExistingBreakpoints(m, deoptimization_kind);
3351     }
3352     return DeoptimizationRequest::kNothing;
3353   }
3354 }
3355 
3356 // Installs a breakpoint at the specified location. Also indicates through the deoptimization
3357 // request if we need to deoptimize.
3358 void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
3359   Thread* const self = Thread::Current();
3360   ArtMethod* m = FromMethodId(location->method_id);
3361   DCHECK(m != nullptr) << "No method for method id " << location->method_id;
3362 
3363   const Breakpoint* existing_breakpoint = nullptr;
3364   const DeoptimizationRequest::Kind deoptimization_kind =
3365       GetRequiredDeoptimizationKind(self, m, &existing_breakpoint);
3366   req->SetKind(deoptimization_kind);
3367   if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
3368     req->SetMethod(m);
3369   } else {
3370     CHECK(deoptimization_kind == DeoptimizationRequest::kNothing ||
3371           deoptimization_kind == DeoptimizationRequest::kFullDeoptimization);
3372     req->SetMethod(nullptr);
3373   }
3374 
3375   {
3376     WriterMutexLock mu(self, *Locks::breakpoint_lock_);
3377     // If there is at least one existing breakpoint on the same method, the new breakpoint
3378     // must have the same deoptimization kind as the existing breakpoint(s).
3379     DeoptimizationRequest::Kind breakpoint_deoptimization_kind;
3380     if (existing_breakpoint != nullptr) {
3381       breakpoint_deoptimization_kind = existing_breakpoint->GetDeoptimizationKind();
3382     } else {
3383       breakpoint_deoptimization_kind = deoptimization_kind;
3384     }
3385     gBreakpoints.push_back(Breakpoint(m, location->dex_pc, breakpoint_deoptimization_kind));
3386     VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": "
3387                << gBreakpoints[gBreakpoints.size() - 1];
3388   }
3389 }
3390 
3391 // Uninstalls a breakpoint at the specified location. Also indicates through the deoptimization
3392 // request if we need to undeoptimize.
3393 void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
3394   WriterMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
3395   ArtMethod* m = FromMethodId(location->method_id);
3396   DCHECK(m != nullptr) << "No method for method id " << location->method_id;
3397   DeoptimizationRequest::Kind deoptimization_kind = DeoptimizationRequest::kNothing;
3398   for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
3399     if (gBreakpoints[i].DexPc() == location->dex_pc && gBreakpoints[i].IsInMethod(m)) {
3400       VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
3401       deoptimization_kind = gBreakpoints[i].GetDeoptimizationKind();
3402       DCHECK_EQ(deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization,
3403                 Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3404       gBreakpoints.erase(gBreakpoints.begin() + i);
3405       break;
3406     }
3407   }
3408   const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
3409   if (existing_breakpoint == nullptr) {
3410     // There is no more breakpoint on this method: we need to undeoptimize.
3411     if (deoptimization_kind == DeoptimizationRequest::kFullDeoptimization) {
3412       // This method required full deoptimization: we need to undeoptimize everything.
3413       req->SetKind(DeoptimizationRequest::kFullUndeoptimization);
3414       req->SetMethod(nullptr);
3415     } else if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
3416       // This method required selective deoptimization: we need to undeoptimize only that method.
3417       req->SetKind(DeoptimizationRequest::kSelectiveUndeoptimization);
3418       req->SetMethod(m);
3419     } else {
3420       // This method had no need for deoptimization: do nothing.
3421       CHECK_EQ(deoptimization_kind, DeoptimizationRequest::kNothing);
3422       req->SetKind(DeoptimizationRequest::kNothing);
3423       req->SetMethod(nullptr);
3424     }
3425   } else {
3426     // There is at least one breakpoint for this method: we don't need to undeoptimize.
3427     req->SetKind(DeoptimizationRequest::kNothing);
3428     req->SetMethod(nullptr);
3429     if (kIsDebugBuild) {
3430       SanityCheckExistingBreakpoints(m, deoptimization_kind);
3431     }
3432   }
3433 }
3434 
3435 bool Dbg::IsForcedInterpreterNeededForCallingImpl(Thread* thread, ArtMethod* m) {
3436   const SingleStepControl* const ssc = thread->GetSingleStepControl();
3437   if (ssc == nullptr) {
3438     // If we are not single-stepping, then we don't have to force interpreter.
3439     return false;
3440   }
3441   if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
3442     // If we are in interpreter only mode, then we don't have to force interpreter.
3443     return false;
3444   }
3445 
3446   if (!m->IsNative() && !m->IsProxyMethod()) {
3447     // If we want to step into a method, then we have to force interpreter on that call.
3448     if (ssc->GetStepDepth() == JDWP::SD_INTO) {
3449       return true;
3450     }
3451   }
3452   return false;
3453 }
3454 
3455 bool Dbg::IsForcedInterpreterNeededForResolutionImpl(Thread* thread, ArtMethod* m) {
3456   instrumentation::Instrumentation* const instrumentation =
3457       Runtime::Current()->GetInstrumentation();
3458   // If we are in interpreter only mode, then we don't have to force interpreter.
3459   if (instrumentation->InterpretOnly()) {
3460     return false;
3461   }
3462   // We can only interpret pure Java methods.
3463   if (m->IsNative() || m->IsProxyMethod()) {
3464     return false;
3465   }
3466   const SingleStepControl* const ssc = thread->GetSingleStepControl();
3467   if (ssc != nullptr) {
3468     // If we want to step into a method, then we have to force interpreter on that call.
3469     if (ssc->GetStepDepth() == JDWP::SD_INTO) {
3470       return true;
3471     }
3472     // If we are stepping out of a static initializer that was implicitly
3473     // invoked by calling a static method, via a step-in or step-over, then
3474     // we need to step into that method. A stack depth lower than the one
3475     // recorded by the single-step control indicates that the step originates
3476     // from the static initializer.
3477     if (ssc->GetStepDepth() != JDWP::SD_OUT &&
3478         ssc->GetStackDepth() > GetStackDepth(thread)) {
3479       return true;
3480     }
3481   }
3482   // There are cases where we have to force the interpreter on deoptimized methods:
3483   // for example, the call may not go through the entry point that was replaced
3484   // by the deoptimization, but may instead directly invoke the compiled code of
3485   // the method.
3486   return instrumentation->IsDeoptimized(m);
3487 }
3488 
3489 bool Dbg::IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, ArtMethod* m) {
3490   // The upcall can be null and in that case we don't need to do anything.
3491   if (m == nullptr) {
3492     return false;
3493   }
3494   instrumentation::Instrumentation* const instrumentation =
3495       Runtime::Current()->GetInstrumentation();
3496   // If we are in interpreter only mode, then we don't have to force interpreter.
3497   if (instrumentation->InterpretOnly()) {
3498     return false;
3499   }
3500   // We can only interpret pure Java methods.
3501   if (m->IsNative() || m->IsProxyMethod()) {
3502     return false;
3503   }
3504   const SingleStepControl* const ssc = thread->GetSingleStepControl();
3505   if (ssc != nullptr) {
3506     // If we are stepping out of a static initializer that was implicitly
3507     // invoked by calling a static method, via a step-out, then we need to
3508     // step into the caller of that method. A stack depth lower than the one
3509     // recorded by the single-step control indicates that the step originates
3510     // from the static initializer.
3511     if (ssc->GetStepDepth() == JDWP::SD_OUT &&
3512         ssc->GetStackDepth() > GetStackDepth(thread)) {
3513       return true;
3514     }
3515   }
3516   // If we are returning from a static initializer, that was implicitly
3517   // invoked by calling a static method and the caller is deoptimized,
3518   // then we have to deoptimize the stack without forcing the interpreter
3519   // on the static method that was called originally. This problem can
3520   // be solved easily by forcing instrumentation on the called method,
3521   // because the instrumentation exit hook will recognise the need for
3522   // stack deoptimization by calling IsForcedInterpreterNeededForUpcall.
3523   return instrumentation->IsDeoptimized(m);
3524 }
3525 
3526 bool Dbg::IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m) {
3527   // The upcall can be null and in that case we don't need to do anything.
3528   if (m == nullptr) {
3529     return false;
3530   }
3531   instrumentation::Instrumentation* const instrumentation =
3532       Runtime::Current()->GetInstrumentation();
3533   // If we are in interpreter only mode, then we don't have to force interpreter.
3534   if (instrumentation->InterpretOnly()) {
3535     return false;
3536   }
3537   // We can only interpret pure Java methods.
3538   if (m->IsNative() || m->IsProxyMethod()) {
3539     return false;
3540   }
3541   const SingleStepControl* const ssc = thread->GetSingleStepControl();
3542   if (ssc != nullptr) {
3543     // The debugger is not interested in what is happening below the level
3544     // of the step, so we only force the interpreter when we are not below
3545     // the step.
3546     if (ssc->GetStackDepth() >= GetStackDepth(thread)) {
3547       return true;
3548     }
3549   }
3550   if (thread->HasDebuggerShadowFrames()) {
3551     // We need to deoptimize the stack for the exception handling flow so that
3552     // we don't miss any deoptimization that should be done when there are
3553     // debugger shadow frames.
3554     return true;
3555   }
3556   // We have to require stack deoptimization if the upcall is deoptimized.
3557   return instrumentation->IsDeoptimized(m);
3558 }
3559 
3560 class NeedsDeoptimizationVisitor : public StackVisitor {
3561  public:
3562   explicit NeedsDeoptimizationVisitor(Thread* self)
3563       SHARED_REQUIRES(Locks::mutator_lock_)
3564     : StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
3565       needs_deoptimization_(false) {}
3566 
3567   bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
3568     // The visitor is meant to be used only when handling exceptions from compiled code.
3569     CHECK(!IsShadowFrame()) << "We only expect to visit compiled frames: " << PrettyMethod(GetMethod());
3570     ArtMethod* method = GetMethod();
3571     if (method == nullptr) {
3572       // We reach an upcall and don't need to deoptimize this part of the stack (ManagedFragment)
3573       // so we can stop the visit.
3574       DCHECK(!needs_deoptimization_);
3575       return false;
3576     }
3577     if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
3578       // We found a compiled frame in the stack but instrumentation is set to interpret
3579       // everything: we need to deoptimize.
3580       needs_deoptimization_ = true;
3581       return false;
3582     }
3583     if (Runtime::Current()->GetInstrumentation()->IsDeoptimized(method)) {
3584       // We found a deoptimized method in the stack.
3585       needs_deoptimization_ = true;
3586       return false;
3587     }
3588     ShadowFrame* frame = GetThread()->FindDebuggerShadowFrame(GetFrameId());
3589     if (frame != nullptr) {
3590       // The debugger allocated a ShadowFrame to update a variable in the stack: we need to
3591       // deoptimize the stack to execute (and deallocate) this frame.
3592       needs_deoptimization_ = true;
3593       return false;
3594     }
3595     return true;
3596   }
3597 
3598   bool NeedsDeoptimization() const {
3599     return needs_deoptimization_;
3600   }
3601 
3602  private:
3603   // Do we need to deoptimize the stack?
3604   bool needs_deoptimization_;
3605 
3606   DISALLOW_COPY_AND_ASSIGN(NeedsDeoptimizationVisitor);
3607 };
3608 
3609 // Do we need to deoptimize the stack to handle an exception?
IsForcedInterpreterNeededForExceptionImpl(Thread * thread)3610 bool Dbg::IsForcedInterpreterNeededForExceptionImpl(Thread* thread) {
3611   const SingleStepControl* const ssc = thread->GetSingleStepControl();
3612   if (ssc != nullptr) {
3613     // We deopt to step into the catch handler.
3614     return true;
3615   }
3616   // Deoptimization is required if at least one method in the stack needs it. However we
3617   // skip frames that will be unwound (thus not executed).
3618   NeedsDeoptimizationVisitor visitor(thread);
3619   visitor.WalkStack(true);  // includes upcall.
3620   return visitor.NeedsDeoptimization();
3621 }
3622 
3623 // Scoped utility class to suspend a thread so that we may do tasks such as walking its stack. Doesn't
3624 // cause suspension if the thread is the current thread.
3625 class ScopedDebuggerThreadSuspension {
3626  public:
3627   ScopedDebuggerThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
3628       REQUIRES(!Locks::thread_list_lock_)
3629       SHARED_REQUIRES(Locks::mutator_lock_) :
3630       thread_(nullptr),
3631       error_(JDWP::ERR_NONE),
3632       self_suspend_(false),
3633       other_suspend_(false) {
3634     ScopedObjectAccessUnchecked soa(self);
3635     thread_ = DecodeThread(soa, thread_id, &error_);
3636     if (error_ == JDWP::ERR_NONE) {
3637       if (thread_ == soa.Self()) {
3638         self_suspend_ = true;
3639       } else {
3640         Thread* suspended_thread;
3641         {
3642           ScopedThreadSuspension sts(self, kWaitingForDebuggerSuspension);
3643           jobject thread_peer = Dbg::GetObjectRegistry()->GetJObject(thread_id);
3644           bool timed_out;
3645           ThreadList* const thread_list = Runtime::Current()->GetThreadList();
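          // Suspend the target thread; this debug suspension is balanced by the
          // Resume(thread_, true) call in the destructor.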
3646           suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, true, true, &timed_out);
3647         }
3648         if (suspended_thread == nullptr) {
3649           // Thread terminated from under us while suspending.
3650           error_ = JDWP::ERR_INVALID_THREAD;
3651         } else {
3652           CHECK_EQ(suspended_thread, thread_);
3653           other_suspend_ = true;
3654         }
3655       }
3656     }
3657   }
3658 
3659   Thread* GetThread() const {
3660     return thread_;
3661   }
3662 
3663   JDWP::JdwpError GetError() const {
3664     return error_;
3665   }
3666 
3667   ~ScopedDebuggerThreadSuspension() {
3668     if (other_suspend_) {
3669       Runtime::Current()->GetThreadList()->Resume(thread_, true);
3670     }
3671   }
3672 
3673  private:
3674   Thread* thread_;
3675   JDWP::JdwpError error_;
3676   bool self_suspend_;
3677   bool other_suspend_;
3678 };
3679 
3680 JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize step_size,
3681                                    JDWP::JdwpStepDepth step_depth) {
3682   Thread* self = Thread::Current();
3683   ScopedDebuggerThreadSuspension sts(self, thread_id);
3684   if (sts.GetError() != JDWP::ERR_NONE) {
3685     return sts.GetError();
3686   }
3687 
3688   // Work out what ArtMethod* we're in, the current line number, and how deep the stack currently
3689   // is for step-out.
3690   struct SingleStepStackVisitor : public StackVisitor {
3691     explicit SingleStepStackVisitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
3692         : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
3693           stack_depth(0),
3694           method(nullptr),
3695           line_number(-1) {}
3696 
3697     // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
3698     // annotalysis.
3699     bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
3700       ArtMethod* m = GetMethod();
3701       if (!m->IsRuntimeMethod()) {
3702         ++stack_depth;
3703         if (method == nullptr) {
3704           mirror::DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache();
3705           method = m;
3706           if (dex_cache != nullptr) {
3707             const DexFile& dex_file = *dex_cache->GetDexFile();
3708             line_number = dex_file.GetLineNumFromPC(m, GetDexPc());
3709           }
3710         }
3711       }
3712       return true;
3713     }
3714 
3715     int stack_depth;
3716     ArtMethod* method;
3717     int32_t line_number;
3718   };
3719 
3720   Thread* const thread = sts.GetThread();
3721   SingleStepStackVisitor visitor(thread);
3722   visitor.WalkStack();
3723 
3724   // Find the dex_pc values that correspond to the current line, for line-based single-stepping.
3725   struct DebugCallbackContext {
3726     DebugCallbackContext(SingleStepControl* single_step_control_cb,
3727                          int32_t line_number_cb, const DexFile::CodeItem* code_item)
3728         : single_step_control_(single_step_control_cb), line_number_(line_number_cb),
3729           code_item_(code_item), last_pc_valid(false), last_pc(0) {
3730     }
3731 
3732     static bool Callback(void* raw_context, const DexFile::PositionInfo& entry) {
3733       DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context);
3734       if (static_cast<int32_t>(entry.line_) == context->line_number_) {
3735         if (!context->last_pc_valid) {
3736           // Everything from this address until the next line change is ours.
3737           context->last_pc = entry.address_;
3738           context->last_pc_valid = true;
3739         }
3740         // Otherwise, if we're already in a valid range for this line,
3741         // just keep going (shouldn't really happen)...
3742       } else if (context->last_pc_valid) {  // and the line number is new
3743         // Add everything from the last entry up until here to the set
3744         for (uint32_t dex_pc = context->last_pc; dex_pc < entry.address_; ++dex_pc) {
3745           context->single_step_control_->AddDexPc(dex_pc);
3746         }
3747         context->last_pc_valid = false;
3748       }
3749       return false;  // There may be multiple entries for any given line.
3750     }
3751 
3752     ~DebugCallbackContext() {
3753       // If the line number was the last in the position table...
3754       if (last_pc_valid) {
3755         size_t end = code_item_->insns_size_in_code_units_;
3756         for (uint32_t dex_pc = last_pc; dex_pc < end; ++dex_pc) {
3757           single_step_control_->AddDexPc(dex_pc);
3758         }
3759       }
3760     }
3761 
3762     SingleStepControl* const single_step_control_;
3763     const int32_t line_number_;
3764     const DexFile::CodeItem* const code_item_;
3765     bool last_pc_valid;
3766     uint32_t last_pc;
3767   };
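  // For illustration: given a position table {(pc 0x00, line 10), (pc 0x08, line 11),
  // (pc 0x14, line 10)} and line_number == 10, the callback and destructor above
  // collect the dex pc ranges [0x00, 0x08) and [0x14, end); this is why Callback
  // keeps scanning after a match.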
3768 
3769   // Allocate single step.
3770   SingleStepControl* single_step_control =
3771       new (std::nothrow) SingleStepControl(step_size, step_depth,
3772                                            visitor.stack_depth, visitor.method);
3773   if (single_step_control == nullptr) {
3774     LOG(ERROR) << "Failed to allocate SingleStepControl";
3775     return JDWP::ERR_OUT_OF_MEMORY;
3776   }
3777 
3778   ArtMethod* m = single_step_control->GetMethod();
3779   const int32_t line_number = visitor.line_number;
3780   // Note: if the thread is not running Java code (pure native thread), there is no "current"
3781   // method on the stack (and no line number either).
3782   if (m != nullptr && !m->IsNative()) {
3783     const DexFile::CodeItem* const code_item = m->GetCodeItem();
3784     DebugCallbackContext context(single_step_control, line_number, code_item);
3785     m->GetDexFile()->DecodeDebugPositionInfo(code_item, DebugCallbackContext::Callback, &context);
3786   }
3787 
3788   // Activate single-step in the thread.
3789   thread->ActivateSingleStepControl(single_step_control);
3790 
3791   if (VLOG_IS_ON(jdwp)) {
3792     VLOG(jdwp) << "Single-step thread: " << *thread;
3793     VLOG(jdwp) << "Single-step step size: " << single_step_control->GetStepSize();
3794     VLOG(jdwp) << "Single-step step depth: " << single_step_control->GetStepDepth();
3795     VLOG(jdwp) << "Single-step current method: " << PrettyMethod(single_step_control->GetMethod());
3796     VLOG(jdwp) << "Single-step current line: " << line_number;
3797     VLOG(jdwp) << "Single-step current stack depth: " << single_step_control->GetStackDepth();
3798     VLOG(jdwp) << "Single-step dex_pc values:";
3799     for (uint32_t dex_pc : single_step_control->GetDexPcs()) {
3800       VLOG(jdwp) << StringPrintf(" %#x", dex_pc);
3801     }
3802   }
3803 
3804   return JDWP::ERR_NONE;
3805 }
3806 
3807 void Dbg::UnconfigureStep(JDWP::ObjectId thread_id) {
3808   ScopedObjectAccessUnchecked soa(Thread::Current());
3809   JDWP::JdwpError error;
3810   Thread* thread = DecodeThread(soa, thread_id, &error);
3811   if (error == JDWP::ERR_NONE) {
3812     thread->DeactivateSingleStepControl();
3813   }
3814 }
3815 
3816 static char JdwpTagToShortyChar(JDWP::JdwpTag tag) {
3817   switch (tag) {
3818     default:
3819       LOG(FATAL) << "unknown JDWP tag: " << PrintableChar(tag);
3820       UNREACHABLE();
3821 
3822     // Primitives.
3823     case JDWP::JT_BYTE:    return 'B';
3824     case JDWP::JT_CHAR:    return 'C';
3825     case JDWP::JT_FLOAT:   return 'F';
3826     case JDWP::JT_DOUBLE:  return 'D';
3827     case JDWP::JT_INT:     return 'I';
3828     case JDWP::JT_LONG:    return 'J';
3829     case JDWP::JT_SHORT:   return 'S';
3830     case JDWP::JT_VOID:    return 'V';
3831     case JDWP::JT_BOOLEAN: return 'Z';
3832 
3833     // Reference types.
3834     case JDWP::JT_ARRAY:
3835     case JDWP::JT_OBJECT:
3836     case JDWP::JT_STRING:
3837     case JDWP::JT_THREAD:
3838     case JDWP::JT_THREAD_GROUP:
3839     case JDWP::JT_CLASS_LOADER:
3840     case JDWP::JT_CLASS_OBJECT:
3841       return 'L';
3842   }
3843 }
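// For example, for a debugger invoke of int foo(String s), arg_types is {JT_STRING};
// JdwpTagToShortyChar(JT_STRING) yields 'L', which PrepareInvokeMethod checks against
// the method's shorty "IL" (return type first, then parameters).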
3844 
3845 JDWP::JdwpError Dbg::PrepareInvokeMethod(uint32_t request_id, JDWP::ObjectId thread_id,
3846                                          JDWP::ObjectId object_id, JDWP::RefTypeId class_id,
3847                                          JDWP::MethodId method_id, uint32_t arg_count,
3848                                          uint64_t arg_values[], JDWP::JdwpTag* arg_types,
3849                                          uint32_t options) {
3850   Thread* const self = Thread::Current();
3851   CHECK_EQ(self, GetDebugThread()) << "This must be called by the JDWP thread";
3852   const bool resume_all_threads = ((options & JDWP::INVOKE_SINGLE_THREADED) == 0);
3853 
3854   ThreadList* thread_list = Runtime::Current()->GetThreadList();
3855   Thread* targetThread = nullptr;
3856   {
3857     ScopedObjectAccessUnchecked soa(self);
3858     JDWP::JdwpError error;
3859     targetThread = DecodeThread(soa, thread_id, &error);
3860     if (error != JDWP::ERR_NONE) {
3861       LOG(ERROR) << "InvokeMethod request for invalid thread id " << thread_id;
3862       return error;
3863     }
3864     if (targetThread->GetInvokeReq() != nullptr) {
3865       // Thread is already invoking a method on behalf of the debugger.
3866       LOG(ERROR) << "InvokeMethod request for thread already invoking a method: " << *targetThread;
3867       return JDWP::ERR_ALREADY_INVOKING;
3868     }
3869     if (!targetThread->IsReadyForDebugInvoke()) {
3870       // Thread is not suspended by an event so it cannot invoke a method.
3871       LOG(ERROR) << "InvokeMethod request for thread not stopped by event: " << *targetThread;
3872       return JDWP::ERR_INVALID_THREAD;
3873     }
3874 
3875     /*
3876      * According to the JDWP specs, we are expected to resume all threads (or only the
3877      * target thread) once. So if a thread has been suspended more than once (either by
3878      * the debugger for an event or by the runtime for GC), it will remain suspended before
3879      * the invoke is executed. This means the debugger is responsible for properly resuming
3880      * all the threads it has suspended so the target thread can execute the method.
3881      *
3882      * However, for compatibility reasons with older debuggers (like Eclipse), we fully
3883      * resume all threads (by canceling *all* debugger suspensions) when the debugger
3884      * wants us to resume all threads. This is to avoid ending up in a deadlock situation.
3885      *
3886      * On the other hand, if we are asked to only resume the target thread, then we follow the
3887      * JDWP specs by resuming that thread only once. This means the thread will remain suspended
3888      * if it has been suspended more than once before the invoke (and again, it is the
3889      * debugger's responsibility to properly resume that thread before invoking a method).
3890      */
3891     int suspend_count;
3892     {
3893       MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
3894       suspend_count = targetThread->GetSuspendCount();
3895     }
3896     if (suspend_count > 1 && resume_all_threads) {
3897       // The target thread will remain suspended even after we resume it. Let's emit a warning
3898       // to indicate the invoke won't be executed until the thread is resumed.
3899       LOG(WARNING) << *targetThread << " suspended more than once (suspend count == "
3900                    << suspend_count << "). This thread will invoke the method only once "
3901                    << "it is fully resumed.";
3902     }
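    // Concretely: with a suspend count of 2, a resume-all invoke cancels both debugger
    // suspensions via UndoDebuggerSuspensions below, whereas INVOKE_SINGLE_THREADED
    // resumes the thread only once, leaving it suspended (count == 1) until the
    // debugger resumes it again.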
3903 
3904     mirror::Object* receiver = gRegistry->Get<mirror::Object*>(object_id, &error);
3905     if (error != JDWP::ERR_NONE) {
3906       return JDWP::ERR_INVALID_OBJECT;
3907     }
3908 
3909     gRegistry->Get<mirror::Object*>(thread_id, &error);
3910     if (error != JDWP::ERR_NONE) {
3911       return JDWP::ERR_INVALID_OBJECT;
3912     }
3913 
3914     mirror::Class* c = DecodeClass(class_id, &error);
3915     if (c == nullptr) {
3916       return error;
3917     }
3918 
3919     ArtMethod* m = FromMethodId(method_id);
3920     if (m->IsStatic() != (receiver == nullptr)) {
3921       return JDWP::ERR_INVALID_METHODID;
3922     }
3923     if (m->IsStatic()) {
3924       if (m->GetDeclaringClass() != c) {
3925         return JDWP::ERR_INVALID_METHODID;
3926       }
3927     } else {
3928       if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
3929         return JDWP::ERR_INVALID_METHODID;
3930       }
3931     }
3932 
3933     // Check the argument list matches the method.
3934     uint32_t shorty_len = 0;
3935     const char* shorty = m->GetShorty(&shorty_len);
3936     if (shorty_len - 1 != arg_count) {
3937       return JDWP::ERR_ILLEGAL_ARGUMENT;
3938     }
3939 
3940     {
3941       StackHandleScope<2> hs(soa.Self());
3942       HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&receiver));
3943       HandleWrapper<mirror::Class> h_klass(hs.NewHandleWrapper(&c));
3944       const DexFile::TypeList* types = m->GetParameterTypeList();
3945       for (size_t i = 0; i < arg_count; ++i) {
3946         if (shorty[i + 1] != JdwpTagToShortyChar(arg_types[i])) {
3947           return JDWP::ERR_ILLEGAL_ARGUMENT;
3948         }
3949 
3950         if (shorty[i + 1] == 'L') {
3951           // Did we really get an argument of an appropriate reference type?
3952           mirror::Class* parameter_type =
3953               m->GetClassFromTypeIndex(types->GetTypeItem(i).type_idx_,
3954                                        true /* resolve */,
3955                                        sizeof(void*));
3956           mirror::Object* argument = gRegistry->Get<mirror::Object*>(arg_values[i], &error);
3957           if (error != JDWP::ERR_NONE) {
3958             return JDWP::ERR_INVALID_OBJECT;
3959           }
3960           if (argument != nullptr && !argument->InstanceOf(parameter_type)) {
3961             return JDWP::ERR_ILLEGAL_ARGUMENT;
3962           }
3963 
3964           // Turn the on-the-wire ObjectId into a jobject.
3965           jvalue& v = reinterpret_cast<jvalue&>(arg_values[i]);
3966           v.l = gRegistry->GetJObject(arg_values[i]);
3967         }
3968       }
3969     }
3970 
3971     // Allocates a DebugInvokeReq.
3972     DebugInvokeReq* req = new (std::nothrow) DebugInvokeReq(request_id, thread_id, receiver, c, m,
3973                                                             options, arg_values, arg_count);
3974     if (req == nullptr) {
3975       LOG(ERROR) << "Failed to allocate DebugInvokeReq";
3976       return JDWP::ERR_OUT_OF_MEMORY;
3977     }
3978 
3979     // Attaches the DebugInvokeReq to the target thread so it executes the method when
3980     // it is resumed. Once the invocation completes, the target thread will delete it before
3981     // suspending itself (see ThreadList::SuspendSelfForDebugger).
3982     targetThread->SetDebugInvokeReq(req);
3983   }
3984 
3985   // The fact that we've released the thread list lock is a bit risky -- if the thread goes
3986   // away we're sitting high and dry -- but we must release this before the
3987   // UndoDebuggerSuspensions call.
3988   if (resume_all_threads) {
3989     VLOG(jdwp) << "      Resuming all threads";
3990     thread_list->UndoDebuggerSuspensions();
3991   } else {
3992     VLOG(jdwp) << "      Resuming event thread only";
3993     thread_list->Resume(targetThread, true);
3994   }
3995 
3996   return JDWP::ERR_NONE;
3997 }
3998 
3999 void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
4000   Thread* const self = Thread::Current();
4001   CHECK_NE(self, GetDebugThread()) << "This must be called by the event thread";
4002 
4003   ScopedObjectAccess soa(self);
4004 
4005   // We can be called while an exception is pending. We need
4006   // to preserve that across the method invocation.
4007   StackHandleScope<1> hs(soa.Self());
4008   Handle<mirror::Throwable> old_exception = hs.NewHandle(soa.Self()->GetException());
4009   soa.Self()->ClearException();
4010 
4011   // Execute the method, then send the reply to the debugger.
4012   ExecuteMethodWithoutPendingException(soa, pReq);
4013 
4014   // If an exception was pending before the invoke, restore it now.
4015   if (old_exception.Get() != nullptr) {
4016     soa.Self()->SetException(old_exception.Get());
4017   }
4018 }
4019 
4020 // Helper function: write a variable-width value into the output buffer.
4021 static void WriteValue(JDWP::ExpandBuf* pReply, int width, uint64_t value) {
4022   switch (width) {
4023     case 1:
4024       expandBufAdd1(pReply, value);
4025       break;
4026     case 2:
4027       expandBufAdd2BE(pReply, value);
4028       break;
4029     case 4:
4030       expandBufAdd4BE(pReply, value);
4031       break;
4032     case 8:
4033       expandBufAdd8BE(pReply, value);
4034       break;
4035     default:
4036       LOG(FATAL) << width;
4037       UNREACHABLE();
4038   }
4039 }
4040 
4041 void Dbg::ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInvokeReq* pReq) {
4042   soa.Self()->AssertNoPendingException();
4043 
4044   // Translate the method through the vtable, unless the debugger wants to suppress it.
4045   ArtMethod* m = pReq->method;
4046   size_t image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
4047   if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver.Read() != nullptr) {
4048     ArtMethod* actual_method =
4049         pReq->klass.Read()->FindVirtualMethodForVirtualOrInterface(m, image_pointer_size);
4050     if (actual_method != m) {
4051       VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m)
4052                  << " to " << PrettyMethod(actual_method);
4053       m = actual_method;
4054     }
4055   }
4056   VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m)
4057              << " receiver=" << pReq->receiver.Read()
4058              << " arg_count=" << pReq->arg_count;
4059   CHECK(m != nullptr);
4060 
4061   static_assert(sizeof(jvalue) == sizeof(uint64_t), "jvalue and uint64_t have different sizes.");
4062 
4063   // Invoke the method.
4064   ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(pReq->receiver.Read()));
4065   JValue result = InvokeWithJValues(soa, ref.get(), soa.EncodeMethod(m),
4066                                     reinterpret_cast<jvalue*>(pReq->arg_values.get()));
4067 
4068   // Prepare JDWP ids for the reply.
4069   JDWP::JdwpTag result_tag = BasicTagFromDescriptor(m->GetShorty());
4070   const bool is_object_result = (result_tag == JDWP::JT_OBJECT);
4071   StackHandleScope<3> hs(soa.Self());
4072   Handle<mirror::Object> object_result = hs.NewHandle(is_object_result ? result.GetL() : nullptr);
4073   Handle<mirror::Throwable> exception = hs.NewHandle(soa.Self()->GetException());
4074   soa.Self()->ClearException();
4075 
4076   if (!IsDebuggerActive()) {
4077     // The debugger detached: we must not re-suspend threads. We also don't need to fill the reply
4078     // because it won't be sent either.
4079     return;
4080   }
4081 
4082   JDWP::ObjectId exceptionObjectId = gRegistry->Add(exception);
4083   uint64_t result_value = 0;
4084   if (exceptionObjectId != 0) {
4085     VLOG(jdwp) << "  JDWP invocation returning with exception=" << exception.Get()
4086                << " " << exception->Dump();
4087     result_value = 0;
4088   } else if (is_object_result) {
4089     /* if no exception was thrown, examine object result more closely */
4090     JDWP::JdwpTag new_tag = TagFromObject(soa, object_result.Get());
4091     if (new_tag != result_tag) {
4092       VLOG(jdwp) << "  JDWP promoted result from " << result_tag << " to " << new_tag;
4093       result_tag = new_tag;
4094     }
4095 
4096     // Register the object in the registry and reference its ObjectId. This ensures
4097     // GC safety and prevents accessing a stale reference if the object is moved.
4098     result_value = gRegistry->Add(object_result.Get());
4099   } else {
4100     // Primitive result.
4101     DCHECK(IsPrimitiveTag(result_tag));
4102     result_value = result.GetJ();
4103   }
4104   const bool is_constructor = m->IsConstructor() && !m->IsStatic();
4105   if (is_constructor) {
4106     // If we invoked a constructor (which actually returns void), return the receiver,
4107     // unless we threw, in which case we return null.
4108     DCHECK_EQ(JDWP::JT_VOID, result_tag);
4109     if (exceptionObjectId == 0) {
4110       if (m->GetDeclaringClass()->IsStringClass()) {
4111         // For string constructors, the new string is remapped to the receiver (stored in ref).
4112         Handle<mirror::Object> decoded_ref = hs.NewHandle(soa.Self()->DecodeJObject(ref.get()));
4113         result_value = gRegistry->Add(decoded_ref);
4114         result_tag = TagFromObject(soa, decoded_ref.Get());
4115       } else {
4116         // TODO we could keep the receiver ObjectId in the DebugInvokeReq to avoid looking into the
4117         // object registry.
4118         result_value = GetObjectRegistry()->Add(pReq->receiver.Read());
4119         result_tag = TagFromObject(soa, pReq->receiver.Read());
4120       }
4121     } else {
4122       result_value = 0;
4123       result_tag = JDWP::JT_OBJECT;
4124     }
4125   }
4126 
4127   // Suspend other threads if the invoke is not single-threaded.
4128   if ((pReq->options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
4129     ScopedThreadSuspension sts(soa.Self(), kWaitingForDebuggerSuspension);
4130     VLOG(jdwp) << "      Suspending all threads";
4131     Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
4132   }
4133 
4134   VLOG(jdwp) << "  --> returned " << result_tag
4135              << StringPrintf(" %#" PRIx64 " (except=%#" PRIx64 ")", result_value,
4136                              exceptionObjectId);
4137 
4138   // Show detailed debug output.
4139   if (result_tag == JDWP::JT_STRING && exceptionObjectId == 0) {
4140     if (result_value != 0) {
4141       if (VLOG_IS_ON(jdwp)) {
4142         std::string result_string;
4143         JDWP::JdwpError error = Dbg::StringToUtf8(result_value, &result_string);
4144         CHECK_EQ(error, JDWP::ERR_NONE);
4145         VLOG(jdwp) << "      string '" << result_string << "'";
4146       }
4147     } else {
4148       VLOG(jdwp) << "      string (null)";
4149     }
4150   }
4151 
4152   // Attach the reply to DebugInvokeReq so it can be sent to the debugger when the event thread
4153   // is ready to suspend.
4154   BuildInvokeReply(pReq->reply, pReq->request_id, result_tag, result_value, exceptionObjectId);
4155 }
4156 
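// A JDWP reply packet begins with an 11-byte header: [u4] total length (including
// the header), [u4] request id, [u1] flags (0x80 marks a reply) and [u2] error code.
// The payload appended below is the result tag/value plus the exception object id.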
4157 void Dbg::BuildInvokeReply(JDWP::ExpandBuf* pReply, uint32_t request_id, JDWP::JdwpTag result_tag,
4158                            uint64_t result_value, JDWP::ObjectId exception) {
4159   // Make room for the JDWP header since we do not know the size of the reply yet.
4160   JDWP::expandBufAddSpace(pReply, kJDWPHeaderLen);
4161 
4162   size_t width = GetTagWidth(result_tag);
4163   JDWP::expandBufAdd1(pReply, result_tag);
4164   if (width != 0) {
4165     WriteValue(pReply, width, result_value);
4166   }
4167   JDWP::expandBufAdd1(pReply, JDWP::JT_OBJECT);
4168   JDWP::expandBufAddObjectId(pReply, exception);
4169 
4170   // Now we know the size, we can complete the JDWP header.
4171   uint8_t* buf = expandBufGetBuffer(pReply);
4172   JDWP::Set4BE(buf + kJDWPHeaderSizeOffset, expandBufGetLength(pReply));
4173   JDWP::Set4BE(buf + kJDWPHeaderIdOffset, request_id);
4174   JDWP::Set1(buf + kJDWPHeaderFlagsOffset, kJDWPFlagReply);  // flags
4175   JDWP::Set2BE(buf + kJDWPHeaderErrorCodeOffset, JDWP::ERR_NONE);
4176 }
4177 
4178 void Dbg::FinishInvokeMethod(DebugInvokeReq* pReq) {
4179   CHECK_NE(Thread::Current(), GetDebugThread()) << "This must be called by the event thread";
4180 
4181   JDWP::ExpandBuf* const pReply = pReq->reply;
4182   CHECK(pReply != nullptr) << "No reply attached to DebugInvokeReq";
4183 
4184   // We need to prevent other threads (including JDWP thread) from interacting with the debugger
4185   // while we send the reply but are not yet suspended. The JDWP token will be released just before
4186   // we suspend ourself again (see ThreadList::SuspendSelfForDebugger).
4187   gJdwpState->AcquireJdwpTokenForEvent(pReq->thread_id);
4188 
4189   // Send the reply unless the debugger detached before the completion of the method.
4190   if (IsDebuggerActive()) {
4191     const size_t replyDataLength = expandBufGetLength(pReply) - kJDWPHeaderLen;
4192     VLOG(jdwp) << StringPrintf("REPLY INVOKE id=0x%06x (length=%zu)",
4193                                pReq->request_id, replyDataLength);
4194 
4195     gJdwpState->SendRequest(pReply);
4196   } else {
4197     VLOG(jdwp) << "Not sending invoke reply because debugger detached";
4198   }
4199 }
4200 
4201 /*
4202  * "request" contains a full JDWP packet, possibly with multiple chunks.  We
4203  * need to process each, accumulate the replies, and ship the whole thing
4204  * back.
4205  *
4206  * Returns "true" if we have a reply.  The reply buffer is newly allocated,
4207  * and includes the chunk type/length, followed by the data.
4208  *
4209  * OLD-TODO: we currently assume that the request and reply include a single
4210  * chunk.  If this becomes inconvenient we will need to adapt.
4211  */
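// Each DDM chunk is a [u4] type tag and a [u4] payload length followed by the
// payload bytes; the 8-byte kChunkHdrLen used below when building the reply
// reflects exactly that header.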
4212 bool Dbg::DdmHandlePacket(JDWP::Request* request, uint8_t** pReplyBuf, int* pReplyLen) {
4213   Thread* self = Thread::Current();
4214   JNIEnv* env = self->GetJniEnv();
4215 
4216   uint32_t type = request->ReadUnsigned32("type");
4217   uint32_t length = request->ReadUnsigned32("length");
4218 
4219   // Create a byte[] corresponding to 'request'.
4220   size_t request_length = request->size();
4221   ScopedLocalRef<jbyteArray> dataArray(env, env->NewByteArray(request_length));
4222   if (dataArray.get() == nullptr) {
4223     LOG(WARNING) << "byte[] allocation failed: " << request_length;
4224     env->ExceptionClear();
4225     return false;
4226   }
4227   env->SetByteArrayRegion(dataArray.get(), 0, request_length,
4228                           reinterpret_cast<const jbyte*>(request->data()));
4229   request->Skip(request_length);
4230 
4231   // Run through and find all chunks.  [Currently just find the first.]
4232   ScopedByteArrayRO contents(env, dataArray.get());
4233   if (length != request_length) {
4234     LOG(WARNING) << StringPrintf("bad chunk found (len=%u pktLen=%zu)", length, request_length);
4235     return false;
4236   }
4237 
4238   // Call "private static Chunk dispatch(int type, byte[] data, int offset, int length)".
4239   ScopedLocalRef<jobject> chunk(env, env->CallStaticObjectMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
4240                                                                  WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_dispatch,
4241                                                                  type, dataArray.get(), 0, length));
4242   if (env->ExceptionCheck()) {
4243     LOG(INFO) << StringPrintf("Exception thrown by dispatcher for 0x%08x", type);
4244     env->ExceptionDescribe();
4245     env->ExceptionClear();
4246     return false;
4247   }
4248 
4249   if (chunk.get() == nullptr) {
4250     return false;
4251   }
4252 
4253   /*
4254    * Pull the pieces out of the chunk.  We copy the results into a
4255    * newly-allocated buffer that the caller can free.  We don't want to
4256    * continue using the Chunk object because nothing has a reference to it.
4257    *
4258    * We could avoid this by returning type/data/offset/length and having
4259    * the caller be aware of the object lifetime issues, but that
4260    * integrates the JDWP code more tightly into the rest of the runtime, and doesn't work
4261    * if we have responses for multiple chunks.
4262    *
4263    * So we're pretty much stuck with copying data around multiple times.
4264    */
4265   ScopedLocalRef<jbyteArray> replyData(env, reinterpret_cast<jbyteArray>(env->GetObjectField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data)));
4266   jint offset = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_offset);
4267   length = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_length);
4268   type = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type);
4269 
4270   VLOG(jdwp) << StringPrintf("DDM reply: type=0x%08x data=%p offset=%d length=%d", type, replyData.get(), offset, length);
4271   if (length == 0 || replyData.get() == nullptr) {
4272     return false;
4273   }
4274 
4275   const int kChunkHdrLen = 8;
4276   uint8_t* reply = new (std::nothrow) uint8_t[length + kChunkHdrLen];
4277   if (reply == nullptr) {
4278     LOG(WARNING) << "allocation failed: " << (length + kChunkHdrLen);
4279     return false;
4280   }
4281   JDWP::Set4BE(reply + 0, type);
4282   JDWP::Set4BE(reply + 4, length);
4283   env->GetByteArrayRegion(replyData.get(), offset, length, reinterpret_cast<jbyte*>(reply + kChunkHdrLen));
4284 
4285   *pReplyBuf = reply;
4286   *pReplyLen = length + kChunkHdrLen;
4287 
4288   VLOG(jdwp) << StringPrintf("dvmHandleDdm returning type=%.4s %p len=%d", reinterpret_cast<char*>(reply), reply, length);
4289   return true;
4290 }
4291 
4292 void Dbg::DdmBroadcast(bool connect) {
4293   VLOG(jdwp) << "Broadcasting DDM " << (connect ? "connect" : "disconnect") << "...";
4294 
4295   Thread* self = Thread::Current();
4296   if (self->GetState() != kRunnable) {
4297     LOG(ERROR) << "DDM broadcast in thread state " << self->GetState();
4298     /* try anyway? */
4299   }
4300 
4301   JNIEnv* env = self->GetJniEnv();
4302   jint event = connect ? 1 /*DdmServer.CONNECTED*/ : 2 /*DdmServer.DISCONNECTED*/;
4303   env->CallStaticVoidMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
4304                             WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_broadcast,
4305                             event);
4306   if (env->ExceptionCheck()) {
4307     LOG(ERROR) << "DdmServer.broadcast " << event << " failed";
4308     env->ExceptionDescribe();
4309     env->ExceptionClear();
4310   }
4311 }
4312 
4313 void Dbg::DdmConnected() {
4314   Dbg::DdmBroadcast(true);
4315 }
4316 
4317 void Dbg::DdmDisconnected() {
4318   Dbg::DdmBroadcast(false);
4319   gDdmThreadNotification = false;
4320 }
4321 
4322 /*
4323  * Send a notification when a thread starts, stops, or changes its name.
4324  *
4325  * Because we broadcast the full set of threads when the notifications are
4326  * first enabled, it's possible for "thread" to be actively executing.
4327  */
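// Payload layouts, as assembled below: THDE carries just the [u4] thread id;
// THCR and THNM carry the [u4] thread id followed by the thread name as a
// length-prefixed (4-byte) UTF-16BE string.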
4328 void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) {
4329   if (!gDdmThreadNotification) {
4330     return;
4331   }
4332 
4333   if (type == CHUNK_TYPE("THDE")) {
4334     uint8_t buf[4];
4335     JDWP::Set4BE(&buf[0], t->GetThreadId());
4336     Dbg::DdmSendChunk(CHUNK_TYPE("THDE"), 4, buf);
4337   } else {
4338     CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
4339     ScopedObjectAccessUnchecked soa(Thread::Current());
4340     StackHandleScope<1> hs(soa.Self());
4341     Handle<mirror::String> name(hs.NewHandle(t->GetThreadName(soa)));
4342     size_t char_count = (name.Get() != nullptr) ? name->GetLength() : 0;
4343     const jchar* chars = (name.Get() != nullptr) ? name->GetValue() : nullptr;
4344 
4345     std::vector<uint8_t> bytes;
4346     JDWP::Append4BE(bytes, t->GetThreadId());
4347     JDWP::AppendUtf16BE(bytes, chars, char_count);
4348     CHECK_EQ(bytes.size(), char_count*2 + sizeof(uint32_t)*2);
4349     Dbg::DdmSendChunk(type, bytes);
4350   }
4351 }
4352 
4353 void Dbg::DdmSetThreadNotification(bool enable) {
4354   // Enable/disable thread notifications.
4355   gDdmThreadNotification = enable;
4356   if (enable) {
4357     // Suspend the VM then post thread start notifications for all threads. Threads attaching will
4358     // see a suspension in progress and block until that ends. They then post their own start
4359     // notification.
4360     SuspendVM();
4361     std::list<Thread*> threads;
4362     Thread* self = Thread::Current();
4363     {
4364       MutexLock mu(self, *Locks::thread_list_lock_);
4365       threads = Runtime::Current()->GetThreadList()->GetList();
4366     }
4367     {
4368       ScopedObjectAccess soa(self);
4369       for (Thread* thread : threads) {
4370         Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR"));
4371       }
4372     }
4373     ResumeVM();
4374   }
4375 }
4376 
4377 void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) {
4378   if (IsDebuggerActive()) {
4379     gJdwpState->PostThreadChange(t, type == CHUNK_TYPE("THCR"));
4380   }
4381   Dbg::DdmSendThreadNotification(t, type);
4382 }
4383 
4384 void Dbg::PostThreadStart(Thread* t) {
4385   Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THCR"));
4386 }
4387 
4388 void Dbg::PostThreadDeath(Thread* t) {
4389   Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE"));
4390 }
4391 
4392 void Dbg::DdmSendChunk(uint32_t type, size_t byte_count, const uint8_t* buf) {
4393   CHECK(buf != nullptr);
4394   iovec vec[1];
4395   vec[0].iov_base = reinterpret_cast<void*>(const_cast<uint8_t*>(buf));
4396   vec[0].iov_len = byte_count;
4397   Dbg::DdmSendChunkV(type, vec, 1);
4398 }
4399 
4400 void Dbg::DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes) {
4401   DdmSendChunk(type, bytes.size(), &bytes[0]);
4402 }
4403 
4404 void Dbg::DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) {
4405   if (gJdwpState == nullptr) {
4406     VLOG(jdwp) << "Debugger thread not active, ignoring DDM send: " << type;
4407   } else {
4408     gJdwpState->DdmSendChunkV(type, iov, iov_count);
4409   }
4410 }
4411 
4412 JDWP::JdwpState* Dbg::GetJdwpState() {
4413   return gJdwpState;
4414 }
4415 
4416 int Dbg::DdmHandleHpifChunk(HpifWhen when) {
4417   if (when == HPIF_WHEN_NOW) {
4418     DdmSendHeapInfo(when);
4419     return true;
4420   }
4421 
4422   if (when != HPIF_WHEN_NEVER && when != HPIF_WHEN_NEXT_GC && when != HPIF_WHEN_EVERY_GC) {
4423     LOG(ERROR) << "invalid HpifWhen value: " << static_cast<int>(when);
4424     return false;
4425   }
4426 
4427   gDdmHpifWhen = when;
4428   return true;
4429 }
4430 
4431 bool Dbg::DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when, Dbg::HpsgWhat what, bool native) {
4432   if (when != HPSG_WHEN_NEVER && when != HPSG_WHEN_EVERY_GC) {
4433     LOG(ERROR) << "invalid HpsgWhen value: " << static_cast<int>(when);
4434     return false;
4435   }
4436 
4437   if (what != HPSG_WHAT_MERGED_OBJECTS && what != HPSG_WHAT_DISTINCT_OBJECTS) {
4438     LOG(ERROR) << "invalid HpsgWhat value: " << static_cast<int>(what);
4439     return false;
4440   }
4441 
4442   if (native) {
4443     gDdmNhsgWhen = when;
4444     gDdmNhsgWhat = what;
4445   } else {
4446     gDdmHpsgWhen = when;
4447     gDdmHpsgWhat = what;
4448   }
4449   return true;
4450 }
4451 
4452 void Dbg::DdmSendHeapInfo(HpifWhen reason) {
4453   // If there's a one-shot 'when', reset it.
4454   if (reason == gDdmHpifWhen) {
4455     if (gDdmHpifWhen == HPIF_WHEN_NEXT_GC) {
4456       gDdmHpifWhen = HPIF_WHEN_NEVER;
4457     }
4458   }
4459 
4460   /*
4461    * Chunk HPIF (client --> server)
4462    *
4463    * Heap Info. General information about the heap,
4464    * suitable for a summary display.
4465    *
4466    *   [u4]: number of heaps
4467    *
4468    *   For each heap:
4469    *     [u4]: heap ID
4470    *     [u8]: timestamp in ms since Unix epoch
4471    *     [u1]: capture reason (same as 'when' value from server)
4472    *     [u4]: max heap size in bytes (-Xmx)
4473    *     [u4]: current heap size in bytes
4474    *     [u4]: current number of bytes allocated
4475    *     [u4]: current number of objects allocated
4476    */
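  // For a single heap this fixed layout is 4 + (4 + 8 + 1 + 4 + 4 + 4 + 4) = 33 bytes,
  // which the CHECK_EQ below verifies.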
4477   uint8_t heap_count = 1;
4478   gc::Heap* heap = Runtime::Current()->GetHeap();
4479   std::vector<uint8_t> bytes;
4480   JDWP::Append4BE(bytes, heap_count);
4481   JDWP::Append4BE(bytes, 1);  // Heap id (bogus; we only have one heap).
4482   JDWP::Append8BE(bytes, MilliTime());
4483   JDWP::Append1BE(bytes, reason);
4484   JDWP::Append4BE(bytes, heap->GetMaxMemory());  // Max allowed heap size in bytes.
4485   JDWP::Append4BE(bytes, heap->GetTotalMemory());  // Current heap size in bytes.
4486   JDWP::Append4BE(bytes, heap->GetBytesAllocated());
4487   JDWP::Append4BE(bytes, heap->GetObjectsAllocated());
4488   CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4)));
4489   Dbg::DdmSendChunk(CHUNK_TYPE("HPIF"), bytes);
4490 }
4491 
4492 enum HpsgSolidity {
4493   SOLIDITY_FREE = 0,
4494   SOLIDITY_HARD = 1,
4495   SOLIDITY_SOFT = 2,
4496   SOLIDITY_WEAK = 3,
4497   SOLIDITY_PHANTOM = 4,
4498   SOLIDITY_FINALIZABLE = 5,
4499   SOLIDITY_SWEEP = 6,
4500 };
4501 
4502 enum HpsgKind {
4503   KIND_OBJECT = 0,
4504   KIND_CLASS_OBJECT = 1,
4505   KIND_ARRAY_1 = 2,
4506   KIND_ARRAY_2 = 3,
4507   KIND_ARRAY_4 = 4,
4508   KIND_ARRAY_8 = 5,
4509   KIND_UNKNOWN = 6,
4510   KIND_NATIVE = 7,
4511 };
4512 
4513 #define HPSG_PARTIAL (1<<7)
4514 #define HPSG_STATE(solidity, kind) ((uint8_t)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))
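// For example, HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT) encodes to 0x01, and
// HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4) to (4 << 3) | 1 == 0x21.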
4515 
4516 class HeapChunkContext {
4517  public:
4518   // Maximum chunk size.  Obtain this from the formula:
4519   // (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2
4520   HeapChunkContext(bool merge, bool native)
4521       : buf_(16384 - 16),
4522         type_(0),
4523         chunk_overhead_(0) {
4524     Reset();
4525     if (native) {
4526       type_ = CHUNK_TYPE("NHSG");
4527     } else {
4528       type_ = merge ? CHUNK_TYPE("HPSG") : CHUNK_TYPE("HPSO");
4529     }
4530   }
4531 
4532   ~HeapChunkContext() {
4533     if (p_ > &buf_[0]) {
4534       Flush();
4535     }
4536   }
4537 
4538   void SetChunkOverhead(size_t chunk_overhead) {
4539     chunk_overhead_ = chunk_overhead;
4540   }
4541 
4542   void ResetStartOfNextChunk() {
4543     startOfNextMemoryChunk_ = nullptr;
4544   }
4545 
4546   void EnsureHeader(const void* chunk_ptr) {
4547     if (!needHeader_) {
4548       return;
4549     }
4550 
4551     // Start a new HPSx chunk.
4552     JDWP::Write4BE(&p_, 1);  // Heap id (bogus; we only have one heap).
4553     JDWP::Write1BE(&p_, 8);  // Size of allocation unit, in bytes.
4554 
4555     JDWP::Write4BE(&p_, reinterpret_cast<uintptr_t>(chunk_ptr));  // virtual address of segment start.
4556     JDWP::Write4BE(&p_, 0);  // offset of this piece (relative to the virtual address).
4557     // [u4]: length of piece, in allocation units
4558     // We won't know this until we're done, so save the offset and stuff in a dummy value.
4559     pieceLenField_ = p_;
4560     JDWP::Write4BE(&p_, 0x55555555);
4561     needHeader_ = false;
4562   }
4563 
4564   void Flush() SHARED_REQUIRES(Locks::mutator_lock_) {
4565     if (pieceLenField_ == nullptr) {
4566       // Flush immediately post Reset (maybe back-to-back Flush). Ignore.
4567       CHECK(needHeader_);
4568       return;
4569     }
4570     // Patch the "length of piece" field.
4571     CHECK_LE(&buf_[0], pieceLenField_);
4572     CHECK_LE(pieceLenField_, p_);
4573     JDWP::Set4BE(pieceLenField_, totalAllocationUnits_);
4574 
4575     Dbg::DdmSendChunk(type_, p_ - &buf_[0], &buf_[0]);
4576     Reset();
4577   }
4578 
4579   static void HeapChunkJavaCallback(void* start, void* end, size_t used_bytes, void* arg)
4580       SHARED_REQUIRES(Locks::heap_bitmap_lock_,
4581                             Locks::mutator_lock_) {
4582     reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkJavaCallback(start, end, used_bytes);
4583   }
4584 
4585   static void HeapChunkNativeCallback(void* start, void* end, size_t used_bytes, void* arg)
4586       SHARED_REQUIRES(Locks::mutator_lock_) {
4587     reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkNativeCallback(start, end, used_bytes);
4588   }
4589 
4590  private:
4591   enum { ALLOCATION_UNIT_SIZE = 8 };
4592 
4593   void Reset() {
4594     p_ = &buf_[0];
4595     ResetStartOfNextChunk();
4596     totalAllocationUnits_ = 0;
4597     needHeader_ = true;
4598     pieceLenField_ = nullptr;
4599   }
4600 
4601   bool IsNative() const {
4602     return type_ == CHUNK_TYPE("NHSG");
4603   }
4604 
4605   // Returns true if the object is not an empty chunk.
4606   bool ProcessRecord(void* start, size_t used_bytes) SHARED_REQUIRES(Locks::mutator_lock_) {
4607     // Note: heap callbacks cannot manipulate the heap they are crawling, so care is taken
4608     // in the following code not to allocate memory by ensuring buf_ is of the correct size.
4609     if (used_bytes == 0) {
4610       if (start == nullptr) {
4611         // Reset for start of new heap.
4612         startOfNextMemoryChunk_ = nullptr;
4613         Flush();
4614       }
4615       // Only process in-use memory so that free region information
4616       // also includes dlmalloc bookkeeping.
4617       return false;
4618     }
4619     if (startOfNextMemoryChunk_ != nullptr) {
4620       // Transmit any pending free memory. Native free memory over kMaxFreeLen is likely due to
4621       // the use of mmaps, so don't report it. If there is no free memory, start a new segment.
4622       bool flush = true;
4623       if (start > startOfNextMemoryChunk_) {
4624         const size_t kMaxFreeLen = 2 * kPageSize;
4625         void* free_start = startOfNextMemoryChunk_;
4626         void* free_end = start;
4627         const size_t free_len =
4628             reinterpret_cast<uintptr_t>(free_end) - reinterpret_cast<uintptr_t>(free_start);
4629         if (!IsNative() || free_len < kMaxFreeLen) {
4630           AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), free_start, free_len, IsNative());
4631           flush = false;
4632         }
4633       }
4634       if (flush) {
4635         startOfNextMemoryChunk_ = nullptr;
4636         Flush();
4637       }
4638     }
4639     return true;
4640   }
4641 
4642   void HeapChunkNativeCallback(void* start, void* /*end*/, size_t used_bytes)
4643       SHARED_REQUIRES(Locks::mutator_lock_) {
4644     if (ProcessRecord(start, used_bytes)) {
4645       uint8_t state = ExamineNativeObject(start);
4646       AppendChunk(state, start, used_bytes + chunk_overhead_, true /*is_native*/);
4647       startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
4648     }
4649   }
4650 
4651   void HeapChunkJavaCallback(void* start, void* /*end*/, size_t used_bytes)
4652       SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
4653     if (ProcessRecord(start, used_bytes)) {
4654       // Determine the type of this chunk.
4655       // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
4656       // If it's the same, we should combine them.
4657       uint8_t state = ExamineJavaObject(reinterpret_cast<mirror::Object*>(start));
4658       AppendChunk(state, start, used_bytes + chunk_overhead_, false /*is_native*/);
4659       startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
4660     }
4661   }
4662 
4663   void AppendChunk(uint8_t state, void* ptr, size_t length, bool is_native)
4664       SHARED_REQUIRES(Locks::mutator_lock_) {
4665     // Make sure there's enough room left in the buffer.
4666     // We need to use two bytes for every fractional 256 allocation units used by the chunk plus
4667     // 17 bytes for any header.
4668     const size_t needed = ((RoundUp(length / ALLOCATION_UNIT_SIZE, 256) / 256) * 2) + 17;
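    // For example, a 1000-byte chunk covers 125 allocation units, which rounds up to a
    // single 256-unit group (2 bytes of state data), so needed == 2 + 17 == 19 bytes.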
4669     size_t byte_left = &buf_.back() - p_;
4670     if (byte_left < needed) {
4671       if (is_native) {
4672         // Cannot trigger memory allocation while walking native heap.
4673         return;
4674       }
4675       Flush();
4676     }
4677 
4678     byte_left = &buf_.back() - p_;
4679     if (byte_left < needed) {
4680       LOG(WARNING) << "Chunk is too big to transmit (chunk_len=" << length << ", needed="
4681           << needed << " bytes)";
4682       return;
4683     }
4684     EnsureHeader(ptr);
4685     // Write out the chunk description.
4686     length /= ALLOCATION_UNIT_SIZE;   // Convert to allocation units.
4687     totalAllocationUnits_ += length;
4688     while (length > 256) {
4689       *p_++ = state | HPSG_PARTIAL;
4690       *p_++ = 255;     // length - 1
4691       length -= 256;
4692     }
4693     *p_++ = state;
4694     *p_++ = length - 1;
4695   }
4696 
4697   uint8_t ExamineNativeObject(const void* p) SHARED_REQUIRES(Locks::mutator_lock_) {
4698     return p == nullptr ? HPSG_STATE(SOLIDITY_FREE, 0) : HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
4699   }
4700 
4701   uint8_t ExamineJavaObject(mirror::Object* o)
4702       SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
4703     if (o == nullptr) {
4704       return HPSG_STATE(SOLIDITY_FREE, 0);
4705     }
4706     // It's an allocated chunk. Figure out what it is.
4707     gc::Heap* heap = Runtime::Current()->GetHeap();
4708     if (!heap->IsLiveObjectLocked(o)) {
4709       LOG(ERROR) << "Invalid object in managed heap: " << o;
4710       return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
4711     }
4712     mirror::Class* c = o->GetClass();
4713     if (c == nullptr) {
4714       // The object was probably just created but hasn't been initialized yet.
4715       return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
4716     }
4717     if (!heap->IsValidObjectAddress(c)) {
4718       LOG(ERROR) << "Invalid class for managed heap object: " << o << " " << c;
4719       return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
4720     }
4721     if (c->GetClass() == nullptr) {
4722       LOG(ERROR) << "Null class of class " << c << " for object " << o;
4723       return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
4724     }
4725     if (c->IsClassClass()) {
4726       return HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
4727     }
4728     if (c->IsArrayClass()) {
4729       switch (c->GetComponentSize()) {
4730       case 1: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
4731       case 2: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
4732       case 4: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
4733       case 8: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
4734       }
4735     }
4736     return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
4737   }
4738 
4739   std::vector<uint8_t> buf_;
4740   uint8_t* p_;
4741   uint8_t* pieceLenField_;
4742   void* startOfNextMemoryChunk_;
4743   size_t totalAllocationUnits_;
4744   uint32_t type_;
4745   bool needHeader_;
4746   size_t chunk_overhead_;
4747 
4748   DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
4749 };
4750 
4751 static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
4752     SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
4753   const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
4754   HeapChunkContext::HeapChunkJavaCallback(
4755       obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
4756 }
4757 
4758 void Dbg::DdmSendHeapSegments(bool native) {
4759   Dbg::HpsgWhen when = native ? gDdmNhsgWhen : gDdmHpsgWhen;
4760   Dbg::HpsgWhat what = native ? gDdmNhsgWhat : gDdmHpsgWhat;
4761   if (when == HPSG_WHEN_NEVER) {
4762     return;
4763   }
4764   // Figure out what kind of chunks we'll be sending.
4765   CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS)
4766       << static_cast<int>(what);
4767 
4768   // First, send a heap start chunk.
4769   uint8_t heap_id[4];
4770   JDWP::Set4BE(&heap_id[0], 1);  // Heap id (bogus; we only have one heap).
4771   Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"), sizeof(heap_id), heap_id);
4772   Thread* self = Thread::Current();
4773   Locks::mutator_lock_->AssertSharedHeld(self);
4774 
4775   // Send a series of heap segment chunks.
4776   HeapChunkContext context(what == HPSG_WHAT_MERGED_OBJECTS, native);
4777   if (native) {
4778     UNIMPLEMENTED(WARNING) << "Native heap inspection is not supported";
4779   } else {
4780     gc::Heap* heap = Runtime::Current()->GetHeap();
4781     for (const auto& space : heap->GetContinuousSpaces()) {
4782       if (space->IsDlMallocSpace()) {
4783         ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
4784         // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
4785         // allocation then the first sizeof(size_t) may belong to it.
4786         context.SetChunkOverhead(sizeof(size_t));
4787         space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
4788       } else if (space->IsRosAllocSpace()) {
4789         context.SetChunkOverhead(0);
4790         // Need to acquire the mutator lock before the heap bitmap lock with exclusive access since
4791         // RosAlloc's internal logic doesn't know to release and reacquire the heap bitmap lock.
4792         ScopedThreadSuspension sts(self, kSuspended);
4793         ScopedSuspendAll ssa(__FUNCTION__);
4794         ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
4795         space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
4796       } else if (space->IsBumpPointerSpace()) {
4797         ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
4798         context.SetChunkOverhead(0);
4799         space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context);
4800         HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
4801       } else if (space->IsRegionSpace()) {
4802         heap->IncrementDisableMovingGC(self);
4803         {
4804           ScopedThreadSuspension sts(self, kSuspended);
4805           ScopedSuspendAll ssa(__FUNCTION__);
4806           ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
4807           context.SetChunkOverhead(0);
4808           space->AsRegionSpace()->Walk(BumpPointerSpaceCallback, &context);
4809           HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
4810         }
4811         heap->DecrementDisableMovingGC(self);
4812       } else {
4813         UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
4814       }
4815       context.ResetStartOfNextChunk();
4816     }
4817     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
4818     // Walk the large objects, these are not in the AllocSpace.
4819     context.SetChunkOverhead(0);
4820     heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
4821   }
4822 
4823   // Finally, send a heap end chunk.
4824   Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id);
4825 }
4826 
4827 void Dbg::SetAllocTrackingEnabled(bool enable) {
4828   gc::AllocRecordObjectMap::SetAllocTrackingEnabled(enable);
4829 }
4830 
4831 void Dbg::DumpRecentAllocations() {
4832   ScopedObjectAccess soa(Thread::Current());
4833   MutexLock mu(soa.Self(), *Locks::alloc_tracker_lock_);
4834   if (!Runtime::Current()->GetHeap()->IsAllocTrackingEnabled()) {
4835     LOG(INFO) << "Not recording tracked allocations";
4836     return;
4837   }
4838   gc::AllocRecordObjectMap* records = Runtime::Current()->GetHeap()->GetAllocationRecords();
4839   CHECK(records != nullptr);
4840 
4841   const uint16_t capped_count = CappedAllocRecordCount(records->GetRecentAllocationSize());
4842   uint16_t count = capped_count;
4843 
4844   LOG(INFO) << "Tracked allocations (count=" << count << ")";
4845   for (auto it = records->RBegin(), end = records->REnd();
4846       count > 0 && it != end; count--, it++) {
4847     const gc::AllocRecord* record = &it->second;
4848 
4849     LOG(INFO) << StringPrintf(" Thread %-2d %6zd bytes ", record->GetTid(), record->ByteCount())
4850               << PrettyClass(record->GetClass());
4851 
4852     for (size_t stack_frame = 0, depth = record->GetDepth(); stack_frame < depth; ++stack_frame) {
4853       const gc::AllocRecordStackTraceElement& stack_element = record->StackElement(stack_frame);
4854       ArtMethod* m = stack_element.GetMethod();
4855       LOG(INFO) << "    " << PrettyMethod(m) << " line " << stack_element.ComputeLineNumber();
4856     }
4857 
4858     // Pause periodically to help logcat catch up.
4859     if ((count % 5) == 0) {
4860       usleep(40000);
4861     }
4862   }
4863 }
4864 
4865 class StringTable {
4866  public:
4867   StringTable() {
4868   }
4869 
4870   void Add(const std::string& str) {
4871     table_.insert(str);
4872   }
4873 
4874   void Add(const char* str) {
4875     table_.insert(str);
4876   }
4877 
4878   size_t IndexOf(const char* s) const {
4879     auto it = table_.find(s);
4880     if (it == table_.end()) {
4881       LOG(FATAL) << "IndexOf(\"" << s << "\") failed";
4882     }
4883     return std::distance(table_.begin(), it);
4884   }
4885 
4886   size_t Size() const {
4887     return table_.size();
4888   }
4889 
4890   void WriteTo(std::vector<uint8_t>& bytes) const {
4891     for (const std::string& str : table_) {
4892       const char* s = str.c_str();
4893       size_t s_len = CountModifiedUtf8Chars(s);
4894       std::unique_ptr<uint16_t[]> s_utf16(new uint16_t[s_len]);
4895       ConvertModifiedUtf8ToUtf16(s_utf16.get(), s);
4896       JDWP::AppendUtf16BE(bytes, s_utf16.get(), s_len);
4897     }
4898   }
4899 
4900  private:
4901   std::set<std::string> table_;
4902   DISALLOW_COPY_AND_ASSIGN(StringTable);
4903 };
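// Note: IndexOf and WriteTo both iterate the sorted std::set, so the 16-bit indexes
// embedded in each entry line up with the order in which WriteTo serializes the
// strings at the end of the message.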
4904 
4905 static const char* GetMethodSourceFile(ArtMethod* method)
4906     SHARED_REQUIRES(Locks::mutator_lock_) {
4907   DCHECK(method != nullptr);
4908   const char* source_file = method->GetDeclaringClassSourceFile();
4909   return (source_file != nullptr) ? source_file : "";
4910 }
4911 
4912 /*
4913  * The data we send to DDMS contains everything we have recorded.
4914  *
4915  * Message header (all values big-endian):
4916  * (1b) message header len (to allow future expansion); includes itself
4917  * (1b) entry header len
4918  * (1b) stack frame len
4919  * (2b) number of entries
4920  * (4b) offset to string table from start of message
4921  * (2b) number of class name strings
4922  * (2b) number of method name strings
4923  * (2b) number of source file name strings
4924  * For each entry:
4925  *   (4b) total allocation size
4926  *   (2b) thread id
4927  *   (2b) allocated object's class name index
4928  *   (1b) stack depth
4929  *   For each stack frame:
4930  *     (2b) method's class name
4931  *     (2b) method name
4932  *     (2b) method source file
4933  *     (2b) line number, clipped to 32767; -2 if native; -1 if no source
4934  * (xb) class name strings
4935  * (xb) method name strings
4936  * (xb) source file strings
4937  *
4938  * As with other DDM traffic, strings are sent as a 4-byte length
4939  * followed by UTF-16 data.
4940  *
4941  * We send up 16-bit unsigned indexes into string tables.  In theory there
4942  * can be (kMaxAllocRecordStackDepth * alloc_record_max_) unique strings in
4943  * each table, but in practice there should be far fewer.
4944  *
4945  * The chief reason for using a string table here is to keep the size of
4946  * the DDMS message to a minimum.  This is partly to make the protocol
4947  * efficient, but also because we have to form the whole thing up all at
4948  * once in a memory buffer.
4949  *
4950  * We use separate string tables for class names, method names, and source
4951  * files to keep the indexes small.  There will generally be no overlap
4952  * between the contents of these tables.
4953  */
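// The header constants used below follow directly from this layout: the message header
// is 1 + 1 + 1 + 2 + 4 + 2 + 2 + 2 = 15 bytes, each entry header 4 + 2 + 2 + 1 = 9 bytes,
// and each stack frame 2 + 2 + 2 + 2 = 8 bytes.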
4954 jbyteArray Dbg::GetRecentAllocations() {
4955   if ((false)) {
4956     DumpRecentAllocations();
4957   }
4958 
4959   Thread* self = Thread::Current();
4960   std::vector<uint8_t> bytes;
4961   {
4962     MutexLock mu(self, *Locks::alloc_tracker_lock_);
4963     gc::AllocRecordObjectMap* records = Runtime::Current()->GetHeap()->GetAllocationRecords();
4964     // In case this method is called while the allocation tracker is disabled,
4965     // we should still send some data back.
4966     gc::AllocRecordObjectMap dummy;
4967     if (records == nullptr) {
4968       CHECK(!Runtime::Current()->GetHeap()->IsAllocTrackingEnabled());
4969       records = &dummy;
4970     }
4971     // We don't need to wait on the condition variable records->new_record_condition_, because this
4972     // function only reads the class objects, which are already marked so it doesn't change their
4973     // reachability.
4974 
4975     //
4976     // Part 1: generate string tables.
4977     //
4978     StringTable class_names;
4979     StringTable method_names;
4980     StringTable filenames;
4981 
4982     const uint16_t capped_count = CappedAllocRecordCount(records->GetRecentAllocationSize());
4983     uint16_t count = capped_count;
4984     for (auto it = records->RBegin(), end = records->REnd();
4985          count > 0 && it != end; count--, it++) {
4986       const gc::AllocRecord* record = &it->second;
4987       std::string temp;
4988       class_names.Add(record->GetClassDescriptor(&temp));
4989       for (size_t i = 0, depth = record->GetDepth(); i < depth; i++) {
4990         ArtMethod* m = record->StackElement(i).GetMethod();
4991         class_names.Add(m->GetDeclaringClassDescriptor());
4992         method_names.Add(m->GetName());
4993         filenames.Add(GetMethodSourceFile(m));
4994       }
4995     }
4996 
4997     LOG(INFO) << "recent allocation records: " << capped_count;
4998     LOG(INFO) << "allocation records all objects: " << records->Size();
4999 
5000     //
5001     // Part 2: Generate the output and store it in the buffer.
5002     //
5003 
5004     // (1b) message header len (to allow future expansion); includes itself
5005     // (1b) entry header len
5006     // (1b) stack frame len
5007     const int kMessageHeaderLen = 15;
5008     const int kEntryHeaderLen = 9;
5009     const int kStackFrameLen = 8;
5010     JDWP::Append1BE(bytes, kMessageHeaderLen);
5011     JDWP::Append1BE(bytes, kEntryHeaderLen);
5012     JDWP::Append1BE(bytes, kStackFrameLen);
5013 
5014     // (2b) number of entries
5015     // (4b) offset to string table from start of message
5016     // (2b) number of class name strings
5017     // (2b) number of method name strings
5018     // (2b) number of source file name strings
5019     JDWP::Append2BE(bytes, capped_count);
5020     size_t string_table_offset = bytes.size();
5021     JDWP::Append4BE(bytes, 0);  // We'll patch this later...
5022     JDWP::Append2BE(bytes, class_names.Size());
5023     JDWP::Append2BE(bytes, method_names.Size());
5024     JDWP::Append2BE(bytes, filenames.Size());
5025 
5026     std::string temp;
5027     count = capped_count;
5028     // The last "count" allocation records in "records" are the most recent "count"
5029     // allocations. Iterate in reverse so that the most recent allocation is sent first.
5030     for (auto it = records->RBegin(), end = records->REnd();
5031          count > 0 && it != end; count--, it++) {
5032       // For each entry:
5033       // (4b) total allocation size
5034       // (2b) thread id
5035       // (2b) allocated object's class name index
5036       // (1b) stack depth
5037       const gc::AllocRecord* record = &it->second;
5038       size_t stack_depth = record->GetDepth();
5039       size_t allocated_object_class_name_index =
5040           class_names.IndexOf(record->GetClassDescriptor(&temp));
5041       JDWP::Append4BE(bytes, record->ByteCount());
5042       JDWP::Append2BE(bytes, static_cast<uint16_t>(record->GetTid()));
5043       JDWP::Append2BE(bytes, allocated_object_class_name_index);
5044       JDWP::Append1BE(bytes, stack_depth);
5045 
5046       for (size_t stack_frame = 0; stack_frame < stack_depth; ++stack_frame) {
5047         // For each stack frame:
5048         // (2b) method's class name
5049         // (2b) method name
5050         // (2b) method source file
5051         // (2b) line number, clipped to 32767; -2 if native; -1 if no source
5052         ArtMethod* m = record->StackElement(stack_frame).GetMethod();
5053         size_t class_name_index = class_names.IndexOf(m->GetDeclaringClassDescriptor());
5054         size_t method_name_index = method_names.IndexOf(m->GetName());
5055         size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(m));
5056         JDWP::Append2BE(bytes, class_name_index);
5057         JDWP::Append2BE(bytes, method_name_index);
5058         JDWP::Append2BE(bytes, file_name_index);
5059         JDWP::Append2BE(bytes, record->StackElement(stack_frame).ComputeLineNumber());
5060       }
5061     }
5062 
5063     // (xb) class name strings
5064     // (xb) method name strings
5065     // (xb) source file strings
5066     JDWP::Set4BE(&bytes[string_table_offset], bytes.size());
5067     class_names.WriteTo(bytes);
5068     method_names.WriteTo(bytes);
5069     filenames.WriteTo(bytes);
5070   }
5071   JNIEnv* env = self->GetJniEnv();
5072   jbyteArray result = env->NewByteArray(bytes.size());
5073   if (result != nullptr) {
5074     env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0]));
5075   }
5076   return result;
5077 }
5078 
5079 ArtMethod* DeoptimizationRequest::Method() const {
5080   ScopedObjectAccessUnchecked soa(Thread::Current());
5081   return soa.DecodeMethod(method_);
5082 }
5083 
5084 void DeoptimizationRequest::SetMethod(ArtMethod* m) {
5085   ScopedObjectAccessUnchecked soa(Thread::Current());
5086   method_ = soa.EncodeMethod(m);
5087 }
5088 
5089 void Dbg::VisitRoots(RootVisitor* visitor) {
5090   // Visit breakpoint roots, used to prevent unloading of methods with breakpoints.
5091   ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
5092   BufferedRootVisitor<128> root_visitor(visitor, RootInfo(kRootVMInternal));
5093   for (Breakpoint& breakpoint : gBreakpoints) {
5094     breakpoint.Method()->VisitRoots(root_visitor, sizeof(void*));
5095   }
5096 }
5097 
5098 }  // namespace art
5099