/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_DEBUGGER_H_
#define ART_RUNTIME_DEBUGGER_H_

#include <pthread.h>

#include <set>
#include <string>
#include <vector>

#include "art_method.h"
#include "base/array_ref.h"
#include "base/locks.h"
#include "base/logging.h"
#include "base/macros.h"
#include "instrumentation.h"
#include "jni.h"
#include "runtime.h"
#include "runtime_callbacks.h"
#include "thread.h"
#include "thread_state.h"

namespace art HIDDEN {

class Dbg {
 public:
  EXPORT static void SetJdwpAllowed(bool allowed);
  EXPORT static bool IsJdwpAllowed();

  // Invoked by the GC in case we need to keep DDMS informed.
  static void GcDidFinish() REQUIRES(!Locks::mutator_lock_);

  static uint8_t ToJdwpThreadStatus(ThreadState state);

  // Indicates whether we need to force the use of the interpreter when returning from the
  // interpreter into the runtime. This allows us to deoptimize the stack and continue
  // execution with the interpreter for debugging.
  static bool IsForcedInterpreterNeededForUpcall(Thread* thread, ArtMethod* m)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (LIKELY(!thread->HasDebuggerShadowFrames())) {
      return false;
    }
    // If we have debugger stack frames, we always need to go back to the interpreter unless
    // the method is native or a proxy.
    return m != nullptr && !m->IsProxyMethod() && !m->IsNative();
  }

  // Indicates whether we need to force the use of the interpreter when handling an
  // exception. This allows us to deoptimize the stack and continue execution with
  // the interpreter.
  // Note: the interpreter will start by handling the exception when executing
  // the deoptimized frames.
  static bool IsForcedInterpreterNeededForException(Thread* thread)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    // A quick check to avoid walking the stack. If there are no shadow frames and no methods
    // that need to be deoptimized, we can safely continue with optimized code.
    if (LIKELY(!thread->HasDebuggerShadowFrames() &&
               Runtime::Current()->GetInstrumentation()->IsDeoptimizedMethodsEmpty())) {
      return false;
    }
    return IsForcedInterpreterNeededForExceptionImpl(thread);
  }

  /*
   * DDM support.
   */
  static void DdmSendThreadNotification(Thread* t, uint32_t type)
      REQUIRES_SHARED(Locks::mutator_lock_);
  static void DdmSetThreadNotification(bool enable)
      REQUIRES(!Locks::thread_list_lock_);
  EXPORT static bool DdmHandleChunk(JNIEnv* env,
                                    uint32_t type,
                                    const ArrayRef<const jbyte>& data,
                                    /*out*/ uint32_t* out_type,
                                    /*out*/ std::vector<uint8_t>* out_data);

  EXPORT static void DdmConnected() REQUIRES_SHARED(Locks::mutator_lock_);
  EXPORT static void DdmDisconnected() REQUIRES_SHARED(Locks::mutator_lock_);

  /*
   * Allocation tracking support.
   */
  static void SetAllocTrackingEnabled(bool enabled) REQUIRES(!Locks::alloc_tracker_lock_);
  static jbyteArray GetRecentAllocations()
      REQUIRES(!Locks::alloc_tracker_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
  static void DumpRecentAllocations() REQUIRES(!Locks::alloc_tracker_lock_);

  enum HpifWhen {
    HPIF_WHEN_NEVER = 0,
    HPIF_WHEN_NOW = 1,
    HPIF_WHEN_NEXT_GC = 2,
    HPIF_WHEN_EVERY_GC = 3
  };
  static int DdmHandleHpifChunk(HpifWhen when)
      REQUIRES_SHARED(Locks::mutator_lock_);

  enum HpsgWhen {
    HPSG_WHEN_NEVER = 0,
    HPSG_WHEN_EVERY_GC = 1,
  };
  enum HpsgWhat {
    HPSG_WHAT_MERGED_OBJECTS = 0,
    HPSG_WHAT_DISTINCT_OBJECTS = 1,
  };
  static bool DdmHandleHpsgNhsgChunk(HpsgWhen when, HpsgWhat what, bool native);

  static void DdmSendHeapInfo(HpifWhen reason)
      REQUIRES_SHARED(Locks::mutator_lock_);
  static void DdmSendHeapSegments(bool native)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static ThreadLifecycleCallback* GetThreadLifecycleCallback() {
    return &thread_lifecycle_callback_;
  }

 private:
  static void DdmBroadcast(bool connect) REQUIRES_SHARED(Locks::mutator_lock_);

  static void PostThreadStart(Thread* t)
      REQUIRES_SHARED(Locks::mutator_lock_);
  static void PostThreadDeath(Thread* t)
      REQUIRES_SHARED(Locks::mutator_lock_);
  static void PostThreadStartOrStop(Thread*, uint32_t)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static bool IsForcedInterpreterNeededForExceptionImpl(Thread* thread)
      REQUIRES_SHARED(Locks::mutator_lock_);

  class DbgThreadLifecycleCallback : public ThreadLifecycleCallback {
   public:
    void ThreadStart(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_);
    void ThreadDeath(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_);
  };

  static DbgThreadLifecycleCallback thread_lifecycle_callback_;

  DISALLOW_COPY_AND_ASSIGN(Dbg);
};

#define CHUNK_TYPE(_name) \
    static_cast<uint32_t>((_name)[0] << 24 | (_name)[1] << 16 | (_name)[2] << 8 | (_name)[3])

}  // namespace art

#endif  // ART_RUNTIME_DEBUGGER_H_
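
// Illustrative usage sketch (not part of the original header): shows one way a
// caller might hand a DDM chunk to Dbg::DdmHandleChunk using the CHUNK_TYPE
// macro above. The variables `env`, `payload`, and `payload_size`, as well as
// the "THST" tag, are hypothetical and only serve as an example.
//
//   uint32_t out_type = 0;
//   std::vector<uint8_t> out_data;
//   art::ArrayRef<const jbyte> data(reinterpret_cast<const jbyte*>(payload), payload_size);
//   if (art::Dbg::DdmHandleChunk(env, CHUNK_TYPE("THST"), data, &out_type, &out_data)) {
//     // out_type identifies the reply chunk and out_data holds its payload.
//   }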