1 /* 2 * Copyright (C) 2008 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 /* 18 * Dalvik-specific side of debugger support. (The JDWP code is intended to 19 * be relatively generic.) 20 */ 21 #ifndef ART_RUNTIME_DEBUGGER_H_ 22 #define ART_RUNTIME_DEBUGGER_H_ 23 24 #include <pthread.h> 25 26 #include <set> 27 #include <string> 28 #include <vector> 29 30 #include "gc_root.h" 31 #include "jdwp/jdwp.h" 32 #include "jni.h" 33 #include "jvalue.h" 34 #include "thread.h" 35 #include "thread_state.h" 36 37 namespace art { 38 namespace mirror { 39 class Class; 40 class Object; 41 class Throwable; 42 } // namespace mirror 43 class ArtField; 44 class ArtMethod; 45 class ObjectRegistry; 46 class ScopedObjectAccess; 47 class ScopedObjectAccessUnchecked; 48 class StackVisitor; 49 class Thread; 50 51 /* 52 * Invoke-during-breakpoint support. 
53 */ 54 struct DebugInvokeReq { DebugInvokeReqDebugInvokeReq55 DebugInvokeReq(uint32_t invoke_request_id, JDWP::ObjectId invoke_thread_id, 56 mirror::Object* invoke_receiver, mirror::Class* invoke_class, 57 ArtMethod* invoke_method, uint32_t invoke_options, 58 uint64_t args[], uint32_t args_count) 59 : request_id(invoke_request_id), thread_id(invoke_thread_id), receiver(invoke_receiver), 60 klass(invoke_class), method(invoke_method), arg_count(args_count), arg_values(args), 61 options(invoke_options), reply(JDWP::expandBufAlloc()) { 62 } 63 ~DebugInvokeReqDebugInvokeReq64 ~DebugInvokeReq() { 65 JDWP::expandBufFree(reply); 66 } 67 68 // Request 69 const uint32_t request_id; 70 const JDWP::ObjectId thread_id; 71 GcRoot<mirror::Object> receiver; // not used for ClassType.InvokeMethod. 72 GcRoot<mirror::Class> klass; 73 ArtMethod* const method; 74 const uint32_t arg_count; 75 std::unique_ptr<uint64_t[]> arg_values; // will be null if arg_count_ == 0. We take ownership 76 // of this array so we must delete it upon destruction. 77 const uint32_t options; 78 79 // Reply 80 JDWP::ExpandBuf* const reply; 81 82 void VisitRoots(RootVisitor* visitor, const RootInfo& root_info) 83 SHARED_REQUIRES(Locks::mutator_lock_); 84 85 private: 86 DISALLOW_COPY_AND_ASSIGN(DebugInvokeReq); 87 }; 88 89 // Thread local data-structure that holds fields for controlling single-stepping. 
class SingleStepControl {
 public:
  // Captures the step parameters and the location (stack depth + method) at
  // which the step was requested; all of it is immutable except dex_pcs_.
  SingleStepControl(JDWP::JdwpStepSize step_size, JDWP::JdwpStepDepth step_depth,
                    int stack_depth, ArtMethod* method)
      : step_size_(step_size), step_depth_(step_depth),
        stack_depth_(stack_depth), method_(method) {
  }

  JDWP::JdwpStepSize GetStepSize() const {
    return step_size_;
  }

  JDWP::JdwpStepDepth GetStepDepth() const {
    return step_depth_;
  }

  int GetStackDepth() const {
    return stack_depth_;
  }

  ArtMethod* GetMethod() const {
    return method_;
  }

  const std::set<uint32_t>& GetDexPcs() const {
    return dex_pcs_;
  }

  // Adds a dex pc belonging to the source line the step started on (defined in the .cc file).
  void AddDexPc(uint32_t dex_pc);

  // Returns true if 'dex_pc' is one of the recorded pcs (defined in the .cc file).
  bool ContainsDexPc(uint32_t dex_pc) const;

 private:
  // See JdwpStepSize and JdwpStepDepth for details.
  const JDWP::JdwpStepSize step_size_;
  const JDWP::JdwpStepDepth step_depth_;

  // The stack depth when this single-step was initiated. This is used to support SD_OVER and SD_OUT
  // single-step depth.
  const int stack_depth_;

  // The location this single-step was initiated from.
  // A single-step is initiated in a suspended thread. We save here the current method and the
  // set of DEX pcs associated to the source line number where the suspension occurred.
  // This is used to support SD_INTO and SD_OVER single-step depths so we detect when a single-step
  // causes the execution of an instruction in a different method or at a different line number.
  ArtMethod* method_;

  std::set<uint32_t> dex_pcs_;

  DISALLOW_COPY_AND_ASSIGN(SingleStepControl);
};

// A pending instrumentation change the debugger wants applied (registering/unregistering an
// event listener, or (un)deoptimizing everything or one method).
// TODO rename to InstrumentationRequest.
class DeoptimizationRequest {
 public:
  enum Kind {
    kNothing,                   // no action.
    kRegisterForEvent,          // start listening for instrumentation event.
    kUnregisterForEvent,        // stop listening for instrumentation event.
    kFullDeoptimization,        // deoptimize everything.
    kFullUndeoptimization,      // undeoptimize everything.
    kSelectiveDeoptimization,   // deoptimize one method.
    kSelectiveUndeoptimization  // undeoptimize one method.
  };

  DeoptimizationRequest() : kind_(kNothing), instrumentation_event_(0), method_(nullptr) {}

  // Copying re-registers the method via SetMethod (defined in the .cc file).
  DeoptimizationRequest(const DeoptimizationRequest& other)
      SHARED_REQUIRES(Locks::mutator_lock_)
      : kind_(other.kind_), instrumentation_event_(other.instrumentation_event_) {
    // Create a new JNI global reference for the method.
    SetMethod(other.Method());
  }

  // Accessors for method_; defined in the .cc file since they convert
  // between ArtMethod* and the stored jmethodID.
  ArtMethod* Method() const SHARED_REQUIRES(Locks::mutator_lock_);

  void SetMethod(ArtMethod* m) SHARED_REQUIRES(Locks::mutator_lock_);

  // Name 'Kind()' would collide with the above enum name.
  Kind GetKind() const {
    return kind_;
  }

  void SetKind(Kind kind) {
    kind_ = kind;
  }

  uint32_t InstrumentationEvent() const {
    return instrumentation_event_;
  }

  void SetInstrumentationEvent(uint32_t instrumentation_event) {
    instrumentation_event_ = instrumentation_event;
  }

 private:
  Kind kind_;

  // TODO we could use a union to hold the instrumentation_event and the method since they
  // respectively have sense only for kRegisterForEvent/kUnregisterForEvent and
  // kSelectiveDeoptimization/kSelectiveUndeoptimization.

  // Event to start or stop listening to. Only for kRegisterForEvent and kUnregisterForEvent.
  uint32_t instrumentation_event_;

  // Method for selective deoptimization.
  jmethodID method_;
};
std::ostream& operator<<(std::ostream& os, const DeoptimizationRequest::Kind& rhs);

// Static-only facade between the generic JDWP implementation and the ART runtime:
// translates JDWP requests into runtime operations and runtime events into JDWP
// notifications. Most members are implemented in the corresponding .cc file.
class Dbg {
 public:
  static void SetJdwpAllowed(bool allowed);

  static void StartJdwp();
  static void StopJdwp();

  // Invoked by the GC in case we need to keep DDMS informed.
  static void GcDidFinish() REQUIRES(!Locks::mutator_lock_);

  // Return the DebugInvokeReq for the current thread.
  static DebugInvokeReq* GetInvokeReq();

  static Thread* GetDebugThread();
  static void ClearWaitForEventThread();

  /*
   * Enable/disable breakpoints and step modes. Used to provide a heads-up
   * when the debugger attaches.
   */
  static void Connected();
  static void GoActive()
      REQUIRES(!Locks::breakpoint_lock_, !Locks::deoptimization_lock_, !Locks::mutator_lock_);
  static void Disconnected() REQUIRES(!Locks::deoptimization_lock_, !Locks::mutator_lock_);
  static void Dispose() {
    gDisposed = true;
  }

  // Returns true if we're actually debugging with a real debugger, false if it's
  // just DDMS (or nothing at all).
  static bool IsDebuggerActive() {
    return gDebuggerActive;
  }

  // Configures JDWP with parsed command-line options.
  static void ConfigureJdwp(const JDWP::JdwpOptions& jdwp_options);

  // Returns true if we had -Xrunjdwp or -agentlib:jdwp= on the command line.
  static bool IsJdwpConfigured();

  // Returns true if a method has any breakpoints.
  static bool MethodHasAnyBreakpoints(ArtMethod* method)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::breakpoint_lock_);

  static bool IsDisposed() {
    return gDisposed;
  }

  /*
   * Time, in milliseconds, since the last debugger activity. Does not
   * include DDMS activity. Returns -1 if there has been no activity.
   * Returns 0 if we're in the middle of handling a debugger request.
   */
  static int64_t LastDebuggerActivity();

  static void UndoDebuggerSuspensions()
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);

  /*
   * Class, Object, Array
   */
  static std::string GetClassName(JDWP::RefTypeId id)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static std::string GetClassName(mirror::Class* klass)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId* class_object_id)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId* superclass_id)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static void GetClassList(std::vector<JDWP::RefTypeId>* classes)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
                                      uint32_t* pStatus, std::string* pDescriptor)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static void FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>* ids)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError GetSignature(JDWP::RefTypeId ref_type_id, std::string* signature)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError GetSourceFile(JDWP::RefTypeId ref_type_id, std::string* source_file)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError GetObjectTag(JDWP::ObjectId object_id, uint8_t* tag)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static size_t GetTagWidth(JDWP::JdwpTag tag);

  static JDWP::JdwpError GetArrayLength(JDWP::ObjectId array_id, int32_t* length)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError OutputArray(JDWP::ObjectId array_id, int offset, int count,
                                     JDWP::ExpandBuf* pReply)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError SetArrayElements(JDWP::ObjectId array_id, int offset, int count,
                                          JDWP::Request* request)
      SHARED_REQUIRES(Locks::mutator_lock_);

  static JDWP::JdwpError CreateString(const std::string& str, JDWP::ObjectId* new_string_id)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId* new_object_id)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length,
                                           JDWP::ObjectId* new_array_id)
      SHARED_REQUIRES(Locks::mutator_lock_);

  //
  // Event filtering. Each Match* predicate checks one JDWP event modifier
  // against the actual event data; a zero/null "expected" id matches anything.
  //
  static bool MatchThread(JDWP::ObjectId expected_thread_id, Thread* event_thread)
      SHARED_REQUIRES(Locks::mutator_lock_);

  static bool MatchLocation(const JDWP::JdwpLocation& expected_location,
                            const JDWP::EventLocation& event_location)
      SHARED_REQUIRES(Locks::mutator_lock_);

  static bool MatchType(mirror::Class* event_class, JDWP::RefTypeId class_id)
      SHARED_REQUIRES(Locks::mutator_lock_);

  static bool MatchField(JDWP::RefTypeId expected_type_id, JDWP::FieldId expected_field_id,
                         ArtField* event_field)
      SHARED_REQUIRES(Locks::mutator_lock_);

  static bool MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* event_instance)
      SHARED_REQUIRES(Locks::mutator_lock_);

  //
  // Monitors.
  //
  static JDWP::JdwpError GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError GetOwnedMonitors(JDWP::ObjectId thread_id,
                                          std::vector<JDWP::ObjectId>* monitors,
                                          std::vector<uint32_t>* stack_depths)
      REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError GetContendedMonitor(JDWP::ObjectId thread_id,
                                             JDWP::ObjectId* contended_monitor)
      REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);

  //
  // Heap.
  //
  static JDWP::JdwpError GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
                                           std::vector<uint64_t>* counts)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError GetInstances(JDWP::RefTypeId class_id, int32_t max_count,
                                      std::vector<JDWP::ObjectId>* instances)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
                                             std::vector<JDWP::ObjectId>* referring_objects)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError DisableCollection(JDWP::ObjectId object_id)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError EnableCollection(JDWP::ObjectId object_id)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError IsCollected(JDWP::ObjectId object_id, bool* is_collected)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static void DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count)
      SHARED_REQUIRES(Locks::mutator_lock_);

  //
  // Methods and fields.
  //
  static std::string GetMethodName(JDWP::MethodId method_id)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError OutputDeclaredFields(JDWP::RefTypeId ref_type_id, bool with_generic,
                                              JDWP::ExpandBuf* pReply)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError OutputDeclaredMethods(JDWP::RefTypeId ref_type_id, bool with_generic,
                                               JDWP::ExpandBuf* pReply)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError OutputDeclaredInterfaces(JDWP::RefTypeId ref_type_id,
                                                  JDWP::ExpandBuf* pReply)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static void OutputLineTable(JDWP::RefTypeId ref_type_id, JDWP::MethodId method_id,
                              JDWP::ExpandBuf* pReply)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static void OutputVariableTable(JDWP::RefTypeId ref_type_id, JDWP::MethodId id, bool with_generic,
                                  JDWP::ExpandBuf* pReply)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static void OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value,
                                      JDWP::ExpandBuf* pReply)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static void OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value,
                               JDWP::ExpandBuf* pReply)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError GetBytecodes(JDWP::RefTypeId class_id, JDWP::MethodId method_id,
                                      std::vector<uint8_t>* bytecodes)
      SHARED_REQUIRES(Locks::mutator_lock_);

  static std::string GetFieldName(JDWP::FieldId field_id)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpTag GetFieldBasicTag(JDWP::FieldId field_id)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpTag GetStaticFieldBasicTag(JDWP::FieldId field_id)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
                                       JDWP::ExpandBuf* pReply)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
                                       uint64_t value, int width)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id,
                                             JDWP::ExpandBuf* pReply)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width)
      SHARED_REQUIRES(Locks::mutator_lock_);

  static JDWP::JdwpError StringToUtf8(JDWP::ObjectId string_id, std::string* str)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static void OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply)
      SHARED_REQUIRES(Locks::mutator_lock_);

  /*
   * Thread, ThreadGroup, Frame
   */
  static JDWP::JdwpError GetThreadName(JDWP::ObjectId thread_id, std::string* name)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::thread_list_lock_);
  static JDWP::JdwpError GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply)
      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::thread_list_lock_);
  static JDWP::JdwpError GetThreadGroupName(JDWP::ObjectId thread_group_id,
                                            JDWP::ExpandBuf* pReply)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError GetThreadGroupParent(JDWP::ObjectId thread_group_id,
                                              JDWP::ExpandBuf* pReply)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError GetThreadGroupChildren(JDWP::ObjectId thread_group_id,
                                                JDWP::ExpandBuf* pReply)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::ObjectId GetSystemThreadGroupId()
      SHARED_REQUIRES(Locks::mutator_lock_);

  static JDWP::JdwpThreadStatus ToJdwpThreadStatus(ThreadState state);
  static JDWP::JdwpError GetThreadStatus(JDWP::ObjectId thread_id,
                                         JDWP::JdwpThreadStatus* pThreadStatus,
                                         JDWP::JdwpSuspendStatus* pSuspendStatus)
      REQUIRES(!Locks::thread_list_lock_);
  static JDWP::JdwpError GetThreadDebugSuspendCount(JDWP::ObjectId thread_id,
                                                    JDWP::ExpandBuf* pReply)
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
  // static void WaitForSuspend(JDWP::ObjectId thread_id);

  // Fills 'thread_ids' with the threads in the given thread group. If thread_group_id == 0,
  // returns all threads.
  static void GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* thread_ids)
      REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);

  static JDWP::JdwpError GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* result)
      REQUIRES(!Locks::thread_list_lock_);
  static JDWP::JdwpError GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
                                         size_t frame_count, JDWP::ExpandBuf* buf)
      REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);

  static JDWP::ObjectId GetThreadSelfId() SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::ObjectId GetThreadId(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_);

  static void SuspendVM()
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
  static void ResumeVM()
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
  static JDWP::JdwpError SuspendThread(JDWP::ObjectId thread_id, bool request_suspension = true)
      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
               !Locks::thread_suspend_count_lock_);

  static void ResumeThread(JDWP::ObjectId thread_id)
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static void SuspendSelf();

  static JDWP::JdwpError GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
                                       JDWP::ObjectId* result)
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pReply)
      REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError SetLocalValues(JDWP::Request* request)
      REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);

  static JDWP::JdwpError Interrupt(JDWP::ObjectId thread_id)
      REQUIRES(!Locks::thread_list_lock_);

  /*
   * Debugger notification
   */
  // Bitmask values for UpdateDebugger's 'event_flags' parameter.
  enum EventFlag {
    kBreakpoint = 0x01,
    kSingleStep = 0x02,
    kMethodEntry = 0x04,
    kMethodExit = 0x08,
  };
  static void PostFieldAccessEvent(ArtMethod* m, int dex_pc, mirror::Object* this_object,
                                   ArtField* f)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static void PostFieldModificationEvent(ArtMethod* m, int dex_pc,
                                         mirror::Object* this_object, ArtField* f,
                                         const JValue* field_value)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static void PostException(mirror::Throwable* exception)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static void PostThreadStart(Thread* t)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static void PostThreadDeath(Thread* t)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static void PostClassPrepare(mirror::Class* c)
      SHARED_REQUIRES(Locks::mutator_lock_);

  static void UpdateDebugger(Thread* thread, mirror::Object* this_object,
                             ArtMethod* method, uint32_t new_dex_pc,
                             int event_flags, const JValue* return_value)
      REQUIRES(!Locks::breakpoint_lock_) SHARED_REQUIRES(Locks::mutator_lock_);

  // Indicates whether we need deoptimization for debugging.
  static bool RequiresDeoptimization();

  // Records deoptimization request in the queue.
  static void RequestDeoptimization(const DeoptimizationRequest& req)
      REQUIRES(!Locks::deoptimization_lock_) SHARED_REQUIRES(Locks::mutator_lock_);

  // Manage deoptimization after updating JDWP events list. Suspends all threads, processes each
  // request and finally resumes all threads.
  static void ManageDeoptimization()
      REQUIRES(!Locks::deoptimization_lock_) SHARED_REQUIRES(Locks::mutator_lock_);

  // Breakpoints.
  static void WatchLocation(const JDWP::JdwpLocation* pLoc, DeoptimizationRequest* req)
      REQUIRES(!Locks::breakpoint_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
  static void UnwatchLocation(const JDWP::JdwpLocation* pLoc, DeoptimizationRequest* req)
      REQUIRES(!Locks::breakpoint_lock_) SHARED_REQUIRES(Locks::mutator_lock_);

  /*
   * Forced interpreter checkers for single-step and continue support.
   * Each checker short-circuits to false when no debugger is attached and
   * otherwise defers to the corresponding *Impl in the .cc file.
   */

  // Indicates whether we need to force the use of interpreter to invoke a method.
  // This allows to single-step or continue into the called method.
  static bool IsForcedInterpreterNeededForCalling(Thread* thread, ArtMethod* m)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!IsDebuggerActive()) {
      return false;
    }
    return IsForcedInterpreterNeededForCallingImpl(thread, m);
  }

  // Indicates whether we need to force the use of interpreter entrypoint when calling a
  // method through the resolution trampoline. This allows to single-step or continue into
  // the called method.
  static bool IsForcedInterpreterNeededForResolution(Thread* thread, ArtMethod* m)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!IsDebuggerActive()) {
      return false;
    }
    return IsForcedInterpreterNeededForResolutionImpl(thread, m);
  }

  // Indicates whether we need to force the use of instrumentation entrypoint when calling
  // a method through the resolution trampoline. This allows to deoptimize the stack for
  // debugging when we returned from the called method.
  static bool IsForcedInstrumentationNeededForResolution(Thread* thread, ArtMethod* m)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!IsDebuggerActive()) {
      return false;
    }
    return IsForcedInstrumentationNeededForResolutionImpl(thread, m);
  }

  // Indicates whether we need to force the use of interpreter when returning from the
  // interpreter into the runtime. This allows to deoptimize the stack and continue
  // execution with interpreter for debugging.
  // Note: unlike the checkers above, this one also fires while debugger shadow frames
  // remain on the thread even after the debugger has detached.
  static bool IsForcedInterpreterNeededForUpcall(Thread* thread, ArtMethod* m)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!IsDebuggerActive() && !thread->HasDebuggerShadowFrames()) {
      return false;
    }
    return IsForcedInterpreterNeededForUpcallImpl(thread, m);
  }

  // Indicates whether we need to force the use of interpreter when handling an
  // exception. This allows to deoptimize the stack and continue execution with
  // the interpreter.
  // Note: the interpreter will start by handling the exception when executing
  // the deoptimized frames.
  static bool IsForcedInterpreterNeededForException(Thread* thread)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    if (!IsDebuggerActive() && !thread->HasDebuggerShadowFrames()) {
      return false;
    }
    return IsForcedInterpreterNeededForExceptionImpl(thread);
  }

  // Single-stepping.
  static JDWP::JdwpError ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize size,
                                       JDWP::JdwpStepDepth depth)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static void UnconfigureStep(JDWP::ObjectId thread_id)
      REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);

  /*
   * Invoke support
   */

  // Called by the JDWP thread to prepare invocation in the event thread (suspended on an event).
  // If the information sent by the debugger is incorrect, it will send a reply with the
  // appropriate error code. Otherwise, it will attach a DebugInvokeReq object to the event thread
  // and resume it (and possibly other threads depending on the invoke options).
  // Unlike other commands, the JDWP thread will not send the reply to the debugger (see
  // JdwpState::ProcessRequest). The reply will be sent by the event thread itself after method
  // invocation completes (see FinishInvokeMethod). This is required to allow the JDWP thread to
  // process incoming commands from the debugger while the invocation is still in progress in the
  // event thread, especially if it gets suspended by a debug event occurring in another thread.
  static JDWP::JdwpError PrepareInvokeMethod(uint32_t request_id, JDWP::ObjectId thread_id,
                                             JDWP::ObjectId object_id, JDWP::RefTypeId class_id,
                                             JDWP::MethodId method_id, uint32_t arg_count,
                                             uint64_t arg_values[], JDWP::JdwpTag* arg_types,
                                             uint32_t options)
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Called by the event thread to execute a method prepared by the JDWP thread in the given
  // DebugInvokeReq object. Once the invocation completes, the event thread attaches a reply
  // to that DebugInvokeReq object so it can be sent to the debugger only when the event thread
  // is ready to suspend (see FinishInvokeMethod).
  static void ExecuteMethod(DebugInvokeReq* pReq);

  // Called by the event thread to send the reply of the invoke (created in ExecuteMethod)
  // before suspending itself. This is to ensure the thread is ready to suspend before the
  // debugger receives the reply.
  static void FinishInvokeMethod(DebugInvokeReq* pReq);

  /*
   * DDM support.
   */
  static void DdmSendThreadNotification(Thread* t, uint32_t type)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static void DdmSetThreadNotification(bool enable)
      REQUIRES(!Locks::thread_list_lock_);
  static bool DdmHandlePacket(JDWP::Request* request, uint8_t** pReplyBuf, int* pReplyLen);
  static void DdmConnected() SHARED_REQUIRES(Locks::mutator_lock_);
  static void DdmDisconnected() SHARED_REQUIRES(Locks::mutator_lock_);
  static void DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static void DdmSendChunk(uint32_t type, size_t len, const uint8_t* buf)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Visit breakpoint roots, used to prevent unloading of methods with breakpoints.
  static void VisitRoots(RootVisitor* visitor)
      SHARED_REQUIRES(Locks::mutator_lock_);

  /*
   * Allocation tracking support.
   */
  static void SetAllocTrackingEnabled(bool enabled) REQUIRES(!Locks::alloc_tracker_lock_);
  static jbyteArray GetRecentAllocations()
      REQUIRES(!Locks::alloc_tracker_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
  static void DumpRecentAllocations() REQUIRES(!Locks::alloc_tracker_lock_);

  // When to send DDMS heap-info updates.
  enum HpifWhen {
    HPIF_WHEN_NEVER = 0,
    HPIF_WHEN_NOW = 1,
    HPIF_WHEN_NEXT_GC = 2,
    HPIF_WHEN_EVERY_GC = 3
  };
  static int DdmHandleHpifChunk(HpifWhen when)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // When/what to send for DDMS heap-segment updates.
  enum HpsgWhen {
    HPSG_WHEN_NEVER = 0,
    HPSG_WHEN_EVERY_GC = 1,
  };
  enum HpsgWhat {
    HPSG_WHAT_MERGED_OBJECTS = 0,
    HPSG_WHAT_DISTINCT_OBJECTS = 1,
  };
  static bool DdmHandleHpsgNhsgChunk(HpsgWhen when, HpsgWhat what, bool native);

  static void DdmSendHeapInfo(HpifWhen reason)
      SHARED_REQUIRES(Locks::mutator_lock_);
  static void DdmSendHeapSegments(bool native)
      SHARED_REQUIRES(Locks::mutator_lock_);

  static ObjectRegistry* GetObjectRegistry() {
    return gRegistry;
  }

  static JDWP::JdwpTag TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o)
      SHARED_REQUIRES(Locks::mutator_lock_);

  static JDWP::JdwpTypeTag GetTypeTag(mirror::Class* klass)
      SHARED_REQUIRES(Locks::mutator_lock_);

  static JDWP::FieldId ToFieldId(const ArtField* f)
      SHARED_REQUIRES(Locks::mutator_lock_);

  static void SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc)
      SHARED_REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);

  static JDWP::JdwpState* GetJdwpState();

  static uint32_t GetInstrumentationEvents() SHARED_REQUIRES(Locks::mutator_lock_) {
    return instrumentation_events_;
  }

 private:
  // Invoke-method helpers used by ExecuteMethod (see the .cc file).
  static void ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInvokeReq* pReq)
      SHARED_REQUIRES(Locks::mutator_lock_);

  static void BuildInvokeReply(JDWP::ExpandBuf* pReply, uint32_t request_id,
                               JDWP::JdwpTag result_tag, uint64_t result_value,
                               JDWP::ObjectId exception)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Stack-frame local variable access helpers (backing GetLocalValues/SetLocalValues).
  static JDWP::JdwpError GetLocalValue(const StackVisitor& visitor,
                                       ScopedObjectAccessUnchecked& soa, int slot,
                                       JDWP::JdwpTag tag, uint8_t* buf, size_t width)
      REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
  static JDWP::JdwpError SetLocalValue(Thread* thread, StackVisitor& visitor, int slot,
                                       JDWP::JdwpTag tag, uint64_t value, size_t width)
      REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);

  static void DdmBroadcast(bool connect) SHARED_REQUIRES(Locks::mutator_lock_);
  static void PostThreadStartOrStop(Thread*, uint32_t)
      SHARED_REQUIRES(Locks::mutator_lock_);

  static void PostLocationEvent(ArtMethod* method, int pcOffset,
                                mirror::Object* thisPtr, int eventFlags,
                                const JValue* return_value)
      SHARED_REQUIRES(Locks::mutator_lock_);

  static void ProcessDeoptimizationRequest(const DeoptimizationRequest& request)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_);

  static void RequestDeoptimizationLocked(const DeoptimizationRequest& req)
      REQUIRES(Locks::deoptimization_lock_) SHARED_REQUIRES(Locks::mutator_lock_);

  // Implementations of the inline IsForced* checkers above (see the .cc file).
  static bool IsForcedInterpreterNeededForCallingImpl(Thread* thread, ArtMethod* m)
      SHARED_REQUIRES(Locks::mutator_lock_);

  static bool IsForcedInterpreterNeededForResolutionImpl(Thread* thread, ArtMethod* m)
      SHARED_REQUIRES(Locks::mutator_lock_);

  static bool IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, ArtMethod* m)
      SHARED_REQUIRES(Locks::mutator_lock_);

  static bool IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m)
      SHARED_REQUIRES(Locks::mutator_lock_);

  static bool IsForcedInterpreterNeededForExceptionImpl(Thread* thread)
      SHARED_REQUIRES(Locks::mutator_lock_);

  // Indicates whether the debugger is making requests.
  static bool gDebuggerActive;

  // Indicates whether we should drop the JDWP connection because the runtime stops or the
  // debugger called VirtualMachine.Dispose.
  static bool gDisposed;

  // The registry mapping objects to JDWP ids.
  static ObjectRegistry* gRegistry;

  // Deoptimization requests to be processed each time the event list is updated. This is used when
  // registering and unregistering events so we do not deoptimize while holding the event list
  // lock.
  // TODO rename to instrumentation_requests.
  static std::vector<DeoptimizationRequest> deoptimization_requests_ GUARDED_BY(Locks::deoptimization_lock_);

  // Count the number of events requiring full deoptimization. When the counter is > 0, everything
  // is deoptimized, otherwise everything is undeoptimized.
  // Note: we fully deoptimize on the first event only (when the counter is set to 1). We fully
  // undeoptimize when the last event is unregistered (when the counter is set to 0).
  static size_t full_deoptimization_event_count_ GUARDED_BY(Locks::deoptimization_lock_);

  // Maps an instrumentation event to the ref-counter below that tracks it (see the .cc file).
  static size_t* GetReferenceCounterForEvent(uint32_t instrumentation_event);

  // Instrumentation event reference counters.
  // TODO we could use an array instead of having all these dedicated counters. Instrumentation
  // events are bits of a mask so we could convert them to array index.
  static size_t dex_pc_change_event_ref_count_ GUARDED_BY(Locks::deoptimization_lock_);
  static size_t method_enter_event_ref_count_ GUARDED_BY(Locks::deoptimization_lock_);
  static size_t method_exit_event_ref_count_ GUARDED_BY(Locks::deoptimization_lock_);
  static size_t field_read_event_ref_count_ GUARDED_BY(Locks::deoptimization_lock_);
  static size_t field_write_event_ref_count_ GUARDED_BY(Locks::deoptimization_lock_);
  static size_t exception_catch_event_ref_count_ GUARDED_BY(Locks::deoptimization_lock_);
  static uint32_t instrumentation_events_ GUARDED_BY(Locks::mutator_lock_);

  DISALLOW_COPY_AND_ASSIGN(Dbg);
};

// Packs a 4-character DDMS chunk name (e.g. "THDE") into a big-endian uint32_t type code.
#define CHUNK_TYPE(_name) \
    static_cast<uint32_t>((_name)[0] << 24 | (_name)[1] << 16 | (_name)[2] << 8 | (_name)[3])

}  // namespace art

#endif  // ART_RUNTIME_DEBUGGER_H_