/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_INSTRUMENTATION_H_
#define ART_RUNTIME_INSTRUMENTATION_H_

#include <stdint.h>

#include <functional>
#include <list>
#include <memory>
#include <optional>
#include <queue>
#include <unordered_set>

#include "arch/instruction_set.h"
#include "base/enums.h"
#include "base/locks.h"
#include "base/macros.h"
#include "base/safe_map.h"
#include "gc_root.h"
#include "jvalue.h"
#include "offsets.h"

namespace art {
namespace mirror {
class Class;
class Object;
class Throwable;
}  // namespace mirror
class ArtField;
class ArtMethod;
template <typename T> class Handle;
template <typename T> class MutableHandle;
struct NthCallerVisitor;
union JValue;
class OatQuickMethodHeader;
class SHARED_LOCKABLE ReaderWriterMutex;
class ShadowFrame;
class Thread;
enum class DeoptimizationMethodType;

namespace instrumentation {

// Do we want to deoptimize for method entry and exit listeners or just try to intercept
// invocations? Deoptimization forces all code to run in the interpreter and considerably hurts the
// application's performance.
static constexpr bool kDeoptimizeForAccurateMethodEntryExitListeners = true;

// An OptionalFrame is either Some(const ShadowFrame& current_frame) or None, depending on
// whether the method being exited has a shadow frame associated with the current stack frame.
using OptionalFrame = std::optional<std::reference_wrapper<const ShadowFrame>>;
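
// Illustrative sketch (not part of the original header): a MethodExited listener can unwrap an
// OptionalFrame like any std::optional. The callback body below is hypothetical.
//
//   void MethodExited(Thread* thread, ArtMethod* method, OptionalFrame frame,
//                     JValue& return_value) override {
//     if (frame.has_value()) {
//       const ShadowFrame& shadow_frame = frame->get();
//       // Inspect interpreter state via shadow_frame here.
//     }
//   }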

// Instrumentation event listener API. Registered listeners will get the appropriate call back for
// the events they are listening for. The call backs supply the thread, method and dex_pc the event
// occurred upon. The thread may or may not be Thread::Current().
struct InstrumentationListener {
  InstrumentationListener() {}
  virtual ~InstrumentationListener() {}

  // Call-back for when a method is entered.
  virtual void MethodEntered(Thread* thread, ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  virtual void MethodExited(Thread* thread,
                            ArtMethod* method,
                            OptionalFrame frame,
                            MutableHandle<mirror::Object>& return_value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Call-back for when a method is exited. The implementor should either handler-ize the return
  // value (if appropriate) or use the alternate MethodExited callback instead if they need to
  // go through a suspend point.
  virtual void MethodExited(Thread* thread,
                            ArtMethod* method,
                            OptionalFrame frame,
                            JValue& return_value)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back for when a method is popped due to an exception throw. A method will either cause a
  // MethodExited call-back or a MethodUnwind call-back when its activation is removed.
  virtual void MethodUnwind(Thread* thread,
                            ArtMethod* method,
                            uint32_t dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back for when the dex pc moves in a method.
  virtual void DexPcMoved(Thread* thread,
                          Handle<mirror::Object> this_object,
                          ArtMethod* method,
                          uint32_t new_dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back for when we read from a field.
  virtual void FieldRead(Thread* thread,
                         Handle<mirror::Object> this_object,
                         ArtMethod* method,
                         uint32_t dex_pc,
                         ArtField* field) = 0;

  virtual void FieldWritten(Thread* thread,
                            Handle<mirror::Object> this_object,
                            ArtMethod* method,
                            uint32_t dex_pc,
                            ArtField* field,
                            Handle<mirror::Object> field_value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Call-back for when we write into a field.
  virtual void FieldWritten(Thread* thread,
                            Handle<mirror::Object> this_object,
                            ArtMethod* method,
                            uint32_t dex_pc,
                            ArtField* field,
                            const JValue& field_value)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back when an exception is thrown.
  virtual void ExceptionThrown(Thread* thread,
                               Handle<mirror::Throwable> exception_object)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back when an exception is caught/handled by java code.
  virtual void ExceptionHandled(Thread* thread, Handle<mirror::Throwable> exception_object)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back for when we execute a branch.
  virtual void Branch(Thread* thread,
                      ArtMethod* method,
                      uint32_t dex_pc,
                      int32_t dex_pc_offset)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back when a shadow_frame with the needs_notify_pop_ boolean set is popped off the stack
  // by either a return or an exception. Normally instrumentation listeners should ensure that
  // there are shadow-frames by deoptimizing stacks.
  virtual void WatchedFramePop(Thread* thread ATTRIBUTE_UNUSED,
                               const ShadowFrame& frame ATTRIBUTE_UNUSED)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
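
// Usage sketch (illustrative only, not part of the runtime): a listener that counts method
// entries. Every pure-virtual callback must be overridden; the events this listener does not care
// about are stubbed out as no-ops. The class name and counter are hypothetical, and a real
// listener would be registered via Instrumentation::AddListener().
//
//   class CountingListener final : public InstrumentationListener {
//    public:
//     void MethodEntered(Thread* thread, ArtMethod* method) override
//         REQUIRES_SHARED(Locks::mutator_lock_) {
//       entries_.fetch_add(1, std::memory_order_relaxed);
//     }
//     void MethodExited(Thread*, ArtMethod*, OptionalFrame, JValue&) override
//         REQUIRES_SHARED(Locks::mutator_lock_) {}
//     void MethodUnwind(Thread*, ArtMethod*, uint32_t) override
//         REQUIRES_SHARED(Locks::mutator_lock_) {}
//     void DexPcMoved(Thread*, Handle<mirror::Object>, ArtMethod*, uint32_t) override
//         REQUIRES_SHARED(Locks::mutator_lock_) {}
//     void FieldRead(Thread*, Handle<mirror::Object>, ArtMethod*, uint32_t, ArtField*) override {}
//     void FieldWritten(Thread*, Handle<mirror::Object>, ArtMethod*, uint32_t, ArtField*,
//                       const JValue&) override REQUIRES_SHARED(Locks::mutator_lock_) {}
//     void ExceptionThrown(Thread*, Handle<mirror::Throwable>) override
//         REQUIRES_SHARED(Locks::mutator_lock_) {}
//     void ExceptionHandled(Thread*, Handle<mirror::Throwable>) override
//         REQUIRES_SHARED(Locks::mutator_lock_) {}
//     void Branch(Thread*, ArtMethod*, uint32_t, int32_t) override
//         REQUIRES_SHARED(Locks::mutator_lock_) {}
//     void WatchedFramePop(Thread*, const ShadowFrame&) override
//         REQUIRES_SHARED(Locks::mutator_lock_) {}
//
//    private:
//     std::atomic<size_t> entries_{0};  // needs <atomic>; events may arrive on many threads
//   };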

class Instrumentation;
// A helper to send instrumentation events while popping the stack in a safe way.
class InstrumentationStackPopper {
 public:
  explicit InstrumentationStackPopper(Thread* self);
  ~InstrumentationStackPopper() REQUIRES_SHARED(Locks::mutator_lock_);

  // Increase the number of frames being popped up to `stack_pointer`. Return true if the
  // frames were popped without any exceptions, false otherwise. The exception that caused
  // the pop is `exception`.
  bool PopFramesTo(uintptr_t stack_pointer, /*in-out*/MutableHandle<mirror::Throwable>& exception)
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  Thread* self_;
  Instrumentation* instrumentation_;
  // The stack pointer limit for frames to pop.
  uintptr_t pop_until_;
};
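
// Usage sketch (illustrative, not part of this header), assuming an unwinder that is removing
// frames because of a pending exception; `target_sp` and `exception_handle` are hypothetical
// values owned by the caller:
//
//   InstrumentationStackPopper popper(self);
//   // Sends events for every instrumented frame up to `target_sp`.
//   if (!popper.PopFramesTo(target_sp, exception_handle)) {
//     // Popping raised a new exception; `exception_handle` has been updated to hold it.
//   }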

// Instrumentation is a catch-all for when extra information is required from the runtime. The
// typical use for instrumentation is for profiling and debugging. Instrumentation may add stubs
// to method entry and exit; it may also force execution to be switched to the interpreter and
// trigger deoptimization.
class Instrumentation {
 public:
  enum InstrumentationEvent {
    kMethodEntered = 0x1,
    kMethodExited = 0x2,
    kMethodUnwind = 0x4,
    kDexPcMoved = 0x8,
    kFieldRead = 0x10,
    kFieldWritten = 0x20,
    kExceptionThrown = 0x40,
    kBranch = 0x80,
    kWatchedFramePop = 0x200,
    kExceptionHandled = 0x400,
  };

  enum class InstrumentationLevel {
    kInstrumentNothing,             // execute without instrumentation
    kInstrumentWithEntryExitHooks,  // execute with entry/exit hooks
    kInstrumentWithInterpreter      // execute with interpreter
  };

  Instrumentation();

  static constexpr MemberOffset RunExitHooksOffset() {
    // Assert that run_exit_hooks_ is 8 bits wide. If the size changes, update the compare
    // instructions in the code generator when generating checks for MethodEntryExitHooks.
    static_assert(sizeof(run_exit_hooks_) == 1, "run_exit_hooks_ isn't expected size");
    return MemberOffset(OFFSETOF_MEMBER(Instrumentation, run_exit_hooks_));
  }

  static constexpr MemberOffset HaveMethodEntryListenersOffset() {
    // Assert that have_method_entry_listeners_ is 8 bits wide. If the size changes, update the
    // compare instructions in the code generator when generating checks for MethodEntryExitHooks.
    static_assert(sizeof(have_method_entry_listeners_) == 1,
                  "have_method_entry_listeners_ isn't expected size");
    return MemberOffset(OFFSETOF_MEMBER(Instrumentation, have_method_entry_listeners_));
  }

  static constexpr MemberOffset HaveMethodExitListenersOffset() {
    // Assert that have_method_exit_listeners_ is 8 bits wide. If the size changes, update the
    // compare instructions in the code generator when generating checks for MethodEntryExitHooks.
    static_assert(sizeof(have_method_exit_listeners_) == 1,
                  "have_method_exit_listeners_ isn't expected size");
    return MemberOffset(OFFSETOF_MEMBER(Instrumentation, have_method_exit_listeners_));
  }

  // Add a listener to be notified of the masked-together set of instrumentation events. This
  // suspends the runtime to install stubs. You are expected to hold the mutator lock as a proxy
  // for saying you should have suspended all threads (installing stubs while threads are running
  // will break).
  void AddListener(InstrumentationListener* listener, uint32_t events)
      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);

  // Removes listeners for the specified events.
  void RemoveListener(InstrumentationListener* listener, uint32_t events)
      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);
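
  // Usage sketch (illustrative, not part of this header): `events` is a bitmask of
  // InstrumentationEvent values, so one listener can subscribe to several events in a single
  // call. `tracer` is a hypothetical listener instance, and the caller must satisfy the locking
  // contract documented above.
  //
  //   uint32_t events = Instrumentation::kMethodEntered |
  //                     Instrumentation::kMethodExited |
  //                     Instrumentation::kMethodUnwind;
  //   Runtime::Current()->GetInstrumentation()->AddListener(tracer, events);
  //   // ... later, once tracing is done ...
  //   Runtime::Current()->GetInstrumentation()->RemoveListener(tracer, events);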

  // Calls UndeoptimizeEverything which may visit class linker classes through ConfigureStubs.
  void DisableDeoptimization(const char* key)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_);

  // Enables entry exit hooks support. This is called in preparation for debug requests that
  // require calling method entry / exit hooks.
  void EnableEntryExitHooks(const char* key)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_);

  // Switches the runtime state to non-java debuggable if entry / exit hooks are no longer required
  // and the runtime did not start off as java debuggable.
  void MaybeSwitchRuntimeDebugState(Thread* self)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_);

  bool AreAllMethodsDeoptimized() const {
    return InterpreterStubsInstalled();
  }
  bool ShouldNotifyMethodEnterExitEvents() const REQUIRES_SHARED(Locks::mutator_lock_);

  // Executes everything with interpreter.
  void DeoptimizeEverything(const char* key)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
      REQUIRES(!Locks::thread_list_lock_,
               !Locks::classlinker_classes_lock_);

  // Executes everything with compiled code (or interpreter if there is no code). May visit class
  // linker classes through ConfigureStubs.
  void UndeoptimizeEverything(const char* key)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
      REQUIRES(!Locks::thread_list_lock_,
               !Locks::classlinker_classes_lock_);

  // Deoptimize a method by forcing its execution with the interpreter. Note that a static
  // method (except a class initializer) set to the resolution trampoline will be deoptimized only
  // once its declaring class is initialized.
  void Deoptimize(ArtMethod* method) REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_);

  // Undeoptimize the method by restoring its entrypoints. Note that a static method
  // (except a class initializer) set to the resolution trampoline will be updated only once its
  // declaring class is initialized.
  void Undeoptimize(ArtMethod* method) REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_);

  // Indicates whether the method has been deoptimized so it is executed with the interpreter.
  bool IsDeoptimized(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  // Indicates if any method needs to be deoptimized. This is used to avoid walking the stack to
  // determine if a deoptimization is required.
  bool IsDeoptimizedMethodsEmpty() const REQUIRES_SHARED(Locks::mutator_lock_);

  // Enable method tracing by installing instrumentation entry/exit stubs or interpreter.
  void EnableMethodTracing(const char* key,
                           InstrumentationListener* listener,
                           bool needs_interpreter = kDeoptimizeForAccurateMethodEntryExitListeners)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
      REQUIRES(!Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);

  // Disable method tracing by uninstalling instrumentation entry/exit stubs or interpreter.
  void DisableMethodTracing(const char* key)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
      REQUIRES(!Locks::thread_list_lock_,
               !Locks::classlinker_classes_lock_);

  void InstrumentQuickAllocEntryPoints() REQUIRES(!Locks::instrument_entrypoints_lock_);
  void UninstrumentQuickAllocEntryPoints() REQUIRES(!Locks::instrument_entrypoints_lock_);
  void InstrumentQuickAllocEntryPointsLocked()
      REQUIRES(Locks::instrument_entrypoints_lock_, !Locks::thread_list_lock_,
               !Locks::runtime_shutdown_lock_);
  void UninstrumentQuickAllocEntryPointsLocked()
      REQUIRES(Locks::instrument_entrypoints_lock_, !Locks::thread_list_lock_,
               !Locks::runtime_shutdown_lock_);
  void ResetQuickAllocEntryPoints() REQUIRES(Locks::runtime_shutdown_lock_);

  // Returns a string representation of the given entry point.
  static std::string EntryPointString(const void* code);

  // Initialize the entrypoint of the method. `aot_code` is the AOT code.
  void InitializeMethodsCode(ArtMethod* method, const void* aot_code)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Update the code of a method respecting any installed stubs.
  void UpdateMethodsCode(ArtMethod* method, const void* new_code)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Update the code of a native method to a JITed stub.
  void UpdateNativeMethodsCodeToJitCode(ArtMethod* method, const void* new_code)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Return the code that we can execute for an invoke including from the JIT.
  const void* GetCodeForInvoke(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  // Return the code that we can execute considering the current instrumentation level.
  // If interpreter stubs are installed return interpreter bridge. If the entry exit stubs
  // are installed return an instrumentation entry point. Otherwise, return the code that
  // can be executed including from the JIT.
  const void* GetMaybeInstrumentedCodeForInvoke(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ForceInterpretOnly() {
    forced_interpret_only_ = true;
  }

  bool EntryExitStubsInstalled() const {
    return instrumentation_level_ == InstrumentationLevel::kInstrumentWithEntryExitHooks ||
           instrumentation_level_ == InstrumentationLevel::kInstrumentWithInterpreter;
  }

  bool InterpreterStubsInstalled() const {
    return instrumentation_level_ == InstrumentationLevel::kInstrumentWithInterpreter;
  }

  // Called by ArtMethod::Invoke to determine dispatch mechanism.
  bool InterpretOnly() const {
    return forced_interpret_only_ || InterpreterStubsInstalled();
  }
  bool InterpretOnly(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsForcedInterpretOnly() const {
    return forced_interpret_only_;
  }

  bool RunExitHooks() const {
    return run_exit_hooks_;
  }

  bool HasMethodEntryListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_method_entry_listeners_;
  }

  bool HasMethodExitListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_method_exit_listeners_;
  }

  bool HasMethodUnwindListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_method_unwind_listeners_;
  }

  bool HasDexPcListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_dex_pc_listeners_;
  }

  bool HasFieldReadListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_field_read_listeners_;
  }

  bool HasFieldWriteListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_field_write_listeners_;
  }

  bool HasExceptionThrownListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_exception_thrown_listeners_;
  }

  bool HasBranchListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_branch_listeners_;
  }

  bool HasWatchedFramePopListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_watched_frame_pop_listeners_;
  }

  bool HasExceptionHandledListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_exception_handled_listeners_;
  }

  // Returns whether dex pc events need to be reported for the specified method.
  // These events are reported when DexPcListeners are installed and at least one of the
  // following conditions holds:
  // 1. The method is deoptimized. This is done when there is a breakpoint on the method.
  // 2. The thread is deoptimized. This is used when single-stepping a single thread.
  // 3. Interpreter stubs are installed. In this case no additional information is maintained
  //    about which methods need dex pc move events. This is usually used for features which need
  //    them for several methods across threads or need expensive processing. So it is OK not to
  //    optimize this case further.
  // DexPcListeners are installed when there is a breakpoint on any method / single-stepping
  // on any thread. They are removed when the last breakpoint / single-step is removed. See
  // AddListener and RemoveListener for more details.
  bool NeedsDexPcEvents(ArtMethod* method, Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_);

  bool NeedsSlowInterpreterForListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_field_read_listeners_ ||
           have_field_write_listeners_ ||
           have_watched_frame_pop_listeners_ ||
           have_exception_handled_listeners_;
  }

  // Inform listeners that a method has been entered. Listeners may be installed into already
  // executing code, so we can get method enter events for methods already on the stack.
  void MethodEnterEvent(Thread* thread, ArtMethod* method) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasMethodEntryListeners())) {
      MethodEnterEventImpl(thread, method);
    }
  }

  // Inform listeners that a method has been exited.
  template<typename T>
  void MethodExitEvent(Thread* thread,
                       ArtMethod* method,
                       OptionalFrame frame,
                       T& return_value) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasMethodExitListeners())) {
      MethodExitEventImpl(thread, method, frame, return_value);
    }
  }

  // Inform listeners that a method has been exited due to an exception.
  void MethodUnwindEvent(Thread* thread,
                         ArtMethod* method,
                         uint32_t dex_pc) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Inform listeners that the dex pc has moved (only supported by the interpreter).
  void DexPcMovedEvent(Thread* thread,
                       ObjPtr<mirror::Object> this_object,
                       ArtMethod* method,
                       uint32_t dex_pc) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasDexPcListeners())) {
      DexPcMovedEventImpl(thread, this_object, method, dex_pc);
    }
  }

  // Inform listeners that a branch has been taken (only supported by the interpreter).
  void Branch(Thread* thread, ArtMethod* method, uint32_t dex_pc, int32_t offset) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasBranchListeners())) {
      BranchImpl(thread, method, dex_pc, offset);
    }
  }

  // Inform listeners that we read a field (only supported by the interpreter).
  void FieldReadEvent(Thread* thread,
                      ObjPtr<mirror::Object> this_object,
                      ArtMethod* method,
                      uint32_t dex_pc,
                      ArtField* field) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasFieldReadListeners())) {
      FieldReadEventImpl(thread, this_object, method, dex_pc, field);
    }
  }

  // Inform listeners that we write a field (only supported by the interpreter).
  void FieldWriteEvent(Thread* thread,
                       ObjPtr<mirror::Object> this_object,
                       ArtMethod* method,
                       uint32_t dex_pc,
                       ArtField* field,
                       const JValue& field_value) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasFieldWriteListeners())) {
      FieldWriteEventImpl(thread, this_object, method, dex_pc, field, field_value);
    }
  }

  // Inform listeners that a watched frame has been popped (only supported by the interpreter).
  void WatchedFramePopped(Thread* thread, const ShadowFrame& frame) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasWatchedFramePopListeners())) {
      WatchedFramePopImpl(thread, frame);
    }
  }

  // Inform listeners that an exception was thrown.
  void ExceptionThrownEvent(Thread* thread, ObjPtr<mirror::Throwable> exception_object) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Inform listeners that an exception has been handled. This is not sent for native code or for
  // exceptions which reach the end of the thread's stack.
  void ExceptionHandledEvent(Thread* thread, ObjPtr<mirror::Throwable> exception_object) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  JValue GetReturnValue(ArtMethod* method, bool* is_ref, uint64_t* gpr_result, uint64_t* fpr_result)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool PushDeoptContextIfNeeded(Thread* self,
                                DeoptimizationMethodType deopt_type,
                                bool is_ref,
                                const JValue& result) REQUIRES_SHARED(Locks::mutator_lock_);
  void DeoptimizeIfNeeded(Thread* self,
                          ArtMethod** sp,
                          DeoptimizationMethodType type,
                          JValue result,
                          bool is_ref) REQUIRES_SHARED(Locks::mutator_lock_);
  // Returns whether the caller of this runtime method requires a deoptimization. This checks both
  // whether the method itself requires a deopt and whether this particular frame needs a deopt
  // because of a class redefinition.
  bool ShouldDeoptimizeCaller(Thread* self, ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_);
  bool ShouldDeoptimizeCaller(Thread* self, ArtMethod** sp, size_t frame_size)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Returns whether the specified method requires a deoptimization. This doesn't account for
  // whether a particular stack frame involving this method requires a deoptimization.
  bool NeedsSlowInterpreterForMethod(Thread* self, ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  DeoptimizationMethodType GetDeoptimizationMethodType(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Call back for configure stubs.
  void InstallStubsForClass(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);

  void InstallStubsForMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  void UpdateEntrypointsForDebuggable() REQUIRES(art::Locks::mutator_lock_);

  // Install instrumentation exit stub on every method of the stack of the given thread.
  // This is used by:
  //  - the debugger, to cause a deoptimization of all frames in the thread's stack (for
  //    example, after updating local variables)
  //  - tracing, to call method entry / exit hooks. For this we instrument
  //    the stack frame to run entry / exit hooks but we don't need to deoptimize.
  // deopt_all_frames indicates whether the frames need to deoptimize or not.
  void InstrumentThreadStack(Thread* thread, bool deopt_all_frames) REQUIRES(Locks::mutator_lock_);
  void InstrumentAllThreadStacks(bool deopt_all_frames) REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::thread_list_lock_);

  // Force all currently running frames to be deoptimized back to interpreter. This should only be
  // used in cases where basically all compiled code has been invalidated.
  void DeoptimizeAllThreadFrames() REQUIRES(art::Locks::mutator_lock_);

  static size_t ComputeFrameId(Thread* self,
                               size_t frame_depth,
                               size_t inlined_frames_before_frame)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Does not hold lock, used to check if someone changed from not instrumented to instrumented
  // during a GC suspend point.
  bool AllocEntrypointsInstrumented() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return alloc_entrypoints_instrumented_;
  }

  bool ProcessMethodUnwindCallbacks(Thread* self,
                                    std::queue<ArtMethod*>& methods,
                                    MutableHandle<mirror::Throwable>& exception)
      REQUIRES_SHARED(Locks::mutator_lock_);

  InstrumentationLevel GetCurrentInstrumentationLevel() const;

  bool MethodSupportsExitEvents(ArtMethod* method, const OatQuickMethodHeader* header)
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  // Returns true if moving to the given instrumentation level requires the installation of stubs.
  // False otherwise.
  bool RequiresInstrumentationInstallation(InstrumentationLevel new_level) const;

  // Update the current instrumentation_level_.
  void UpdateInstrumentationLevel(InstrumentationLevel level);

  // Does the job of installing or removing instrumentation code within methods.
  // In order to support multiple clients using instrumentation at the same time,
  // the caller must pass a unique key (a string) identifying it so we remember which
  // instrumentation level it needs. Therefore the current instrumentation level
  // becomes the highest instrumentation level required by a client.
  void ConfigureStubs(const char* key, InstrumentationLevel desired_instrumentation_level)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
      REQUIRES(!Locks::thread_list_lock_,
               !Locks::classlinker_classes_lock_);
  void UpdateStubs() REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
      REQUIRES(!Locks::thread_list_lock_,
               !Locks::classlinker_classes_lock_);

  // If there are no pending deoptimizations restores the stack to the normal state by updating the
  // return pcs to actual return addresses from the instrumentation stack and clears the
  // instrumentation stack.
  void MaybeRestoreInstrumentationStack() REQUIRES(Locks::mutator_lock_);

  // No thread safety analysis to get around SetQuickAllocEntryPointsInstrumented requiring
  // exclusive access to mutator lock which you can't get if the runtime isn't started.
  void SetEntrypointsInstrumented(bool instrumented) NO_THREAD_SAFETY_ANALYSIS;

  void MethodEnterEventImpl(Thread* thread, ArtMethod* method) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  template <typename T>
  void MethodExitEventImpl(Thread* thread,
                           ArtMethod* method,
                           OptionalFrame frame,
                           T& return_value) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void DexPcMovedEventImpl(Thread* thread,
                           ObjPtr<mirror::Object> this_object,
                           ArtMethod* method,
                           uint32_t dex_pc) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void BranchImpl(Thread* thread, ArtMethod* method, uint32_t dex_pc, int32_t offset) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void WatchedFramePopImpl(Thread* thread, const ShadowFrame& frame) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FieldReadEventImpl(Thread* thread,
                          ObjPtr<mirror::Object> this_object,
                          ArtMethod* method,
                          uint32_t dex_pc,
                          ArtField* field) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FieldWriteEventImpl(Thread* thread,
                           ObjPtr<mirror::Object> this_object,
                           ArtMethod* method,
                           uint32_t dex_pc,
                           ArtField* field,
                           const JValue& field_value) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Read barrier-aware utility functions for accessing deoptimized_methods_.
  bool AddDeoptimizedMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_);
  bool IsDeoptimizedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  bool RemoveDeoptimizedMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_);
  void UpdateMethodsCodeImpl(ArtMethod* method, const void* new_code)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // We need to run method exit hooks for two reasons:
  // 1. When method exit listeners are installed.
  // 2. When we need to check if the caller of this method needs a deoptimization. This is needed
  // only for deoptimizing the currently active invocations on stack when we deoptimize a method or
  // invalidate the JITed code when redefining the classes. So future invocations don't need to do
  // this check.
  //
  // For JITed code of non-native methods we already have a stack slot reserved for deoptimizing
  // on demand and we use that stack slot to check if the caller needs a deoptimization. JITed code
  // checks if there are any method exit listeners or if the stack slot is set to determine if
  // method exit hooks need to be executed.
  //
  // For JITed JNI stubs there is no reserved stack slot for this and we just use this variable to
  // check if we need to run method entry / exit hooks. This variable is set when either of the
  // above conditions is true. If we need method exit hooks only for case 2, we would also call
  // exit hooks for future invocations where they aren't necessary.
  // QuickToInterpreterBridge and GenericJniStub also use this for the same reasons.
  // If calling entry / exit hooks becomes expensive we could do the same optimization we did for
  // JITed code by having a reserved stack slot.
  bool run_exit_hooks_;

  // The required level of instrumentation. This could be one of the following values:
  // kInstrumentNothing: no instrumentation support is needed.
  // kInstrumentWithEntryExitHooks: needs support to call method entry/exit stubs.
  // kInstrumentWithInterpreter: only execute with interpreter.
  Instrumentation::InstrumentationLevel instrumentation_level_;

  // Did the runtime request we only run in the interpreter? i.e. -Xint mode.
  bool forced_interpret_only_;

  // Do we have any listeners for method entry events? Short-cut to avoid taking the
  // instrumentation_lock_.
  bool have_method_entry_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any listeners for method exit events? Short-cut to avoid taking the
  // instrumentation_lock_.
  bool have_method_exit_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any listeners for method unwind events? Short-cut to avoid taking the
  // instrumentation_lock_.
  bool have_method_unwind_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any listeners for dex move events? Short-cut to avoid taking the
  // instrumentation_lock_.
  bool have_dex_pc_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any listeners for field read events? Short-cut to avoid taking the
  // instrumentation_lock_.
  bool have_field_read_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any listeners for field write events? Short-cut to avoid taking the
  // instrumentation_lock_.
  bool have_field_write_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any exception thrown listeners? Short-cut to avoid taking the instrumentation_lock_.
  bool have_exception_thrown_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any frame pop listeners? Short-cut to avoid taking the instrumentation_lock_.
  bool have_watched_frame_pop_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any branch listeners? Short-cut to avoid taking the instrumentation_lock_.
  bool have_branch_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any exception handled listeners? Short-cut to avoid taking the
  // instrumentation_lock_.
  bool have_exception_handled_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Contains the instrumentation level required by each client of the instrumentation identified
  // by a string key.
  using InstrumentationLevelTable = SafeMap<const char*, InstrumentationLevel>;
  InstrumentationLevelTable requested_instrumentation_levels_ GUARDED_BY(Locks::mutator_lock_);

  // The event listeners, written to with the mutator_lock_ exclusively held.
  // Mutators must be able to iterate over these lists concurrently, that is, with listeners being
  // added or removed while iterating. The modifying thread holds the exclusive lock,
  // so other threads cannot iterate (i.e. read the data of the list) at the same time but they
  // do keep iterators that need to remain valid. This is the reason these listeners are std::list
  // and not for example std::vector: the existing storage for a std::list does not move (see the
  // illustration after the declarations below).
  // Note that mutators cannot make a copy of these lists before iterating, as the instrumentation
  // listeners can also be deleted concurrently.
  // As a result, these lists are never trimmed. That's acceptable given the low number of
  // listeners we have.
  std::list<InstrumentationListener*> method_entry_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> method_exit_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> method_unwind_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> branch_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> dex_pc_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> field_read_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> field_write_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> exception_thrown_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> watched_frame_pop_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> exception_handled_listeners_ GUARDED_BY(Locks::mutator_lock_);
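
  // Standalone illustration of the std::list property relied on above (not ART code): list nodes
  // are stable, so iterators and references survive insertion, unlike std::vector where a
  // push_back may reallocate the whole backing array.
  //
  //   std::list<int> listeners{1, 2, 3};
  //   auto it = listeners.begin();   // points at the first node
  //   listeners.push_back(4);        // never relocates existing nodes
  //   int first = *it;               // still valid: reads 1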

  // The set of methods being deoptimized (by the debugger) which must be executed with interpreter
  // only.
  std::unordered_set<ArtMethod*> deoptimized_methods_ GUARDED_BY(Locks::mutator_lock_);

  // Greater than 0 if quick alloc entry points instrumented.
  size_t quick_alloc_entry_points_instrumentation_counter_;

  // alloc_entrypoints_instrumented_ is only updated with all the threads suspended; this is done
  // to prevent races with the GC, which relies on thread suspension to only see
  // alloc_entrypoints_instrumented_ change during suspend points.
  bool alloc_entrypoints_instrumented_;

  friend class InstrumentationTest;  // For GetCurrentInstrumentationLevel and ConfigureStubs.
  friend class InstrumentationStackPopper;  // For popping instrumentation frames.
  friend void InstrumentationInstallStack(Thread*, bool);

  DISALLOW_COPY_AND_ASSIGN(Instrumentation);
};
std::ostream& operator<<(std::ostream& os, Instrumentation::InstrumentationEvent rhs);
std::ostream& operator<<(std::ostream& os, Instrumentation::InstrumentationLevel rhs);

}  // namespace instrumentation
}  // namespace art

#endif  // ART_RUNTIME_INSTRUMENTATION_H_