/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_INSTRUMENTATION_H_
#define ART_RUNTIME_INSTRUMENTATION_H_

#include <stdint.h>

#include <functional>
#include <list>
#include <memory>
#include <optional>
#include <queue>
#include <unordered_set>

#include "arch/instruction_set.h"
#include "base/locks.h"
#include "base/macros.h"
#include "base/pointer_size.h"
#include "base/safe_map.h"
#include "gc_root.h"
#include "jvalue.h"
#include "offsets.h"

namespace art HIDDEN {
namespace mirror {
class Class;
class Object;
class Throwable;
}  // namespace mirror
class ArtField;
class ArtMethod;
class Context;
template <typename T> class Handle;
template <typename T> class MutableHandle;
struct NthCallerVisitor;
union JValue;
class OatQuickMethodHeader;
class SHARED_LOCKABLE ReaderWriterMutex;
class ShadowFrame;
class Thread;
enum class DeoptimizationMethodType;

namespace instrumentation {

// Do we want to deoptimize for method entry and exit listeners or just try to intercept
// invocations? Deoptimization forces all code to run in the interpreter and considerably hurts the
// application's performance.
static constexpr bool kDeoptimizeForAccurateMethodEntryExitListeners = true;

// An optional frame is either Some(const ShadowFrame& current_frame) or None, depending on whether
// the method being exited has a shadow-frame associated with the current stack frame. In cases
// where there is no shadow-frame associated with this stack frame this will be None.
using OptionalFrame = std::optional<std::reference_wrapper<const ShadowFrame>>;
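
// As a purely illustrative sketch (not part of this header), code that receives an OptionalFrame
// would typically check it before use; `OnExit` below is a hypothetical helper:
//
//   void OnExit(OptionalFrame frame) {
//     if (frame.has_value()) {
//       const ShadowFrame& shadow_frame = frame->get();
//       // Inspect the interpreter frame here.
//     } else {
//       // The exiting method has no shadow frame associated with this stack frame.
//     }
//   }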

// Instrumentation event listener API. Registered listeners will get the appropriate callback for
// the events they are listening for. The callbacks supply the thread, method and dex_pc the event
// occurred upon. The thread may or may not be Thread::Current().
struct InstrumentationListener {
  InstrumentationListener() {}
  virtual ~InstrumentationListener() {}

  // Call-back for when a method is entered.
  virtual void MethodEntered(Thread* thread, ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  virtual void MethodExited(Thread* thread,
                            ArtMethod* method,
                            OptionalFrame frame,
                            MutableHandle<mirror::Object>& return_value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Call-back for when a method is exited. The implementor should either handler-ize the return
  // value (if appropriate) or use the alternate MethodExited callback instead if they need to
  // go through a suspend point.
  virtual void MethodExited(Thread* thread,
                            ArtMethod* method,
                            OptionalFrame frame,
                            JValue& return_value)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back for when a method is popped due to an exception throw. A method will either cause a
  // MethodExited call-back or a MethodUnwind call-back when its activation is removed.
  virtual void MethodUnwind(Thread* thread,
                            ArtMethod* method,
                            uint32_t dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back for when the dex pc moves in a method.
  virtual void DexPcMoved(Thread* thread,
                          Handle<mirror::Object> this_object,
                          ArtMethod* method,
                          uint32_t new_dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back for when we read from a field.
  virtual void FieldRead(Thread* thread,
                         Handle<mirror::Object> this_object,
                         ArtMethod* method,
                         uint32_t dex_pc,
                         ArtField* field) = 0;

  virtual void FieldWritten(Thread* thread,
                            Handle<mirror::Object> this_object,
                            ArtMethod* method,
                            uint32_t dex_pc,
                            ArtField* field,
                            Handle<mirror::Object> field_value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Call-back for when we write into a field.
  virtual void FieldWritten(Thread* thread,
                            Handle<mirror::Object> this_object,
                            ArtMethod* method,
                            uint32_t dex_pc,
                            ArtField* field,
                            const JValue& field_value)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back when an exception is thrown.
  virtual void ExceptionThrown(Thread* thread,
                               Handle<mirror::Throwable> exception_object)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back when an exception is caught/handled by java code.
  virtual void ExceptionHandled(Thread* thread, Handle<mirror::Throwable> exception_object)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back for when we execute a branch.
  virtual void Branch(Thread* thread,
                      ArtMethod* method,
                      uint32_t dex_pc,
                      int32_t dex_pc_offset)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;

  // Call-back when a shadow_frame with the needs_notify_pop_ boolean set is popped off the stack by
  // either return or exceptions. Normally instrumentation listeners should ensure that there are
  // shadow-frames by deoptimizing stacks.
  virtual void WatchedFramePop([[maybe_unused]] Thread* thread,
                               [[maybe_unused]] const ShadowFrame& frame)
      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
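
// A minimal concrete listener, shown only as an illustrative sketch: the class name and the empty
// bodies below are hypothetical, and only the overridden signatures come from
// InstrumentationListener above. Real clients (tracing, debugging) implement every pure-virtual
// callback they register for.
//
//   class ExampleListener final : public InstrumentationListener {
//    public:
//     void MethodEntered(Thread* thread, ArtMethod* method)
//         REQUIRES_SHARED(Locks::mutator_lock_) override {
//       // Record a method-entry event for `method` on `thread`.
//     }
//     void MethodExited(Thread* thread,
//                       ArtMethod* method,
//                       OptionalFrame frame,
//                       JValue& return_value)
//         REQUIRES_SHARED(Locks::mutator_lock_) override {}
//     void MethodUnwind(Thread* thread, ArtMethod* method, uint32_t dex_pc)
//         REQUIRES_SHARED(Locks::mutator_lock_) override {}
//     // ... remaining pure-virtual callbacks (DexPcMoved, FieldRead, FieldWritten,
//     // ExceptionThrown, ExceptionHandled, Branch, WatchedFramePop) overridden similarly.
//   };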

class Instrumentation;
// A helper to send instrumentation events while popping the stack in a safe way.
class InstrumentationStackPopper {
 public:
  explicit InstrumentationStackPopper(Thread* self);
  ~InstrumentationStackPopper() REQUIRES_SHARED(Locks::mutator_lock_);

  // Increase the number of frames being popped up to `stack_pointer`. Return true if the
  // frames were popped without any exceptions, false otherwise. The exception that caused
  // the pop is `exception`.
  bool PopFramesTo(uintptr_t stack_pointer, /*in-out*/MutableHandle<mirror::Throwable>& exception)
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  Thread* self_;
  Instrumentation* instrumentation_;
  // The stack pointer limit for frames to pop.
  uintptr_t pop_until_;
};
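
// Illustrative only: the exception-delivery path in the runtime (not shown in this header) would
// use the popper roughly like this, where `self`, `target_sp` and `exception_handle` are
// hypothetical names for the current thread, the stack-pointer limit and a
// MutableHandle<mirror::Throwable>:
//
//   InstrumentationStackPopper popper(self);
//   if (!popper.PopFramesTo(target_sp, exception_handle)) {
//     // Frames were not popped cleanly; `exception_handle` holds the pending exception.
//   }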

// Instrumentation is a catch-all for when extra information is required from the runtime. The
// typical use for instrumentation is for profiling and debugging. Instrumentation may add stubs
// to method entry and exit; it may also force execution to be switched to the interpreter and
// trigger deoptimization.
class Instrumentation {
 public:
  enum InstrumentationEvent {
    kMethodEntered = 0x1,
    kMethodExited = 0x2,
    kMethodUnwind = 0x4,
    kDexPcMoved = 0x8,
    kFieldRead = 0x10,
    kFieldWritten = 0x20,
    kExceptionThrown = 0x40,
    kBranch = 0x80,
    kWatchedFramePop = 0x200,
    kExceptionHandled = 0x400,
  };

  enum class InstrumentationLevel {
    kInstrumentNothing,             // execute without instrumentation
    kInstrumentWithEntryExitHooks,  // execute with entry/exit hooks
    kInstrumentWithInterpreter      // execute with interpreter
  };

  static constexpr uint8_t kFastTraceListeners = 0b01;
  static constexpr uint8_t kSlowMethodEntryExitListeners = 0b10;

  Instrumentation();

  static constexpr MemberOffset RunExitHooksOffset() {
    // Assert that run_exit_hooks_ is 8 bits wide. If the size changes, update the compare
    // instructions in the code generator when generating checks for MethodEntryExitHooks.
    static_assert(sizeof(run_exit_hooks_) == 1, "run_exit_hooks_ isn't expected size");
    return MemberOffset(OFFSETOF_MEMBER(Instrumentation, run_exit_hooks_));
  }

  static constexpr MemberOffset HaveMethodEntryListenersOffset() {
    // Assert that have_method_entry_listeners_ is 8 bits wide. If the size changes, update the
    // compare instructions in the code generator when generating checks for MethodEntryExitHooks.
    static_assert(sizeof(have_method_entry_listeners_) == 1,
                  "have_method_entry_listeners_ isn't expected size");
    return MemberOffset(OFFSETOF_MEMBER(Instrumentation, have_method_entry_listeners_));
  }

  static constexpr MemberOffset HaveMethodExitListenersOffset() {
    // Assert that have_method_exit_listeners_ is 8 bits wide. If the size changes, update the
    // compare instructions in the code generator when generating checks for MethodEntryExitHooks.
    static_assert(sizeof(have_method_exit_listeners_) == 1,
                  "have_method_exit_listeners_ isn't expected size");
    return MemberOffset(OFFSETOF_MEMBER(Instrumentation, have_method_exit_listeners_));
  }

  // Add a listener to be notified of the masked together set of instrumentation events. This
  // suspends the runtime to install stubs. You are expected to hold the mutator lock as a proxy
  // for saying you should have suspended all threads (installing stubs while threads are running
  // will break).
  EXPORT void AddListener(InstrumentationListener* listener,
                          uint32_t events,
                          bool is_trace_listener = false)
      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);
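
  // Illustrative only: a client that satisfies the lock requirements above could register the
  // hypothetical ExampleListener sketched earlier for a mask of events, e.g.
  //
  //   uint32_t events = Instrumentation::kMethodEntered | Instrumentation::kMethodExited;
  //   instrumentation->AddListener(&example_listener, events);
  //
  // where `instrumentation` and `example_listener` are assumed to exist in the client; the enum
  // values come from InstrumentationEvent above.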

  // Removes listeners for the specified events.
  EXPORT void RemoveListener(InstrumentationListener* listener,
                             uint32_t events,
                             bool is_trace_listener = false)
      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);

  // Calls UndeoptimizeEverything which may visit class linker classes through ConfigureStubs.
  // try_switch_to_non_debuggable specifies if we can switch the runtime back to non-debuggable.
  // When a debugger is attached to a non-debuggable app, we switch the runtime to debuggable and
  // when we are detaching the debugger we move back to non-debuggable. If we are disabling
  // deoptimization for other reasons (ex: removing the last breakpoint) while the debugger is still
  // connected, we pass false to stay in debuggable. Switching runtimes is expensive so we only want
  // to switch when we know debug features aren't needed anymore.
  EXPORT void DisableDeoptimization(const char* key, bool try_switch_to_non_debuggable)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_);

  // Enables entry exit hooks support. This is called in preparation for debug requests that require
  // calling method entry / exit hooks.
  EXPORT void EnableEntryExitHooks(const char* key)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_);

  bool AreAllMethodsDeoptimized() const {
    return InterpreterStubsInstalled();
  }
  bool ShouldNotifyMethodEnterExitEvents() const REQUIRES_SHARED(Locks::mutator_lock_);

  // Executes everything with interpreter.
  EXPORT void DeoptimizeEverything(const char* key)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
          REQUIRES(!Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);

  // Executes everything with compiled code (or interpreter if there is no code). May visit class
  // linker classes through ConfigureStubs.
  EXPORT void UndeoptimizeEverything(const char* key)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
          REQUIRES(!Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);

  // Deoptimize a method by forcing its execution with the interpreter. Nevertheless, a static
  // method (except a class initializer) set to the resolution trampoline will be deoptimized only
  // once its declaring class is initialized.
  EXPORT void Deoptimize(ArtMethod* method)
      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_);

  // Undeoptimize the method by restoring its entrypoints. Nevertheless, a static method
  // (except a class initializer) set to the resolution trampoline will be updated only once its
  // declaring class is initialized.
  EXPORT void Undeoptimize(ArtMethod* method)
      REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_);
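
  // Illustrative only: a breakpoint implementation might pin a single method to the interpreter
  // and later restore it, under the lock requirements stated above, roughly as
  //
  //   instrumentation->Deoptimize(target_method);
  //   ...
  //   instrumentation->Undeoptimize(target_method);
  //
  // where `instrumentation` and `target_method` are hypothetical names for the runtime's
  // Instrumentation instance and the ArtMethod* of interest.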

  // Indicates whether the method has been deoptimized so it is executed with the interpreter.
  EXPORT bool IsDeoptimized(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  // Indicates if any method needs to be deoptimized. This is used to avoid walking the stack to
  // determine if a deoptimization is required.
  bool IsDeoptimizedMethodsEmpty() const REQUIRES_SHARED(Locks::mutator_lock_);

  // Enable method tracing by installing instrumentation entry/exit stubs or interpreter.
  EXPORT void EnableMethodTracing(
      const char* key,
      InstrumentationListener* listener,
      bool needs_interpreter = kDeoptimizeForAccurateMethodEntryExitListeners)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
          REQUIRES(!Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);

  // Disable method tracing by uninstalling instrumentation entry/exit stubs or interpreter.
  EXPORT void DisableMethodTracing(const char* key)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
          REQUIRES(!Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);
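
  // Illustrative only: a tracing client might enable and later disable method tracing under the
  // lock requirements stated above, e.g.
  //
  //   instrumentation->EnableMethodTracing("example-tracer", &example_listener);
  //   ...
  //   instrumentation->DisableMethodTracing("example-tracer");
  //
  // The key string and `example_listener` are hypothetical; the default needs_interpreter
  // argument follows kDeoptimizeForAccurateMethodEntryExitListeners defined above.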

  void InstrumentQuickAllocEntryPoints() REQUIRES(!Locks::instrument_entrypoints_lock_);
  void UninstrumentQuickAllocEntryPoints() REQUIRES(!Locks::instrument_entrypoints_lock_);
  void InstrumentQuickAllocEntryPointsLocked()
      REQUIRES(Locks::instrument_entrypoints_lock_, !Locks::thread_list_lock_,
               !Locks::runtime_shutdown_lock_);
  void UninstrumentQuickAllocEntryPointsLocked()
      REQUIRES(Locks::instrument_entrypoints_lock_, !Locks::thread_list_lock_,
               !Locks::runtime_shutdown_lock_);
  void ResetQuickAllocEntryPoints() REQUIRES(Locks::runtime_shutdown_lock_);

  // Returns a string representation of the given entry point.
  static std::string EntryPointString(const void* code);

  // Return the best initial entrypoint of a method, assuming that stubs are not in use.
  // This function can be called while the thread is suspended.
  const void* GetInitialEntrypoint(uint32_t method_access_flags, const void* aot_code);

  // Check if the best initial entrypoint needs to be overridden with stubs.
  bool InitialEntrypointNeedsInstrumentationStubs()
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Roles::uninterruptible_);

  // Initialize the method's entrypoint with aot code or runtime stub.
  // The caller must check and apply `InitialEntrypointNeedsInstrumentationStubs()`
  // in the same `Roles::uninterruptible_` section of code.
  void InitializeMethodsCode(ArtMethod* method, const void* entrypoint, PointerSize pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Roles::uninterruptible_);

  // Reinitialize the entrypoint of the method.
  EXPORT void ReinitializeMethodsCode(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Update the code of a method respecting any installed stubs.
  void UpdateMethodsCode(ArtMethod* method, const void* new_code)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Update the code of a native method to a JITed stub.
  void UpdateNativeMethodsCodeToJitCode(ArtMethod* method, const void* new_code)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Return the code that we can execute for an invoke including from the JIT.
  EXPORT const void* GetCodeForInvoke(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  // Return the code that we can execute considering the current instrumentation level.
  // If interpreter stubs are installed return interpreter bridge. If the entry exit stubs
  // are installed return an instrumentation entry point. Otherwise, return the code that
  // can be executed including from the JIT.
  const void* GetMaybeInstrumentedCodeForInvoke(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ForceInterpretOnly() {
    forced_interpret_only_ = true;
  }

  bool EntryExitStubsInstalled() const {
    return instrumentation_level_ == InstrumentationLevel::kInstrumentWithEntryExitHooks ||
           instrumentation_level_ == InstrumentationLevel::kInstrumentWithInterpreter;
  }

  bool InterpreterStubsInstalled() const {
    return instrumentation_level_ == InstrumentationLevel::kInstrumentWithInterpreter;
  }

  // Called by ArtMethod::Invoke to determine dispatch mechanism.
  bool InterpretOnly() const {
    return forced_interpret_only_ || InterpreterStubsInstalled();
  }
  bool InterpretOnly(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsForcedInterpretOnly() const {
    return forced_interpret_only_;
  }

  bool RunExitHooks() const {
    return run_exit_hooks_;
  }

  bool HasMethodEntryListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_method_entry_listeners_ != 0;
  }

  bool HasMethodExitListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_method_exit_listeners_ != 0;
  }

  bool HasFastMethodEntryListenersOnly() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_method_entry_listeners_ == kFastTraceListeners;
  }

  bool HasFastMethodExitListenersOnly() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_method_exit_listeners_ == kFastTraceListeners;
  }

  bool HasMethodUnwindListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_method_unwind_listeners_;
  }

  bool HasDexPcListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_dex_pc_listeners_;
  }

  bool HasFieldReadListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_field_read_listeners_;
  }

  bool HasFieldWriteListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_field_write_listeners_;
  }

  bool HasExceptionThrownListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_exception_thrown_listeners_;
  }

  bool HasBranchListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_branch_listeners_;
  }

  bool HasWatchedFramePopListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_watched_frame_pop_listeners_;
  }

  bool HasExceptionHandledListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_exception_handled_listeners_;
  }

  // Returns if dex pc events need to be reported for the specified method.
  // These events are reported when DexPCListeners are installed and at least one of the
  // following conditions holds:
  // 1. The method is deoptimized. This is done when there is a breakpoint on the method.
  // 2. When the thread is deoptimized. This is used when single stepping a single thread.
  // 3. When interpreter stubs are installed. In this case no additional information is maintained
  //    about which methods need dex pc move events. This is usually used for features which need
  //    them for several methods across threads or need expensive processing. So it is OK not to
  //    further optimize this case.
  // DexPCListeners are installed when there is a breakpoint on any method / single stepping
  // on any thread. These are removed when the last breakpoint is removed. See AddListener and
  // RemoveListener for more details.
  bool NeedsDexPcEvents(ArtMethod* method, Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_);

  bool NeedsSlowInterpreterForListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return have_field_read_listeners_ ||
           have_field_write_listeners_ ||
           have_watched_frame_pop_listeners_ ||
           have_exception_handled_listeners_;
  }

  // Inform listeners that a method has been entered. A dex PC is provided as we may install
  // listeners into executing code and get method enter events for methods already on the stack.
  void MethodEnterEvent(Thread* thread, ArtMethod* method) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasMethodEntryListeners())) {
      MethodEnterEventImpl(thread, method);
    }
  }

  // Inform listeners that a method has been exited.
  template<typename T>
  void MethodExitEvent(Thread* thread,
                       ArtMethod* method,
                       OptionalFrame frame,
                       T& return_value) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasMethodExitListeners())) {
      MethodExitEventImpl(thread, method, frame, return_value);
    }
  }

  // Inform listeners that a method has been exited due to an exception.
  void MethodUnwindEvent(Thread* thread,
                         ArtMethod* method,
                         uint32_t dex_pc) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Inform listeners that the dex pc has moved (only supported by the interpreter).
  void DexPcMovedEvent(Thread* thread,
                       ObjPtr<mirror::Object> this_object,
                       ArtMethod* method,
                       uint32_t dex_pc) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasDexPcListeners())) {
      DexPcMovedEventImpl(thread, this_object, method, dex_pc);
    }
  }

  // Inform listeners that a branch has been taken (only supported by the interpreter).
  void Branch(Thread* thread, ArtMethod* method, uint32_t dex_pc, int32_t offset) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasBranchListeners())) {
      BranchImpl(thread, method, dex_pc, offset);
    }
  }

  // Inform listeners that we read a field (only supported by the interpreter).
  void FieldReadEvent(Thread* thread,
                      ObjPtr<mirror::Object> this_object,
                      ArtMethod* method,
                      uint32_t dex_pc,
                      ArtField* field) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasFieldReadListeners())) {
      FieldReadEventImpl(thread, this_object, method, dex_pc, field);
    }
  }

  // Inform listeners that we write a field (only supported by the interpreter).
  void FieldWriteEvent(Thread* thread,
                       ObjPtr<mirror::Object> this_object,
                       ArtMethod* method,
                       uint32_t dex_pc,
                       ArtField* field,
                       const JValue& field_value) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasFieldWriteListeners())) {
      FieldWriteEventImpl(thread, this_object, method, dex_pc, field, field_value);
    }
  }

  // Inform listeners that a watched frame has been popped (only supported by the interpreter).
  void WatchedFramePopped(Thread* thread, const ShadowFrame& frame) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (UNLIKELY(HasWatchedFramePopListeners())) {
      WatchedFramePopImpl(thread, frame);
    }
  }

  // Inform listeners that an exception was thrown.
  void ExceptionThrownEvent(Thread* thread, ObjPtr<mirror::Throwable> exception_object) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Inform listeners that an exception has been handled. This is not sent for native code or for
  // exceptions which reach the end of the thread's stack.
  void ExceptionHandledEvent(Thread* thread, ObjPtr<mirror::Throwable> exception_object) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  JValue GetReturnValue(ArtMethod* method, bool* is_ref, uint64_t* gpr_result, uint64_t* fpr_result)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool PushDeoptContextIfNeeded(Thread* self,
                                DeoptimizationMethodType deopt_type,
                                bool is_ref,
                                const JValue& result) REQUIRES_SHARED(Locks::mutator_lock_);

  // Deoptimize upon pending exception or if the caller requires it. Returns a long jump context if
  // a deoptimization is needed and taken.
  std::unique_ptr<Context> DeoptimizeIfNeeded(Thread* self,
                                              ArtMethod** sp,
                                              DeoptimizationMethodType type,
                                              JValue result,
                                              bool is_ref) REQUIRES_SHARED(Locks::mutator_lock_);
  // This returns if the caller of a runtime method requires a deoptimization. This checks both
  // whether the method requires a deopt and whether this particular frame needs a deopt because of
  // a class redefinition.
  bool ShouldDeoptimizeCaller(Thread* self, ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_);
  bool ShouldDeoptimizeCaller(Thread* self, ArtMethod** sp, size_t frame_size)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // This returns if the specified method requires a deoptimization. This doesn't account for
  // whether a stack frame involving this method requires a deoptimization.
  bool NeedsSlowInterpreterForMethod(Thread* self, ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  DeoptimizationMethodType GetDeoptimizationMethodType(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Call-back for configure stubs.
  void InstallStubsForClass(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);

  void InstallStubsForMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  EXPORT void UpdateEntrypointsForDebuggable() REQUIRES(art::Locks::mutator_lock_);

  // Install instrumentation exit stub on every method of the stack of the given thread.
  // This is used by:
  //  - the debugger to cause a deoptimization of all frames in the thread's stack (for
  //    example, after updating local variables)
  //  - to call method entry / exit hooks for tracing. For this we instrument
  //    the stack frame to run entry / exit hooks but we don't need to deoptimize.
  // force_deopt indicates whether the frames need to deoptimize or not.
  EXPORT void InstrumentThreadStack(Thread* thread, bool force_deopt)
      REQUIRES(Locks::mutator_lock_);
  void InstrumentAllThreadStacks(bool force_deopt) REQUIRES(Locks::mutator_lock_)
      REQUIRES(!Locks::thread_list_lock_);

  // Force all currently running frames to be deoptimized back to interpreter. This should only be
  // used in cases where basically all compiled code has been invalidated.
  EXPORT void DeoptimizeAllThreadFrames() REQUIRES(art::Locks::mutator_lock_);

  static size_t ComputeFrameId(Thread* self,
                               size_t frame_depth,
                               size_t inlined_frames_before_frame)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Does not hold lock; used to check if someone changed from not instrumented to instrumented
  // during a GC suspend point.
  bool AllocEntrypointsInstrumented() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return alloc_entrypoints_instrumented_;
  }

  bool ProcessMethodUnwindCallbacks(Thread* self,
                                    std::queue<ArtMethod*>& methods,
                                    MutableHandle<mirror::Throwable>& exception)
      REQUIRES_SHARED(Locks::mutator_lock_);

  EXPORT InstrumentationLevel GetCurrentInstrumentationLevel() const;

  bool MethodSupportsExitEvents(ArtMethod* method, const OatQuickMethodHeader* header)
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  static bool CanUseAotCode(const void* quick_code);
  static const void* GetOptimizedCodeFor(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  // Update the current instrumentation_level_.
  void UpdateInstrumentationLevel(InstrumentationLevel level);

  // Does the job of installing or removing instrumentation code within methods.
  // In order to support multiple clients using instrumentation at the same time,
  // the caller must pass a unique key (a string) identifying it so we remember which
  // instrumentation level it needs. Therefore the current instrumentation level
  // becomes the highest instrumentation level required by a client.
  void ConfigureStubs(const char* key,
                      InstrumentationLevel desired_instrumentation_level,
                      bool try_switch_to_non_debuggable)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
      REQUIRES(!Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);
  void UpdateStubs(bool try_switch_to_non_debuggable)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_)
      REQUIRES(!Locks::thread_list_lock_, !Locks::classlinker_classes_lock_);

  // If there are no pending deoptimizations, restores the stack to the normal state by updating
  // the return pcs to actual return addresses from the instrumentation stack and clears the
  // instrumentation stack.
  void MaybeRestoreInstrumentationStack() REQUIRES(Locks::mutator_lock_);

  // Switches the runtime state to non-java debuggable if entry / exit hooks are no longer required
  // and the runtime did not start off as java debuggable.
  void MaybeSwitchRuntimeDebugState(Thread* self)
      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_);

  // No thread safety analysis to get around SetQuickAllocEntryPointsInstrumented requiring
  // exclusive access to the mutator lock, which you can't get if the runtime isn't started.
  void SetEntrypointsInstrumented(bool instrumented) NO_THREAD_SAFETY_ANALYSIS;

  void MethodEnterEventImpl(Thread* thread, ArtMethod* method) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  template <typename T>
  void MethodExitEventImpl(Thread* thread,
                           ArtMethod* method,
                           OptionalFrame frame,
                           T& return_value) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void DexPcMovedEventImpl(Thread* thread,
                           ObjPtr<mirror::Object> this_object,
                           ArtMethod* method,
                           uint32_t dex_pc) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void BranchImpl(Thread* thread, ArtMethod* method, uint32_t dex_pc, int32_t offset) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void WatchedFramePopImpl(Thread* thread, const ShadowFrame& frame) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FieldReadEventImpl(Thread* thread,
                          ObjPtr<mirror::Object> this_object,
                          ArtMethod* method,
                          uint32_t dex_pc,
                          ArtField* field) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FieldWriteEventImpl(Thread* thread,
                           ObjPtr<mirror::Object> this_object,
                           ArtMethod* method,
                           uint32_t dex_pc,
                           ArtField* field,
                           const JValue& field_value) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Read barrier-aware utility functions for accessing deoptimized_methods_
  bool AddDeoptimizedMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_);
  bool IsDeoptimizedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  bool RemoveDeoptimizedMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_);
  void UpdateMethodsCodeImpl(ArtMethod* method, const void* new_code)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // We need to run method exit hooks for two reasons:
  // 1. When method exit listeners are installed.
  // 2. When we need to check if the caller of this method needs a deoptimization. This is needed
  // only for deoptimizing the currently active invocations on stack when we deoptimize a method or
  // invalidate the JITed code when redefining the classes. So future invocations don't need to do
  // this check.
  //
  // For JITed code of non-native methods we already have a stack slot reserved for deoptimizing
  // on demand and we use that stack slot to check if the caller needs a deoptimization. JITed code
  // checks if there are any method exit listeners or if the stack slot is set to determine if
  // method exit hooks need to be executed.
  //
  // For JITed JNI stubs there is no reserved stack slot for this and we just use this variable to
  // check if we need to run method entry / exit hooks. This variable would be set when either of
  // the above conditions are true. If we need method exit hooks only for case 2, we would also run
  // exit hooks for future invocations where they aren't necessary.
  // QuickToInterpreterBridge and GenericJniStub also use this for the same reasons.
  // If calling entry / exit hooks becomes expensive we could do the same optimization we did for
  // JITed code by having a reserved stack slot.
  bool run_exit_hooks_;

  // The required level of instrumentation. This could be one of the following values:
  // kInstrumentNothing: no instrumentation support is needed
  // kInstrumentWithEntryExitHooks: needs support to call method entry/exit stubs.
  // kInstrumentWithInterpreter: only execute with interpreter
  Instrumentation::InstrumentationLevel instrumentation_level_;

  // Did the runtime request we only run in the interpreter? i.e., -Xint mode.
  bool forced_interpret_only_;

  // For method entry / exit events, we maintain fast trace listeners in a separate list to make
  // implementation of fast trace listeners more efficient by JITing the code to handle fast trace
  // events. We use a uint8_t (and not bool) to encode if there are none / fast / slow listeners.
  // Do we have any listeners for method entry events.
  uint8_t have_method_entry_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any listeners for method exit events.
  uint8_t have_method_exit_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any listeners for method unwind events?
  bool have_method_unwind_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any listeners for dex pc move events?
  bool have_dex_pc_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any listeners for field read events?
  bool have_field_read_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any listeners for field write events?
  bool have_field_write_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any exception thrown listeners?
  bool have_exception_thrown_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any frame pop listeners?
  bool have_watched_frame_pop_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any branch listeners?
  bool have_branch_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Do we have any exception handled listeners?
  bool have_exception_handled_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // Contains the instrumentation level required by each client of the instrumentation identified
  // by a string key.
  using InstrumentationLevelTable = SafeMap<const char*, InstrumentationLevel>;
  InstrumentationLevelTable requested_instrumentation_levels_ GUARDED_BY(Locks::mutator_lock_);

  // The event listeners, written to with the mutator_lock_ exclusively held.
  // Mutators must be able to iterate over these lists concurrently, that is, with listeners being
  // added or removed while iterating. The modifying thread holds exclusive lock,
  // so other threads cannot iterate (i.e. read the data of the list) at the same time but they
  // do keep iterators that need to remain valid. This is the reason these listeners are std::list
  // and not for example std::vector: the existing storage for a std::list does not move.
  // Note that mutators cannot make a copy of these lists before iterating, as the instrumentation
  // listeners can also be deleted concurrently.
  // As a result, these lists are never trimmed. That's acceptable given the low number of
  // listeners we have.
  std::list<InstrumentationListener*> method_entry_slow_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> method_entry_fast_trace_listeners_
      GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> method_exit_slow_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> method_exit_fast_trace_listeners_
      GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> method_unwind_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> branch_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> dex_pc_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> field_read_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> field_write_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> exception_thrown_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> watched_frame_pop_listeners_ GUARDED_BY(Locks::mutator_lock_);
  std::list<InstrumentationListener*> exception_handled_listeners_ GUARDED_BY(Locks::mutator_lock_);

  // The set of methods being deoptimized (by the debugger) which must be executed with interpreter
  // only.
  std::unordered_set<ArtMethod*> deoptimized_methods_ GUARDED_BY(Locks::mutator_lock_);

  // Greater than 0 if quick alloc entry points instrumented.
  size_t quick_alloc_entry_points_instrumentation_counter_;

  // alloc_entrypoints_instrumented_ is only updated with all the threads suspended; this is done
  // to prevent races with the GC, where the GC relies on thread suspension to only see
  // alloc_entrypoints_instrumented_ change during suspend points.
  bool alloc_entrypoints_instrumented_;

  friend class InstrumentationTest;  // For GetCurrentInstrumentationLevel and ConfigureStubs.
  friend class InstrumentationStackPopper;  // For popping instrumentation frames.
  friend void InstrumentationInstallStack(Thread*, bool);

  DISALLOW_COPY_AND_ASSIGN(Instrumentation);
};
std::ostream& operator<<(std::ostream& os, Instrumentation::InstrumentationEvent rhs);
std::ostream& operator<<(std::ostream& os, Instrumentation::InstrumentationLevel rhs);

}  // namespace instrumentation
}  // namespace art

#endif  // ART_RUNTIME_INSTRUMENTATION_H_