/* Copyright (C) 2016 The Android Open Source Project
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This file implements interfaces from the file jvmti.h. This implementation
 * is licensed under the same terms as the file jvmti.h. The
 * copyright and license information for the file jvmti.h follows.
 *
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "events.h"

#include <sys/time.h>

#include <array>
#include <functional>

#include "alloc_manager.h"
#include "android-base/thread_annotations.h"
#include "arch/context.h"
#include "art_field-inl.h"
#include "art_jvmti.h"
#include "art_method-inl.h"
#include "base/locks.h"
#include "base/mutex.h"
#include "deopt_manager.h"
#include "dex/dex_file_types.h"
#include "events-inl.h"
#include "gc/allocation_listener.h"
#include "gc/gc_pause_listener.h"
#include "gc/heap.h"
#include "gc/scoped_gc_critical_section.h"
#include "handle_scope-inl.h"
#include "indirect_reference_table.h"
#include "instrumentation.h"
#include "interpreter/shadow_frame.h"
#include "jni/jni_env_ext-inl.h"
#include "jni/jni_internal.h"
#include "jvalue-inl.h"
#include "jvalue.h"
#include "jvmti.h"
#include "mirror/class.h"
#include "mirror/object-inl.h"
#include "monitor-inl.h"
#include "nativehelper/scoped_local_ref.h"
#include "reflective_handle.h"
#include "reflective_handle_scope-inl.h"
#include "runtime.h"
#include "scoped_thread_state_change-inl.h"
#include "scoped_thread_state_change.h"
#include "stack.h"
#include "thread-inl.h"
#include "thread.h"
#include "thread_list.h"
#include "ti_phase.h"
#include "ti_thread.h"
#include "well_known_classes.h"

namespace openjdkjvmti {
void ArtJvmtiEventCallbacks::CopyExtensionsFrom(const ArtJvmtiEventCallbacks* cb) {
  if (art::kIsDebugBuild) {
    ArtJvmtiEventCallbacks clean;
    DCHECK_EQ(memcmp(&clean, this, sizeof(clean)), 0)
        << "CopyExtensionsFrom called with initialized ArtJvmtiEventCallbacks!";
  }
  if (cb != nullptr) {
    memcpy(this, cb, sizeof(*this));
  } else {
    memset(this, 0, sizeof(*this));
  }
}

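// Installs the handler for a single ART extension event. Agents reach this through the standard
// jvmti SetExtensionEventCallback entry point once the extension's event index has been
// validated; roughly (agent-side sketch, MyDdmChunkHandler is illustrative and not part of this
// file):
//
//   jvmti_env->SetExtensionEventCallback(
//       static_cast<jint>(ArtJvmtiEvent::kDdmPublishChunk),
//       reinterpret_cast<jvmtiExtensionEvent>(&MyDdmChunkHandler));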
jvmtiError ArtJvmtiEventCallbacks::Set(jint index, jvmtiExtensionEvent cb) {
  switch (index) {
    case static_cast<jint>(ArtJvmtiEvent::kObsoleteObjectCreated):
      ObsoleteObjectCreated = reinterpret_cast<ArtJvmtiEventObsoleteObjectCreated>(cb);
      return OK;
    case static_cast<jint>(ArtJvmtiEvent::kDdmPublishChunk):
      DdmPublishChunk = reinterpret_cast<ArtJvmtiEventDdmPublishChunk>(cb);
      return OK;
    case static_cast<jint>(ArtJvmtiEvent::kStructuralDexFileLoadHook):
      StructuralDexFileLoadHook = reinterpret_cast<ArtJvmtiEventStructuralDexFileLoadHook>(cb);
      return OK;
    default:
      return ERR(ILLEGAL_ARGUMENT);
  }
}


bool IsExtensionEvent(jint e) {
  return e >= static_cast<jint>(ArtJvmtiEvent::kMinEventTypeVal) &&
         e <= static_cast<jint>(ArtJvmtiEvent::kMaxEventTypeVal) &&
         IsExtensionEvent(static_cast<ArtJvmtiEvent>(e));
}

bool IsExtensionEvent(ArtJvmtiEvent e) {
  switch (e) {
    case ArtJvmtiEvent::kDdmPublishChunk:
    case ArtJvmtiEvent::kObsoleteObjectCreated:
    case ArtJvmtiEvent::kStructuralDexFileLoadHook:
      return true;
    default:
      return false;
  }
}

bool EventMasks::IsEnabledAnywhere(ArtJvmtiEvent event) {
  return global_event_mask.Test(event) || unioned_thread_event_mask.Test(event);
}

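// Returns the event mask for the given thread, creating an empty one if none exists yet. Threads
// are keyed by UniqueThread (Thread* plus tid) rather than by the bare pointer, since a Thread's
// storage can be reused for a different thread after it exits; the tid disambiguates such reuse.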
EventMask& EventMasks::GetEventMask(art::Thread* thread) {
  if (thread == nullptr) {
    return global_event_mask;
  }

  for (auto& pair : thread_event_masks) {
    const UniqueThread& unique_thread = pair.first;
    if (unique_thread.first == thread &&
        unique_thread.second == static_cast<uint32_t>(thread->GetTid())) {
      return pair.second;
    }
  }

  // TODO: Remove old UniqueThread with the same pointer, if exists.

  thread_event_masks.emplace_back(UniqueThread(thread, thread->GetTid()), EventMask());
  return thread_event_masks.back().second;
}

EventMask* EventMasks::GetEventMaskOrNull(art::Thread* thread) {
  if (thread == nullptr) {
    return &global_event_mask;
  }

  for (auto& pair : thread_event_masks) {
    const UniqueThread& unique_thread = pair.first;
    if (unique_thread.first == thread &&
        unique_thread.second == static_cast<uint32_t>(thread->GetTid())) {
      return &pair.second;
    }
  }

  return nullptr;
}


void EventMasks::EnableEvent(ArtJvmTiEnv* env, art::Thread* thread, ArtJvmtiEvent event) {
  DCHECK_EQ(&env->event_masks, this);
  env->event_info_mutex_.AssertExclusiveHeld(art::Thread::Current());
  DCHECK(EventMask::EventIsInRange(event));
  GetEventMask(thread).Set(event);
  if (thread != nullptr) {
    unioned_thread_event_mask.Set(event, true);
  }
}

void EventMasks::DisableEvent(ArtJvmTiEnv* env, art::Thread* thread, ArtJvmtiEvent event) {
  DCHECK_EQ(&env->event_masks, this);
  env->event_info_mutex_.AssertExclusiveHeld(art::Thread::Current());
  DCHECK(EventMask::EventIsInRange(event));
  GetEventMask(thread).Set(event, false);
  if (thread != nullptr) {
    // Regenerate union for the event.
    bool union_value = false;
    for (auto& pair : thread_event_masks) {
      union_value |= pair.second.Test(event);
      if (union_value) {
        break;
      }
    }
    unioned_thread_event_mask.Set(event, union_value);
  }
}

void EventMasks::HandleChangedCapabilities(const jvmtiCapabilities& caps, bool caps_added) {
  if (UNLIKELY(caps.can_retransform_classes == 1)) {
    // If we are giving this env the retransform classes cap we need to switch all events of
    // NonRetransformable to Retransformable and vice versa.
    ArtJvmtiEvent to_remove = caps_added ? ArtJvmtiEvent::kClassFileLoadHookNonRetransformable
                                         : ArtJvmtiEvent::kClassFileLoadHookRetransformable;
    ArtJvmtiEvent to_add = caps_added ? ArtJvmtiEvent::kClassFileLoadHookRetransformable
                                      : ArtJvmtiEvent::kClassFileLoadHookNonRetransformable;
    if (global_event_mask.Test(to_remove)) {
      CHECK(!global_event_mask.Test(to_add));
      global_event_mask.Set(to_remove, false);
      global_event_mask.Set(to_add, true);
    }

    if (unioned_thread_event_mask.Test(to_remove)) {
      CHECK(!unioned_thread_event_mask.Test(to_add));
      unioned_thread_event_mask.Set(to_remove, false);
      unioned_thread_event_mask.Set(to_add, true);
    }
    for (auto thread_mask : thread_event_masks) {
      if (thread_mask.second.Test(to_remove)) {
        CHECK(!thread_mask.second.Test(to_add));
        thread_mask.second.Set(to_remove, false);
        thread_mask.second.Set(to_add, true);
      }
    }
  }
}

void EventHandler::RegisterArtJvmTiEnv(ArtJvmTiEnv* env) {
  art::WriterMutexLock mu(art::Thread::Current(), envs_lock_);
  envs.push_back(env);
}

void EventHandler::RemoveArtJvmTiEnv(ArtJvmTiEnv* env) {
  art::WriterMutexLock mu(art::Thread::Current(), envs_lock_);
  // We hold envs_lock_ exclusively, so nothing can be iterating over the envs list and it is safe
  // to erase the entry outright. Afterwards the global event masks must be recalculated since
  // this env no longer contributes to them.
  auto it = std::find(envs.begin(), envs.end(), env);
  if (it != envs.end()) {
    envs.erase(it);
    for (size_t i = static_cast<size_t>(ArtJvmtiEvent::kMinEventTypeVal);
         i <= static_cast<size_t>(ArtJvmtiEvent::kMaxEventTypeVal);
         ++i) {
      RecalculateGlobalEventMaskLocked(static_cast<ArtJvmtiEvent>(i));
    }
  }
}

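// Returns whether an event can be enabled or disabled on a per-thread basis. The events listed
// below are global-only: either the spec requires them to be (e.g. VM_INIT, VM_DEATH,
// DATA_DUMP_REQUEST) or, for the kObsoleteObjectCreated extension, a per-thread setting would
// not be meaningful.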
static bool IsThreadControllable(ArtJvmtiEvent event) {
  switch (event) {
    case ArtJvmtiEvent::kVmInit:
    case ArtJvmtiEvent::kVmStart:
    case ArtJvmtiEvent::kVmDeath:
    case ArtJvmtiEvent::kThreadStart:
    case ArtJvmtiEvent::kCompiledMethodLoad:
    case ArtJvmtiEvent::kCompiledMethodUnload:
    case ArtJvmtiEvent::kDynamicCodeGenerated:
    case ArtJvmtiEvent::kDataDumpRequest:
    case ArtJvmtiEvent::kObsoleteObjectCreated:
      return false;

    default:
      return true;
  }
}

template<typename Type>
static Type AddLocalRef(art::JNIEnvExt* e, art::ObjPtr<art::mirror::Object> obj)
    REQUIRES_SHARED(art::Locks::mutator_lock_) {
  return (obj == nullptr) ? nullptr : e->AddLocalReference<Type>(obj);
}

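// Dispatches an event whose agent callback takes the standard (jvmtiEnv*, JNIEnv*, jthread, ...)
// argument prefix: a local reference to the current thread's peer is materialized and the
// remaining arguments are forwarded. For example,
//   RunEventCallback<ArtJvmtiEvent::kMonitorWait>(handler, self, jnienv, mon.get(), timeout);
// ends up invoking each interested env's MonitorWait callback as
// cb(jvmti_env, jni_env, thread, object, timeout).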
template<ArtJvmtiEvent kEvent, typename ...Args>
static void RunEventCallback(EventHandler* handler,
                             art::Thread* self,
                             art::JNIEnvExt* jnienv,
                             Args... args)
    REQUIRES_SHARED(art::Locks::mutator_lock_) {
  ScopedLocalRef<jthread> thread_jni(jnienv, AddLocalRef<jthread>(jnienv, self->GetPeer()));
  handler->DispatchEvent<kEvent>(self,
                                 static_cast<JNIEnv*>(jnienv),
                                 thread_jni.get(),
                                 args...);
}

static void SetupDdmTracking(art::DdmCallback* listener, bool enable) {
  art::ScopedObjectAccess soa(art::Thread::Current());
  if (enable) {
    art::Runtime::Current()->GetRuntimeCallbacks()->AddDdmCallback(listener);
  } else {
    art::Runtime::Current()->GetRuntimeCallbacks()->RemoveDdmCallback(listener);
  }
}

class JvmtiDdmChunkListener : public art::DdmCallback {
 public:
  explicit JvmtiDdmChunkListener(EventHandler* handler) : handler_(handler) {}

  void DdmPublishChunk(uint32_t type, const art::ArrayRef<const uint8_t>& data)
      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kDdmPublishChunk)) {
      art::Thread* self = art::Thread::Current();
      handler_->DispatchEvent<ArtJvmtiEvent::kDdmPublishChunk>(
          self,
          static_cast<jint>(type),
          static_cast<jint>(data.size()),
          reinterpret_cast<const jbyte*>(data.data()));
    }
  }

 private:
  EventHandler* handler_;

  DISALLOW_COPY_AND_ASSIGN(JvmtiDdmChunkListener);
};

class JvmtiEventAllocationListener : public AllocationManager::AllocationCallback {
 public:
  explicit JvmtiEventAllocationListener(EventHandler* handler) : handler_(handler) {}

  void ObjectAllocated(art::Thread* self, art::ObjPtr<art::mirror::Object>* obj, size_t byte_count)
      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    DCHECK_EQ(self, art::Thread::Current());

    if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kVmObjectAlloc)) {
      art::StackHandleScope<1> hs(self);
      auto h = hs.NewHandleWrapper(obj);
      // jvmtiEventVMObjectAlloc parameters:
      //   jvmtiEnv *jvmti_env,
      //   JNIEnv* jni_env,
      //   jthread thread,
      //   jobject object,
      //   jclass object_klass,
      //   jlong size
      art::JNIEnvExt* jni_env = self->GetJniEnv();
      ScopedLocalRef<jobject> object(
          jni_env, jni_env->AddLocalReference<jobject>(*obj));
      ScopedLocalRef<jclass> klass(
          jni_env, jni_env->AddLocalReference<jclass>(obj->Ptr()->GetClass()));

      RunEventCallback<ArtJvmtiEvent::kVmObjectAlloc>(handler_,
                                                      self,
                                                      jni_env,
                                                      object.get(),
                                                      klass.get(),
                                                      static_cast<jlong>(byte_count));
    }
  }

 private:
  EventHandler* handler_;
};

static void SetupObjectAllocationTracking(bool enable) {
  // We must not hold the mutator lock here, but if we're in FastJNI, for example, we might. For
  // now, do a workaround: (possibly) acquire and release.
  art::ScopedObjectAccess soa(art::Thread::Current());
  if (enable) {
    AllocationManager::Get()->EnableAllocationCallback(soa.Self());
  } else {
    AllocationManager::Get()->DisableAllocationCallback(soa.Self());
  }
}

class JvmtiMonitorListener : public art::MonitorCallback {
 public:
  explicit JvmtiMonitorListener(EventHandler* handler) : handler_(handler) {}

  void MonitorContendedLocking(art::Monitor* m)
      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorContendedEnter)) {
      art::Thread* self = art::Thread::Current();
      art::JNIEnvExt* jnienv = self->GetJniEnv();
      ScopedLocalRef<jobject> mon(jnienv, AddLocalRef<jobject>(jnienv, m->GetObject()));
      RunEventCallback<ArtJvmtiEvent::kMonitorContendedEnter>(
          handler_,
          self,
          jnienv,
          mon.get());
    }
  }

  void MonitorContendedLocked(art::Monitor* m)
      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorContendedEntered)) {
      art::Thread* self = art::Thread::Current();
      art::JNIEnvExt* jnienv = self->GetJniEnv();
      ScopedLocalRef<jobject> mon(jnienv, AddLocalRef<jobject>(jnienv, m->GetObject()));
      RunEventCallback<ArtJvmtiEvent::kMonitorContendedEntered>(
          handler_,
          self,
          jnienv,
          mon.get());
    }
  }

  void ObjectWaitStart(art::Handle<art::mirror::Object> obj, int64_t timeout)
      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorWait)) {
      art::Thread* self = art::Thread::Current();
      art::JNIEnvExt* jnienv = self->GetJniEnv();
      ScopedLocalRef<jobject> mon(jnienv, AddLocalRef<jobject>(jnienv, obj.Get()));
      RunEventCallback<ArtJvmtiEvent::kMonitorWait>(
          handler_,
          self,
          jnienv,
          mon.get(),
          static_cast<jlong>(timeout));
    }
  }


  // Our interpretation of the spec is that the JVMTI_EVENT_MONITOR_WAITED will be sent immediately
  // after a thread has woken up from a sleep caused by a call to Object#wait. If the thread will
  // never go to sleep (due to not having the lock, having bad arguments, or having an exception
  // propagated from JVMTI_EVENT_MONITOR_WAIT) we will not send this event.
  //
  // This does not fully match the RI semantics. Specifically, we will not send the
  // JVMTI_EVENT_MONITOR_WAITED event in one situation where the RI would: when there was an
  // exception in the JVMTI_EVENT_MONITOR_WAIT event but the call was otherwise fine. In that case
  // the RI would send this event and return without going to sleep.
  //
  // See b/65558434 for more discussion.
  void MonitorWaitFinished(art::Monitor* m, bool timeout)
      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorWaited)) {
      art::Thread* self = art::Thread::Current();
      art::JNIEnvExt* jnienv = self->GetJniEnv();
      ScopedLocalRef<jobject> mon(jnienv, AddLocalRef<jobject>(jnienv, m->GetObject()));
      RunEventCallback<ArtJvmtiEvent::kMonitorWaited>(
          handler_,
          self,
          jnienv,
          mon.get(),
          static_cast<jboolean>(timeout));
    }
  }

 private:
  EventHandler* handler_;
};

class JvmtiParkListener : public art::ParkCallback {
 public:
  explicit JvmtiParkListener(EventHandler* handler) : handler_(handler) {}

  void ThreadParkStart(bool is_absolute, int64_t timeout)
      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorWait)) {
      art::Thread* self = art::Thread::Current();
      art::JNIEnvExt* jnienv = self->GetJniEnv();
      art::ObjPtr<art::mirror::Object> blocker_obj =
          art::WellKnownClasses::java_lang_Thread_parkBlocker->GetObj(self->GetPeer());
      if (blocker_obj.IsNull()) {
        blocker_obj = self->GetPeer();
      }
      int64_t timeout_ms;
      if (!is_absolute) {
        if (timeout == 0) {
          timeout_ms = 0;
        } else {
          timeout_ms = timeout / 1000000;
          if (timeout_ms == 0) {
            // If we were instructed to park for a nonzero number of nanoseconds, but not enough
            // to be a full millisecond, round up to 1 ms. A nonzero park() call will return
            // soon, but a 0 wait or park call will wait indefinitely.
            timeout_ms = 1;
          }
        }
      } else {
        struct timeval tv;
        gettimeofday(&tv, (struct timezone *) nullptr);
        int64_t now = tv.tv_sec * 1000LL + tv.tv_usec / 1000;
        if (now < timeout) {
          timeout_ms = timeout - now;
        } else {
          // Waiting for 0 ms is an indefinite wait; parking until a time in
          // the past or the current time will return immediately, so emulate
          // the shortest possible wait event.
          timeout_ms = 1;
        }
      }
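      // For example, a relative park of 500,000ns yields timeout_ms == 1 via the rounding above,
      // and an absolute deadline at or before the current time is likewise reported as the
      // minimal 1ms wait rather than as an indefinite (0) one.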
      ScopedLocalRef<jobject> blocker(jnienv, AddLocalRef<jobject>(jnienv, blocker_obj.Ptr()));
      RunEventCallback<ArtJvmtiEvent::kMonitorWait>(
          handler_,
          self,
          jnienv,
          blocker.get(),
          static_cast<jlong>(timeout_ms));
    }
  }


  // Our interpretation of the spec is that the JVMTI_EVENT_MONITOR_WAITED will be sent immediately
  // after a thread has woken up from a sleep caused by a call to LockSupport#park. If the thread
  // will never go to sleep (due to not having the lock, having bad arguments, or having an
  // exception propagated from JVMTI_EVENT_MONITOR_WAIT) we will not send this event.
  //
  // This does not fully match the RI semantics. Specifically, we will not send the
  // JVMTI_EVENT_MONITOR_WAITED event in one situation where the RI would: when there was an
  // exception in the JVMTI_EVENT_MONITOR_WAIT event but the call was otherwise fine. In that case
  // the RI would send this event and return without going to sleep.
  //
  // See b/65558434 for more discussion.
  void ThreadParkFinished(bool timeout) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
    if (handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMonitorWaited)) {
      art::Thread* self = art::Thread::Current();
      art::JNIEnvExt* jnienv = self->GetJniEnv();
      art::ObjPtr<art::mirror::Object> blocker_obj =
          art::WellKnownClasses::java_lang_Thread_parkBlocker->GetObj(self->GetPeer());
      if (blocker_obj.IsNull()) {
        blocker_obj = self->GetPeer();
      }
      ScopedLocalRef<jobject> blocker(jnienv, AddLocalRef<jobject>(jnienv, blocker_obj.Ptr()));
      RunEventCallback<ArtJvmtiEvent::kMonitorWaited>(
          handler_,
          self,
          jnienv,
          blocker.get(),
          static_cast<jboolean>(timeout));
    }
  }

 private:
  EventHandler* handler_;
};

static void SetupMonitorListener(art::MonitorCallback* monitor_listener,
                                 art::ParkCallback* park_listener,
                                 bool enable) {
  // We must not hold the mutator lock here, but if we're in FastJNI, for example, we might. For
  // now, do a workaround: (possibly) acquire and release.
  art::ScopedObjectAccess soa(art::Thread::Current());
  if (enable) {
    art::Runtime::Current()->GetRuntimeCallbacks()->AddMonitorCallback(monitor_listener);
    art::Runtime::Current()->GetRuntimeCallbacks()->AddParkCallback(park_listener);
  } else {
    art::Runtime::Current()->GetRuntimeCallbacks()->RemoveMonitorCallback(monitor_listener);
    art::Runtime::Current()->GetRuntimeCallbacks()->RemoveParkCallback(park_listener);
  }
}

// Report GC pauses (see spec) as GARBAGE_COLLECTION_START and GARBAGE_COLLECTION_END.
class JvmtiGcPauseListener : public art::gc::GcPauseListener {
 public:
  explicit JvmtiGcPauseListener(EventHandler* handler)
      : handler_(handler),
        start_enabled_(false),
        finish_enabled_(false) {}

  void StartPause() override {
    handler_->DispatchEvent<ArtJvmtiEvent::kGarbageCollectionStart>(art::Thread::Current());
  }

  void EndPause() override {
    handler_->DispatchEvent<ArtJvmtiEvent::kGarbageCollectionFinish>(art::Thread::Current());
  }

  bool IsEnabled() {
    return start_enabled_ || finish_enabled_;
  }

  void SetStartEnabled(bool e) {
    start_enabled_ = e;
  }

  void SetFinishEnabled(bool e) {
    finish_enabled_ = e;
  }

 private:
  EventHandler* handler_;
  bool start_enabled_;
  bool finish_enabled_;
};

static void SetupGcPauseTracking(JvmtiGcPauseListener* listener, ArtJvmtiEvent event, bool enable) {
  bool old_state = listener->IsEnabled();

  if (event == ArtJvmtiEvent::kGarbageCollectionStart) {
    listener->SetStartEnabled(enable);
  } else {
    listener->SetFinishEnabled(enable);
  }

  bool new_state = listener->IsEnabled();

  if (old_state != new_state) {
    if (new_state) {
      art::Runtime::Current()->GetHeap()->SetGcPauseListener(listener);
    } else {
      art::Runtime::Current()->GetHeap()->RemoveGcPauseListener();
    }
  }
}

class JvmtiMethodTraceListener final : public art::instrumentation::InstrumentationListener {
 public:
  explicit JvmtiMethodTraceListener(EventHandler* handler)
      : event_handler_(handler),
        non_standard_exits_lock_("JVMTI NonStandard Exits list lock",
                                 art::LockLevel::kGenericBottomLock) {}

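  // Records the return value that a ForceEarlyReturn* request wants a frame to produce. Entries
  // are keyed by ShadowFrame so that the matching MethodExited callback below can substitute the
  // value (and, for object returns, release the global reference that kept it alive meanwhile).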
  void AddDelayedNonStandardExitEvent(const art::ShadowFrame* frame, bool is_object, jvalue val)
      REQUIRES_SHARED(art::Locks::mutator_lock_)
      REQUIRES(art::Locks::user_code_suspension_lock_, art::Locks::thread_list_lock_) {
    art::Thread* self = art::Thread::Current();
    jobject to_cleanup = nullptr;
    jobject new_val = is_object ? self->GetJniEnv()->NewGlobalRef(val.l) : nullptr;
    {
      art::MutexLock mu(self, non_standard_exits_lock_);
      NonStandardExitEventInfo saved{ nullptr, { .j = 0 } };
      if (is_object) {
        saved.return_val_obj_ = new_val;
        saved.return_val_.l = saved.return_val_obj_;
      } else {
        saved.return_val_.j = val.j;
      }
      // Only objects need cleanup.
      if (UNLIKELY(is_object && non_standard_exits_.find(frame) != non_standard_exits_.end())) {
        to_cleanup = non_standard_exits_.find(frame)->second.return_val_obj_;
      }
      non_standard_exits_.insert_or_assign(frame, saved);
    }
    self->GetJniEnv()->DeleteGlobalRef(to_cleanup);
  }

  // Call-back for when a method is entered.
  void MethodEntered(art::Thread* self, art::ArtMethod* method)
      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
    if (!method->IsRuntimeMethod() &&
        event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodEntry)) {
      art::JNIEnvExt* jnienv = self->GetJniEnv();
      RunEventCallback<ArtJvmtiEvent::kMethodEntry>(event_handler_,
                                                    self,
                                                    jnienv,
                                                    art::jni::EncodeArtMethod(method));
    }
  }

  // TODO Maybe try to combine this with below using templates?
  // Callback for when a method is exited with a reference return value.
  void MethodExited(art::Thread* self,
                    art::ArtMethod* method,
                    art::instrumentation::OptionalFrame frame,
                    art::MutableHandle<art::mirror::Object>& return_value)
      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
    if (method->IsRuntimeMethod()) {
      return;
    }
    if (frame.has_value() && UNLIKELY(event_handler_->IsEventEnabledAnywhere(
                                 ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue))) {
      DCHECK(!frame->get().GetSkipMethodExitEvents());
      bool has_return = false;
      jobject ret_val = nullptr;
      {
        art::MutexLock mu(self, non_standard_exits_lock_);
        const art::ShadowFrame* sframe = &frame.value().get();
        const auto it = non_standard_exits_.find(sframe);
        if (it != non_standard_exits_.end()) {
          ret_val = it->second.return_val_obj_;
          non_standard_exits_.erase(it);
          has_return = true;
        }
      }
      if (has_return) {
        return_value.Assign(self->DecodeJObject(ret_val));
        ScopedLocalRef<jthread> thr(self->GetJniEnv(),
                                    self->GetJniEnv()->NewLocalRef(self->GetPeer()));
        art::ScopedThreadSuspension sts(self, art::ThreadState::kNative);
        self->GetJniEnv()->DeleteGlobalRef(ret_val);
        event_handler_->SetInternalEvent(
            thr.get(), ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue, JVMTI_DISABLE);
      }
    }
    if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
      DCHECK_EQ(
          method->GetInterfaceMethodIfProxy(art::kRuntimePointerSize)->GetReturnTypePrimitive(),
          art::Primitive::kPrimNot) << method->PrettyMethod();
      DCHECK(!self->IsExceptionPending());
      jvalue val;
      art::JNIEnvExt* jnienv = self->GetJniEnv();
      ScopedLocalRef<jobject> return_jobj(jnienv, AddLocalRef<jobject>(jnienv, return_value.Get()));
      val.l = return_jobj.get();
      RunEventCallback<ArtJvmtiEvent::kMethodExit>(
          event_handler_,
          self,
          jnienv,
          art::jni::EncodeArtMethod(method),
          /*was_popped_by_exception=*/ static_cast<jboolean>(JNI_FALSE),
          val);
    }
  }

  // Call-back for when a method is exited.
  void MethodExited(art::Thread* self,
                    art::ArtMethod* method,
                    art::instrumentation::OptionalFrame frame,
                    art::JValue& return_value) REQUIRES_SHARED(art::Locks::mutator_lock_) override {
    if (frame.has_value() &&
        UNLIKELY(event_handler_->IsEventEnabledAnywhere(
            ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue))) {
      DCHECK(!frame->get().GetSkipMethodExitEvents());
      bool has_return = false;
      {
        art::MutexLock mu(self, non_standard_exits_lock_);
        const art::ShadowFrame* sframe = &frame.value().get();
        const auto it = non_standard_exits_.find(sframe);
        if (it != non_standard_exits_.end()) {
          return_value.SetJ(it->second.return_val_.j);
          non_standard_exits_.erase(it);
          has_return = true;
        }
      }
      if (has_return) {
        ScopedLocalRef<jthread> thr(self->GetJniEnv(),
                                    self->GetJniEnv()->NewLocalRef(self->GetPeer()));
        art::ScopedThreadSuspension sts(self, art::ThreadState::kNative);
        event_handler_->SetInternalEvent(
            thr.get(), ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue, JVMTI_DISABLE);
      }
    }
    if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
      DCHECK_NE(
          method->GetInterfaceMethodIfProxy(art::kRuntimePointerSize)->GetReturnTypePrimitive(),
          art::Primitive::kPrimNot) << method->PrettyMethod();
      DCHECK(!self->IsExceptionPending()) << self->GetException()->Dump();
      jvalue val;
      art::JNIEnvExt* jnienv = self->GetJniEnv();
      // A 64-bit integer is the largest value in the union, so we should be fine simply copying
      // it into the union.
      val.j = return_value.GetJ();
      RunEventCallback<ArtJvmtiEvent::kMethodExit>(
          event_handler_,
          self,
          jnienv,
          art::jni::EncodeArtMethod(method),
          /*was_popped_by_exception=*/ static_cast<jboolean>(JNI_FALSE),
          val);
    }
  }

  // Call-back for when a method is popped due to an exception throw. A method will either cause a
  // MethodExited call-back or a MethodUnwind call-back when its activation is removed.
  void MethodUnwind(art::Thread* self,
                    art::ArtMethod* method,
                    uint32_t dex_pc ATTRIBUTE_UNUSED)
      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
    if (!method->IsRuntimeMethod() &&
        event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
      jvalue val;
      // Just set this to 0xffffffffffffffff so it's not uninitialized.
      val.j = static_cast<jlong>(-1);
      art::JNIEnvExt* jnienv = self->GetJniEnv();
      art::StackHandleScope<1> hs(self);
      art::Handle<art::mirror::Throwable> old_exception(hs.NewHandle(self->GetException()));
      CHECK(!old_exception.IsNull());
      self->ClearException();
      RunEventCallback<ArtJvmtiEvent::kMethodExit>(
          event_handler_,
          self,
          jnienv,
          art::jni::EncodeArtMethod(method),
          /*was_popped_by_exception=*/ static_cast<jboolean>(JNI_TRUE),
          val);
      // Match RI behavior of just throwing away original exception if a new one is thrown.
      if (LIKELY(!self->IsExceptionPending())) {
        self->SetException(old_exception.Get());
      }
    }
  }

  // Call-back for when the dex pc moves in a method.
  void DexPcMoved(art::Thread* self,
                  art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
                  art::ArtMethod* method,
                  uint32_t new_dex_pc)
      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
    DCHECK(!method->IsRuntimeMethod());
    // Default methods might be copied to multiple classes. We need to get the canonical version of
    // this method so that we can check for breakpoints correctly.
    // TODO We should maybe do this on other events to ensure that we are consistent WRT default
    // methods. This could interact with obsolete methods if we ever let interface redefinition
    // happen though.
    method = method->GetCanonicalMethod();
    art::JNIEnvExt* jnienv = self->GetJniEnv();
    jmethodID jmethod = art::jni::EncodeArtMethod(method);
    jlocation location = static_cast<jlocation>(new_dex_pc);
    // Step event is reported first according to the spec.
    if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kSingleStep)) {
      RunEventCallback<ArtJvmtiEvent::kSingleStep>(event_handler_, self, jnienv, jmethod, location);
    }
    // Next we do the Breakpoint events. The Dispatch code will filter out the individual
    // breakpoints that each env has actually set.
    if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kBreakpoint)) {
      RunEventCallback<ArtJvmtiEvent::kBreakpoint>(event_handler_, self, jnienv, jmethod, location);
    }
  }

  // Call-back for when we read from a field.
  void FieldRead(art::Thread* self,
                 art::Handle<art::mirror::Object> this_object,
                 art::ArtMethod* method_p,
                 uint32_t dex_pc,
                 art::ArtField* field_p)
      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
    if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kFieldAccess)) {
      art::StackReflectiveHandleScope<1, 1> rhs(self);
      art::ReflectiveHandle<art::ArtField> field(rhs.NewHandle(field_p));
      art::ReflectiveHandle<art::ArtMethod> method(rhs.NewHandle(method_p));
      art::JNIEnvExt* jnienv = self->GetJniEnv();
      // DCHECK(!self->IsExceptionPending());
      ScopedLocalRef<jobject> this_ref(jnienv, AddLocalRef<jobject>(jnienv, this_object.Get()));
      ScopedLocalRef<jobject> fklass(jnienv,
                                     AddLocalRef<jobject>(jnienv,
                                                          field->GetDeclaringClass().Ptr()));
      RunEventCallback<ArtJvmtiEvent::kFieldAccess>(event_handler_,
                                                    self,
                                                    jnienv,
                                                    art::jni::EncodeArtMethod(method),
                                                    static_cast<jlocation>(dex_pc),
                                                    static_cast<jclass>(fklass.get()),
                                                    this_ref.get(),
                                                    art::jni::EncodeArtField(field));
    }
  }

  void FieldWritten(art::Thread* self,
                    art::Handle<art::mirror::Object> this_object,
                    art::ArtMethod* method_p,
                    uint32_t dex_pc,
                    art::ArtField* field_p,
                    art::Handle<art::mirror::Object> new_val)
      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
    if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kFieldModification)) {
      art::JNIEnvExt* jnienv = self->GetJniEnv();
      art::StackReflectiveHandleScope<1, 1> rhs(self);
      art::ReflectiveHandle<art::ArtField> field(rhs.NewHandle(field_p));
      art::ReflectiveHandle<art::ArtMethod> method(rhs.NewHandle(method_p));
      // DCHECK(!self->IsExceptionPending());
      ScopedLocalRef<jobject> this_ref(jnienv, AddLocalRef<jobject>(jnienv, this_object.Get()));
      ScopedLocalRef<jobject> fklass(jnienv,
                                     AddLocalRef<jobject>(jnienv,
                                                          field->GetDeclaringClass().Ptr()));
      ScopedLocalRef<jobject> fval(jnienv, AddLocalRef<jobject>(jnienv, new_val.Get()));
      jvalue val;
      val.l = fval.get();
      RunEventCallback<ArtJvmtiEvent::kFieldModification>(
          event_handler_,
          self,
          jnienv,
          art::jni::EncodeArtMethod(method),
          static_cast<jlocation>(dex_pc),
          static_cast<jclass>(fklass.get()),
          field->IsStatic() ? nullptr : this_ref.get(),
          art::jni::EncodeArtField(field),
          'L',  // type_char
          val);
    }
  }

  // Call-back for when we write into a field.
  void FieldWritten(art::Thread* self,
                    art::Handle<art::mirror::Object> this_object,
                    art::ArtMethod* method_p,
                    uint32_t dex_pc,
                    art::ArtField* field_p,
                    const art::JValue& field_value)
      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
    if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kFieldModification)) {
      art::JNIEnvExt* jnienv = self->GetJniEnv();
      art::StackReflectiveHandleScope<1, 1> rhs(self);
      art::ReflectiveHandle<art::ArtField> field(rhs.NewHandle(field_p));
      art::ReflectiveHandle<art::ArtMethod> method(rhs.NewHandle(method_p));
      DCHECK(!self->IsExceptionPending());
      ScopedLocalRef<jobject> this_ref(jnienv, AddLocalRef<jobject>(jnienv, this_object.Get()));
      ScopedLocalRef<jobject> fklass(jnienv,
                                     AddLocalRef<jobject>(jnienv,
                                                          field->GetDeclaringClass().Ptr()));
      char type_char = art::Primitive::Descriptor(field->GetTypeAsPrimitiveType())[0];
      jvalue val;
      // A 64-bit integer is the largest value in the union, so we should be fine simply copying
      // it into the union.
      val.j = field_value.GetJ();
      RunEventCallback<ArtJvmtiEvent::kFieldModification>(
          event_handler_,
          self,
          jnienv,
          art::jni::EncodeArtMethod(method),
          static_cast<jlocation>(dex_pc),
          static_cast<jclass>(fklass.get()),
          field->IsStatic() ? nullptr : this_ref.get(),  // NB: static field modifications are
                                                         // given the class as this_object for
                                                         // some reason.
          art::jni::EncodeArtField(field),
          type_char,
          val);
    }
  }

  void WatchedFramePop(art::Thread* self, const art::ShadowFrame& frame)
      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
    art::JNIEnvExt* jnienv = self->GetJniEnv();
    // Remove the force-interpreter count added by the WatchFrame call.
    {
      art::MutexLock mu(self, *art::Locks::thread_list_lock_);
      CHECK_GT(self->ForceInterpreterCount(), 0u);
      self->DecrementForceInterpreterCount();
    }
    jboolean is_exception_pending = self->IsExceptionPending();
    RunEventCallback<ArtJvmtiEvent::kFramePop>(
        event_handler_,
        self,
        jnienv,
        art::jni::EncodeArtMethod(frame.GetMethod()),
        is_exception_pending,
        &frame);
  }

  static void FindCatchMethodsFromThrow(art::Thread* self,
                                        art::Handle<art::mirror::Throwable> exception,
                                        /*out*/ art::ArtMethod** out_method,
                                        /*out*/ uint32_t* dex_pc)
      REQUIRES_SHARED(art::Locks::mutator_lock_) {
    // Finds the location where this exception will most likely be caught. We ignore intervening
    // native frames (which could catch the exception) and return the closest java frame with a
    // compatible catch statement.
    class CatchLocationFinder final : public art::StackVisitor {
     public:
      CatchLocationFinder(art::Thread* target,
                          art::Handle<art::mirror::Class> exception_class,
                          art::Context* context,
                          /*out*/ art::ArtMethod** out_catch_method,
                          /*out*/ uint32_t* out_catch_pc)
          REQUIRES_SHARED(art::Locks::mutator_lock_)
          : StackVisitor(target, context, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
            exception_class_(exception_class),
            catch_method_ptr_(out_catch_method),
            catch_dex_pc_ptr_(out_catch_pc) {}

      bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
        art::ArtMethod* method = GetMethod();
        DCHECK(method != nullptr);
        if (method->IsRuntimeMethod()) {
          return true;
        }

        if (!method->IsNative()) {
          uint32_t cur_dex_pc = GetDexPc();
          if (cur_dex_pc == art::dex::kDexNoIndex) {
            // This frame looks opaque. Just keep on going.
            return true;
          }
          bool has_no_move_exception = false;
          uint32_t found_dex_pc = method->FindCatchBlock(
              exception_class_, cur_dex_pc, &has_no_move_exception);
          if (found_dex_pc != art::dex::kDexNoIndex) {
            // We found the catch. Store the result and return.
            *catch_method_ptr_ = method;
            *catch_dex_pc_ptr_ = found_dex_pc;
            return false;
          }
        }
        return true;
      }

     private:
      art::Handle<art::mirror::Class> exception_class_;
      art::ArtMethod** catch_method_ptr_;
      uint32_t* catch_dex_pc_ptr_;

      DISALLOW_COPY_AND_ASSIGN(CatchLocationFinder);
    };

    art::StackHandleScope<1> hs(self);
    *out_method = nullptr;
    *dex_pc = 0;
    std::unique_ptr<art::Context> context(art::Context::Create());

    CatchLocationFinder clf(self,
                            hs.NewHandle(exception->GetClass()),
                            context.get(),
                            /*out*/ out_method,
                            /*out*/ dex_pc);
    clf.WalkStack(/* include_transitions= */ false);
  }

  // Call-back when an exception is thrown.
  void ExceptionThrown(art::Thread* self, art::Handle<art::mirror::Throwable> exception_object)
      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
    DCHECK(self->IsExceptionThrownByCurrentMethod(exception_object.Get()));
    // The instrumentation events get rid of this for us.
    DCHECK(!self->IsExceptionPending());
    if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kException)) {
      art::JNIEnvExt* jnienv = self->GetJniEnv();
      art::ArtMethod* catch_method;
      uint32_t catch_pc;
      FindCatchMethodsFromThrow(self, exception_object, &catch_method, &catch_pc);
      uint32_t dex_pc = 0;
      art::ArtMethod* method = self->GetCurrentMethod(&dex_pc,
                                                      /* check_suspended= */ true,
                                                      /* abort_on_error= */ art::kIsDebugBuild);
      ScopedLocalRef<jobject> exception(jnienv,
                                        AddLocalRef<jobject>(jnienv, exception_object.Get()));
      RunEventCallback<ArtJvmtiEvent::kException>(
          event_handler_,
          self,
          jnienv,
          art::jni::EncodeArtMethod(method),
          static_cast<jlocation>(dex_pc),
          exception.get(),
          art::jni::EncodeArtMethod(catch_method),
          static_cast<jlocation>(catch_pc));
    }
    return;
  }

  // Call-back when an exception is handled.
  void ExceptionHandled(art::Thread* self, art::Handle<art::mirror::Throwable> exception_object)
      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
    // Since the exception has already been handled there shouldn't be one pending.
    DCHECK(!self->IsExceptionPending());
    if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kExceptionCatch)) {
      art::JNIEnvExt* jnienv = self->GetJniEnv();
      uint32_t dex_pc;
      art::ArtMethod* method = self->GetCurrentMethod(&dex_pc,
                                                      /* check_suspended= */ true,
                                                      /* abort_on_error= */ art::kIsDebugBuild);
      ScopedLocalRef<jobject> exception(jnienv,
                                        AddLocalRef<jobject>(jnienv, exception_object.Get()));
      RunEventCallback<ArtJvmtiEvent::kExceptionCatch>(
          event_handler_,
          self,
          jnienv,
          art::jni::EncodeArtMethod(method),
          static_cast<jlocation>(dex_pc),
          exception.get());
    }
    return;
  }

  // Call-back for when we execute a branch.
  void Branch(art::Thread* self ATTRIBUTE_UNUSED,
              art::ArtMethod* method ATTRIBUTE_UNUSED,
              uint32_t dex_pc ATTRIBUTE_UNUSED,
              int32_t dex_pc_offset ATTRIBUTE_UNUSED)
      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
    return;
  }

 private:
  struct NonStandardExitEventInfo {
    // If non-null, a global reference to the returned value.
    jobject return_val_obj_;
    // The return-value to be passed to the MethodExit event.
    jvalue return_val_;
  };

  EventHandler* const event_handler_;

  mutable art::Mutex non_standard_exits_lock_
      ACQUIRED_BEFORE(art::Locks::instrument_entrypoints_lock_);

  std::unordered_map<const art::ShadowFrame*, NonStandardExitEventInfo> non_standard_exits_
      GUARDED_BY(non_standard_exits_lock_);
};

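// Maps a jvmti event onto the mask of art::instrumentation events needed to implement it. Pairs
// of jvmti events that share one underlying instrumentation event (MethodExit with
// ForceEarlyReturnUpdateReturnValue, and Breakpoint with SingleStep) return 0 when their partner
// is already enabled anywhere, so the shared listener is only ever registered once.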
uint32_t EventHandler::GetInstrumentationEventsFor(ArtJvmtiEvent event) {
  switch (event) {
    case ArtJvmtiEvent::kMethodEntry:
      return art::instrumentation::Instrumentation::kMethodEntered;
    case ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue:
      // TODO We want to do this but supporting only having a single one is difficult.
      // return art::instrumentation::Instrumentation::kMethodExited;
    case ArtJvmtiEvent::kMethodExit: {
      DCHECK(event == ArtJvmtiEvent::kMethodExit ||
             event == ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue)
          << "event = " << static_cast<uint32_t>(event);
      ArtJvmtiEvent other = event == ArtJvmtiEvent::kMethodExit
                                ? ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue
                                : ArtJvmtiEvent::kMethodExit;
      if (LIKELY(!IsEventEnabledAnywhere(other))) {
        return art::instrumentation::Instrumentation::kMethodExited |
               art::instrumentation::Instrumentation::kMethodUnwind;
      } else {
        // The event needs to be kept around/is already enabled by the other jvmti event that uses
        // the same instrumentation event.
        return 0u;
      }
    }
    case ArtJvmtiEvent::kFieldModification:
      return art::instrumentation::Instrumentation::kFieldWritten;
    case ArtJvmtiEvent::kFieldAccess:
      return art::instrumentation::Instrumentation::kFieldRead;
    case ArtJvmtiEvent::kBreakpoint:
    case ArtJvmtiEvent::kSingleStep: {
      // Need to skip adding the listeners if the event is breakpoint/single-step since those
      // events share the same underlying art-instrumentation event. We still need to give each
      // its own deopt request, though, so that check waits until here.
      DCHECK(event == ArtJvmtiEvent::kBreakpoint || event == ArtJvmtiEvent::kSingleStep);
      ArtJvmtiEvent other = event == ArtJvmtiEvent::kBreakpoint ? ArtJvmtiEvent::kSingleStep
                                                                : ArtJvmtiEvent::kBreakpoint;
      if (LIKELY(!IsEventEnabledAnywhere(other))) {
        return art::instrumentation::Instrumentation::kDexPcMoved;
      } else {
        // The event needs to be kept around/is already enabled by the other jvmti event that uses
        // the same instrumentation event.
        return 0u;
      }
    }
    case ArtJvmtiEvent::kFramePop:
      return art::instrumentation::Instrumentation::kWatchedFramePop;
    case ArtJvmtiEvent::kException:
      return art::instrumentation::Instrumentation::kExceptionThrown;
    case ArtJvmtiEvent::kExceptionCatch:
      return art::instrumentation::Instrumentation::kExceptionHandled;
    default:
      LOG(FATAL) << "Unknown event " << static_cast<uint32_t>(event);
      UNREACHABLE();
  }
}

enum class DeoptRequirement {
  // No deoptimization work required.
  kNone,
  // Limited/no deopt required.
  kLimited,
  // A single thread must be put into interpret only.
  kThread,
  // All methods and all threads deopted.
  kFull,
};

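// Decides how much deoptimization enabling or disabling the given event requires. For example,
// enabling kSingleStep for a specific thread only forces that thread into the interpreter
// (kThread), while enabling it globally (thread == nullptr) requires deoptimizing everything
// (kFull).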
static DeoptRequirement GetDeoptRequirement(ArtJvmtiEvent event, jthread thread) {
  switch (event) {
    case ArtJvmtiEvent::kBreakpoint:
    case ArtJvmtiEvent::kException:
    case ArtJvmtiEvent::kMethodEntry:
    case ArtJvmtiEvent::kMethodExit:
      return DeoptRequirement::kLimited;
    case ArtJvmtiEvent::kExceptionCatch:
      return DeoptRequirement::kFull;
    case ArtJvmtiEvent::kFieldModification:
    case ArtJvmtiEvent::kFieldAccess:
    case ArtJvmtiEvent::kSingleStep:
    case ArtJvmtiEvent::kFramePop:
    case ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue:
      return thread == nullptr ? DeoptRequirement::kFull : DeoptRequirement::kThread;
    case ArtJvmtiEvent::kVmInit:
    case ArtJvmtiEvent::kVmDeath:
    case ArtJvmtiEvent::kThreadStart:
    case ArtJvmtiEvent::kThreadEnd:
    case ArtJvmtiEvent::kClassFileLoadHookNonRetransformable:
    case ArtJvmtiEvent::kClassLoad:
    case ArtJvmtiEvent::kClassPrepare:
    case ArtJvmtiEvent::kVmStart:
    case ArtJvmtiEvent::kNativeMethodBind:
    case ArtJvmtiEvent::kCompiledMethodLoad:
    case ArtJvmtiEvent::kCompiledMethodUnload:
    case ArtJvmtiEvent::kDynamicCodeGenerated:
    case ArtJvmtiEvent::kDataDumpRequest:
    case ArtJvmtiEvent::kMonitorWait:
    case ArtJvmtiEvent::kMonitorWaited:
    case ArtJvmtiEvent::kMonitorContendedEnter:
    case ArtJvmtiEvent::kMonitorContendedEntered:
    case ArtJvmtiEvent::kResourceExhausted:
    case ArtJvmtiEvent::kGarbageCollectionStart:
    case ArtJvmtiEvent::kGarbageCollectionFinish:
    case ArtJvmtiEvent::kObjectFree:
    case ArtJvmtiEvent::kVmObjectAlloc:
    case ArtJvmtiEvent::kClassFileLoadHookRetransformable:
    case ArtJvmtiEvent::kDdmPublishChunk:
    case ArtJvmtiEvent::kObsoleteObjectCreated:
    case ArtJvmtiEvent::kStructuralDexFileLoadHook:
      return DeoptRequirement::kNone;
  }
}

jvmtiError EventHandler::HandleEventDeopt(ArtJvmtiEvent event, jthread thread, bool enable) {
  DeoptRequirement deopt_req = GetDeoptRequirement(event, thread);
  // Make sure we can deopt.
  if (deopt_req != DeoptRequirement::kNone) {
    art::ScopedObjectAccess soa(art::Thread::Current());
    DeoptManager* deopt_manager = DeoptManager::Get();
    jvmtiError err = OK;
    if (enable) {
      deopt_manager->AddDeoptimizationRequester();
      switch (deopt_req) {
        case DeoptRequirement::kFull:
          deopt_manager->AddDeoptimizeAllMethods();
          break;
        case DeoptRequirement::kThread:
          err = deopt_manager->AddDeoptimizeThreadMethods(soa, thread);
          break;
        default:
          break;
      }
      if (err != OK) {
        deopt_manager->RemoveDeoptimizationRequester();
        return err;
      }
    } else {
      switch (deopt_req) {
        case DeoptRequirement::kFull:
          deopt_manager->RemoveDeoptimizeAllMethods();
          break;
        case DeoptRequirement::kThread:
          err = deopt_manager->RemoveDeoptimizeThreadMethods(soa, thread);
          break;
        default:
          break;
      }
      deopt_manager->RemoveDeoptimizationRequester();
      if (err != OK) {
        return err;
      }
    }
  }
  return OK;
}

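// Adds or removes the method-trace listener for the instrumentation events backing the given
// jvmti event. Listener (de)registration is performed under ScopedSuspendAll, entered from a
// native thread state, since the instrumentation listener lists may not be modified while other
// threads are running.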
void EventHandler::SetupTraceListener(JvmtiMethodTraceListener* listener,
                                      ArtJvmtiEvent event,
                                      bool enable) {
  // Add the actual listeners.
  uint32_t new_events = GetInstrumentationEventsFor(event);
  if (new_events == 0) {
    return;
  }
  art::ScopedThreadStateChange stsc(art::Thread::Current(), art::ThreadState::kNative);
  art::instrumentation::Instrumentation* instr = art::Runtime::Current()->GetInstrumentation();
  art::ScopedSuspendAll ssa("jvmti method tracing installation");
  if (enable) {
    instr->AddListener(listener, new_events);
  } else {
    instr->RemoveListener(listener, new_events);
  }
  return;
}

// Makes sure that all compiled methods are AsyncDeoptimizable so we can deoptimize (and force to
// the switch interpreter) when we try to get or set a local variable.
void EventHandler::HandleLocalAccessCapabilityAdded() {
  class UpdateEntryPointsClassVisitor : public art::ClassVisitor {
   public:
    explicit UpdateEntryPointsClassVisitor(art::Runtime* runtime)
        : runtime_(runtime) {}

    bool operator()(art::ObjPtr<art::mirror::Class> klass)
        override REQUIRES(art::Locks::mutator_lock_) {
      if (!klass->IsLoaded()) {
        // Skip classes that aren't loaded since they might not have fully allocated and
        // initialized their methods. Furthermore, since the jvmti-plugin must have been loaded by
        // this point these methods will definitely be using debuggable code.
        return true;
      }
      for (auto& m : klass->GetMethods(art::kRuntimePointerSize)) {
        const void* code = m.GetEntryPointFromQuickCompiledCode();
        if (m.IsNative() || m.IsProxyMethod() || !m.IsInvokable()) {
          continue;
        } else if (!runtime_->GetClassLinker()->IsQuickToInterpreterBridge(code) &&
                   !runtime_->IsAsyncDeoptimizeable(&m, reinterpret_cast<uintptr_t>(code))) {
          runtime_->GetInstrumentation()->InitializeMethodsCode(&m, /*aot_code=*/ nullptr);
        }
      }
      return true;
    }

   private:
    art::Runtime* runtime_;
  };
  art::ScopedObjectAccess soa(art::Thread::Current());
  UpdateEntryPointsClassVisitor visitor(art::Runtime::Current());
  art::Runtime::Current()->GetClassLinker()->VisitClasses(&visitor);
}

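// Returns whether any of the four monitor events other than 'event' is enabled anywhere. The
// monitor and park listeners are shared by all four events, so HandleEventType only installs or
// removes them when this returns false, i.e. on the first enable and the last disable.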
bool EventHandler::OtherMonitorEventsEnabledAnywhere(ArtJvmtiEvent event) {
  std::array<ArtJvmtiEvent, 4> events {
    {
      ArtJvmtiEvent::kMonitorContendedEnter,
      ArtJvmtiEvent::kMonitorContendedEntered,
      ArtJvmtiEvent::kMonitorWait,
      ArtJvmtiEvent::kMonitorWaited
    }
  };
  for (ArtJvmtiEvent e : events) {
    if (e != event && IsEventEnabledAnywhere(e)) {
      return true;
    }
  }
  return false;
}

void EventHandler::SetupFramePopTraceListener(bool enable) {
  if (enable) {
    frame_pop_enabled = true;
    SetupTraceListener(method_trace_listener_.get(), ArtJvmtiEvent::kFramePop, enable);
  } else {
    // Remove the listener if we have no outstanding frames.
    {
      art::ReaderMutexLock mu(art::Thread::Current(), envs_lock_);
      for (ArtJvmTiEnv* env : envs) {
        art::ReaderMutexLock event_mu(art::Thread::Current(), env->event_info_mutex_);
        if (!env->notify_frames.empty()) {
          // Leaving FramePop listener since there are unsent FramePop events.
          return;
        }
      }
      frame_pop_enabled = false;
    }
    SetupTraceListener(method_trace_listener_.get(), ArtJvmtiEvent::kFramePop, enable);
  }
}

// Handle special work for the given event type, if necessary.
void EventHandler::HandleEventType(ArtJvmtiEvent event, bool enable) {
  switch (event) {
    case ArtJvmtiEvent::kDdmPublishChunk:
      SetupDdmTracking(ddm_listener_.get(), enable);
      return;
    case ArtJvmtiEvent::kVmObjectAlloc:
      SetupObjectAllocationTracking(enable);
      return;
    case ArtJvmtiEvent::kGarbageCollectionStart:
    case ArtJvmtiEvent::kGarbageCollectionFinish:
      SetupGcPauseTracking(gc_pause_listener_.get(), event, enable);
      return;
    // The FramePop listener cannot be removed once installed if there are outstanding pop-events,
    // since removing it then would either leave dangling ShadowFrame pointers or miss events.
    case ArtJvmtiEvent::kFramePop:
      if (enable && frame_pop_enabled) {
        // The frame-pop event was held on by pending events so we don't need to do anything.
      } else {
        SetupFramePopTraceListener(enable);
      }
      return;
    case ArtJvmtiEvent::kMethodEntry:
    case ArtJvmtiEvent::kMethodExit:
    case ArtJvmtiEvent::kFieldAccess:
    case ArtJvmtiEvent::kFieldModification:
    case ArtJvmtiEvent::kException:
    case ArtJvmtiEvent::kExceptionCatch:
    case ArtJvmtiEvent::kBreakpoint:
    case ArtJvmtiEvent::kSingleStep:
    case ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue:
      SetupTraceListener(method_trace_listener_.get(), event, enable);
      return;
    case ArtJvmtiEvent::kMonitorContendedEnter:
    case ArtJvmtiEvent::kMonitorContendedEntered:
    case ArtJvmtiEvent::kMonitorWait:
    case ArtJvmtiEvent::kMonitorWaited:
      if (!OtherMonitorEventsEnabledAnywhere(event)) {
        SetupMonitorListener(monitor_listener_.get(), park_listener_.get(), enable);
      }
      return;
    default:
      break;
  }
  return;
}

1356 // Checks to see if the env has the capabilities associated with the given event.
HasAssociatedCapability(ArtJvmTiEnv * env,ArtJvmtiEvent event)1357 static bool HasAssociatedCapability(ArtJvmTiEnv* env,
1358 ArtJvmtiEvent event) {
1359 jvmtiCapabilities caps = env->capabilities;
1360 switch (event) {
1361 case ArtJvmtiEvent::kBreakpoint:
1362 return caps.can_generate_breakpoint_events == 1;
1363
1364 case ArtJvmtiEvent::kCompiledMethodLoad:
1365 case ArtJvmtiEvent::kCompiledMethodUnload:
1366 return caps.can_generate_compiled_method_load_events == 1;
1367
1368 case ArtJvmtiEvent::kException:
1369 case ArtJvmtiEvent::kExceptionCatch:
1370 return caps.can_generate_exception_events == 1;
1371
1372 case ArtJvmtiEvent::kFieldAccess:
1373 return caps.can_generate_field_access_events == 1;
1374
1375 case ArtJvmtiEvent::kFieldModification:
1376 return caps.can_generate_field_modification_events == 1;
1377
1378 case ArtJvmtiEvent::kFramePop:
1379 return caps.can_generate_frame_pop_events == 1;
1380
1381 case ArtJvmtiEvent::kGarbageCollectionStart:
1382 case ArtJvmtiEvent::kGarbageCollectionFinish:
1383 return caps.can_generate_garbage_collection_events == 1;
1384
1385 case ArtJvmtiEvent::kMethodEntry:
1386 return caps.can_generate_method_entry_events == 1;
1387
1388 case ArtJvmtiEvent::kMethodExit:
1389 return caps.can_generate_method_exit_events == 1;
1390
1391 case ArtJvmtiEvent::kMonitorContendedEnter:
1392 case ArtJvmtiEvent::kMonitorContendedEntered:
1393 case ArtJvmtiEvent::kMonitorWait:
1394 case ArtJvmtiEvent::kMonitorWaited:
1395 return caps.can_generate_monitor_events == 1;
1396
1397 case ArtJvmtiEvent::kNativeMethodBind:
1398 return caps.can_generate_native_method_bind_events == 1;
1399
1400 case ArtJvmtiEvent::kObjectFree:
1401 return caps.can_generate_object_free_events == 1;
1402
1403 case ArtJvmtiEvent::kSingleStep:
1404 return caps.can_generate_single_step_events == 1;
1405
1406 case ArtJvmtiEvent::kVmObjectAlloc:
1407 return caps.can_generate_vm_object_alloc_events == 1;
1408
1409 default:
1410 return true;
1411 }
1412 }
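
// For reference, the agent-side sequence this check gates: a JVMTI agent must first
// acquire the matching capability before enabling the event. A minimal sketch in
// hypothetical agent code (not part of this file), using the standard JVMTI entry
// points:
//
//   jvmtiCapabilities caps;
//   memset(&caps, 0, sizeof(caps));
//   caps.can_generate_breakpoint_events = 1;
//   if (jvmti->AddCapabilities(&caps) != JVMTI_ERROR_NONE) {
//     return JNI_ERR;  // capability unavailable
//   }
//   // Without the capability this would fail with JVMTI_ERROR_MUST_POSSESS_CAPABILITY
//   // (the ERR(MUST_POSSESS_CAPABILITY) path in SetEvent below).
//   jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_BREAKPOINT, nullptr);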

static bool IsInternalEvent(ArtJvmtiEvent event) {
  return static_cast<uint32_t>(event) >=
         static_cast<uint32_t>(ArtJvmtiEvent::kMinInternalEventTypeVal);
}

jvmtiError EventHandler::SetInternalEvent(jthread thread,
                                          ArtJvmtiEvent event,
                                          jvmtiEventMode mode) {
  CHECK(IsInternalEvent(event)) << static_cast<uint32_t>(event);

  art::Thread* self = art::Thread::Current();
  art::Thread* target = nullptr;
  ScopedNoUserCodeSuspension snucs(self);
  // The overall state across all threads and jvmtiEnvs. This is used to control the
  // state of the instrumentation handlers, since we only want each added once.
  bool old_state;
  bool new_state;
  // The state for just the given 'thread' (including null) across all jvmtiEnvs. This
  // is used to control the deoptimization state, since we refcount that and need to
  // perform different actions depending on whether the event is limited to a single
  // thread or global.
  bool old_thread_state;
  bool new_thread_state;
  {
    // From now on we know we cannot get suspended by user-code.
    // NB This does a SuspendCheck (during thread state change) so we need to make sure
    // we don't have the 'suspend_lock' locked here.
    art::ScopedObjectAccess soa(self);
    art::WriterMutexLock el_mu(self, envs_lock_);
    art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
    jvmtiError err = ERR(INTERNAL);
    if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
      return err;
    } else if (target->IsStillStarting() || target->GetState() == art::ThreadState::kStarting) {
      target->Dump(LOG_STREAM(WARNING) << "Is not alive: ");
      return ERR(THREAD_NOT_ALIVE);
    }

    // Make sure we have a valid jthread to pass to the deopt-manager.
    ScopedLocalRef<jthread> thread_lr(
        soa.Env(), thread != nullptr ? nullptr : soa.AddLocalReference<jthread>(target->GetPeer()));
    if (thread == nullptr) {
      thread = thread_lr.get();
    }
    CHECK(thread != nullptr);

    {
      DCHECK_GE(GetInternalEventRefcount(event) + (mode == JVMTI_ENABLE ? 1 : -1), 0)
          << "Refcount: " << GetInternalEventRefcount(event);
      DCHECK_GE(GetInternalEventThreadRefcount(event, target) + (mode == JVMTI_ENABLE ? 1 : -1), 0)
          << "Refcount: " << GetInternalEventThreadRefcount(event, target);
      DCHECK_GE(GetInternalEventRefcount(event), GetInternalEventThreadRefcount(event, target));
      old_state = GetInternalEventRefcount(event) > 0;
      old_thread_state = GetInternalEventThreadRefcount(event, target) > 0;
      if (mode == JVMTI_ENABLE) {
        new_state = IncrInternalEventRefcount(event) > 0;
        new_thread_state = IncrInternalEventThreadRefcount(event, target) > 0;
      } else {
        new_state = DecrInternalEventRefcount(event) > 0;
        new_thread_state = DecrInternalEventThreadRefcount(event, target) > 0;
      }
      if (old_state != new_state) {
        global_mask.Set(event, new_state);
      }
    }
  }
  // Handle any special work required for the event type. We still hold the
  // user_code_suspend_count_lock_ so there won't be any interleaving here.
  if (new_state != old_state) {
    HandleEventType(event, mode == JVMTI_ENABLE);
  }
  if (old_thread_state != new_thread_state) {
    HandleEventDeopt(event, thread, new_thread_state);
  }
  return OK;
}
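
// A rough sketch of the refcount transitions above, assuming a single internal event E
// being toggled on two threads A and B (hypothetical scenario, for illustration only):
//
//   SetInternalEvent(A, E, JVMTI_ENABLE)   // global 0->1: HandleEventType(E, true)
//                                          // A      0->1: HandleEventDeopt(E, A, true)
//   SetInternalEvent(B, E, JVMTI_ENABLE)   // global 1->2: listener already installed
//                                          // B      0->1: HandleEventDeopt(E, B, true)
//   SetInternalEvent(A, E, JVMTI_DISABLE)  // global 2->1: listener stays installed
//                                          // A      1->0: HandleEventDeopt(E, A, false)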

static bool IsDirectlySettableEvent(ArtJvmtiEvent event) {
  return !IsInternalEvent(event);
}

static bool EventIsNormal(ArtJvmtiEvent event) {
  return EventMask::EventIsInRange(event) && IsDirectlySettableEvent(event);
}

jvmtiError EventHandler::SetEvent(ArtJvmTiEnv* env,
                                  jthread thread,
                                  ArtJvmtiEvent event,
                                  jvmtiEventMode mode) {
  if (mode != JVMTI_ENABLE && mode != JVMTI_DISABLE) {
    return ERR(ILLEGAL_ARGUMENT);
  }

  if (!EventIsNormal(event)) {
    return ERR(INVALID_EVENT_TYPE);
  }

  if (!HasAssociatedCapability(env, event)) {
    return ERR(MUST_POSSESS_CAPABILITY);
  }

  if (thread != nullptr && !IsThreadControllable(event)) {
    return ERR(ILLEGAL_ARGUMENT);
  }

  art::Thread* self = art::Thread::Current();
  art::Thread* target = nullptr;
  ScopedNoUserCodeSuspension snucs(self);
  // The overall state across all threads and jvmtiEnvs. This is used to control the
  // state of the instrumentation handlers, since we only want each added once.
  bool old_state;
  bool new_state;
  // The state for just the given 'thread' (including null) across all jvmtiEnvs. This
  // is used to control the deoptimization state, since we refcount that and need to
  // perform different actions depending on whether the event is limited to a single
  // thread or global.
  bool old_thread_state;
  bool new_thread_state;
  {
    // From now on we know we cannot get suspended by user-code.
    // NB This does a SuspendCheck (during thread state change) so we need to make sure
    // we don't have the 'suspend_lock' locked here.
    art::ScopedObjectAccess soa(self);
    art::WriterMutexLock el_mu(self, envs_lock_);
    art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
    jvmtiError err = ERR(INTERNAL);
    if (thread != nullptr) {
      if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
        return err;
      } else if (target->IsStillStarting() ||
                 target->GetState() == art::ThreadState::kStarting) {
        target->Dump(LOG_STREAM(WARNING) << "Is not alive: ");
        return ERR(THREAD_NOT_ALIVE);
      }
    }

    art::WriterMutexLock ei_mu(self, env->event_info_mutex_);
    old_thread_state = GetThreadEventState(event, target);
    old_state = global_mask.Test(event);
    if (mode == JVMTI_ENABLE) {
      env->event_masks.EnableEvent(env, target, event);
      global_mask.Set(event);
      new_state = true;
      new_thread_state = true;
      DCHECK(GetThreadEventState(event, target));
    } else {
      DCHECK_EQ(mode, JVMTI_DISABLE);

      env->event_masks.DisableEvent(env, target, event);
      RecalculateGlobalEventMaskLocked(event);
      new_state = global_mask.Test(event);
      new_thread_state = GetThreadEventState(event, target);
      DCHECK(new_state || !new_thread_state);
    }
  }
  // Handle any special work required for the event type. We still hold the
  // user_code_suspend_count_lock_ so there won't be any interleaving here.
  if (new_state != old_state) {
    HandleEventType(event, mode == JVMTI_ENABLE);
  }
  if (old_thread_state != new_thread_state) {
    return HandleEventDeopt(event, thread, new_thread_state);
  }
  return OK;
}
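
// How the two state pairs above interact, summarized (for illustration):
//   old_state != new_state               -> whether *any* env/thread is interested in
//                                           the event changed, so install or remove the
//                                           underlying listener via HandleEventType.
//   old_thread_state != new_thread_state -> the deoptimization requirements for this
//                                           particular thread (or the global scope)
//                                           changed, so adjust them via HandleEventDeopt.
// E.g. if two envs both enable kBreakpoint globally, the second enable flips neither
// pair, so neither handler runs a second time.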

bool EventHandler::GetThreadEventState(ArtJvmtiEvent event, art::Thread* thread) {
  for (ArtJvmTiEnv* stored_env : envs) {
    if (stored_env == nullptr) {
      continue;
    }
    auto& masks = stored_env->event_masks;
    if (thread == nullptr && masks.global_event_mask.Test(event)) {
      return true;
    } else if (thread != nullptr) {
      EventMask* mask = masks.GetEventMaskOrNull(thread);
      if (mask != nullptr && mask->Test(event)) {
        return true;
      }
    }
  }
  return false;
}
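
// NB The only caller in this file, SetEvent, holds envs_lock_ for writing while this
// walks the 'envs' list, which is what makes the unsynchronized iteration here safe.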

void EventHandler::HandleBreakpointEventsChanged(bool added) {
  if (added) {
    DeoptManager::Get()->AddDeoptimizationRequester();
  } else {
    DeoptManager::Get()->RemoveDeoptimizationRequester();
  }
}

void EventHandler::AddDelayedNonStandardExitEvent(const art::ShadowFrame* frame,
                                                  bool is_object,
                                                  jvalue val) {
  method_trace_listener_->AddDelayedNonStandardExitEvent(frame, is_object, val);
}
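
// NB This appears to be the hook the non-standard method-exit machinery (e.g.
// force-early-return, cf. kForceEarlyReturnUpdateReturnValue above) uses to record the
// value a frame was forced to return, so the eventual MethodExit event can report it.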

static size_t GetInternalEventIndex(ArtJvmtiEvent event) {
  CHECK(IsInternalEvent(event));
  return static_cast<size_t>(event) - static_cast<size_t>(ArtJvmtiEvent::kMinInternalEventTypeVal);
}
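
// E.g. (enum values hypothetical, for illustration only): if
// ArtJvmtiEvent::kMinInternalEventTypeVal were 100, the internal event with value 101
// would map to index 101 - 100 = 1 in the refcount tables below.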

int32_t EventHandler::DecrInternalEventThreadRefcount(ArtJvmtiEvent event, art::Thread* target) {
  return --GetInternalEventThreadRefcount(event, target);
}

int32_t EventHandler::IncrInternalEventThreadRefcount(ArtJvmtiEvent event, art::Thread* target) {
  return ++GetInternalEventThreadRefcount(event, target);
}

int32_t& EventHandler::GetInternalEventThreadRefcount(ArtJvmtiEvent event, art::Thread* target) {
  auto& refs = internal_event_thread_refcount_[GetInternalEventIndex(event)];
  UniqueThread target_ut{target, target->GetTid()};
  if (refs.find(target_ut) == refs.end()) {
    refs.insert({target_ut, 0});
  }
  return refs.at(target_ut);
}

int32_t EventHandler::DecrInternalEventRefcount(ArtJvmtiEvent event) {
  return --internal_event_refcount_[GetInternalEventIndex(event)];
}

int32_t EventHandler::IncrInternalEventRefcount(ArtJvmtiEvent event) {
  return ++internal_event_refcount_[GetInternalEventIndex(event)];
}

int32_t EventHandler::GetInternalEventRefcount(ArtJvmtiEvent event) const {
  return internal_event_refcount_[GetInternalEventIndex(event)];
}

void EventHandler::Shutdown() {
  // Need to remove the method_trace_listener_ if it's there.
  art::Thread* self = art::Thread::Current();
  art::gc::ScopedGCCriticalSection gcs(self,
                                       art::gc::kGcCauseInstrumentation,
                                       art::gc::kCollectorTypeInstrumentation);
  art::ScopedSuspendAll ssa("jvmti method tracing uninstallation");
  // Just remove every possible event.
  art::Runtime::Current()->GetInstrumentation()->RemoveListener(method_trace_listener_.get(), ~0);
  AllocationManager::Get()->RemoveAllocListener();
}

EventHandler::EventHandler()
    : envs_lock_("JVMTI Environment List Lock", art::LockLevel::kPostMutatorTopLockLevel),
      frame_pop_enabled(false),
      internal_event_refcount_({0}) {
  alloc_listener_.reset(new JvmtiEventAllocationListener(this));
  AllocationManager::Get()->SetAllocListener(alloc_listener_.get());
  ddm_listener_.reset(new JvmtiDdmChunkListener(this));
  gc_pause_listener_.reset(new JvmtiGcPauseListener(this));
  method_trace_listener_.reset(new JvmtiMethodTraceListener(this));
  monitor_listener_.reset(new JvmtiMonitorListener(this));
  park_listener_.reset(new JvmtiParkListener(this));
}
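
// NB The listener objects are created eagerly here, but (with the exception of the
// allocation listener, which is handed to the AllocationManager up front) they are
// only wired into the runtime once the corresponding events are enabled; see
// HandleEventType above.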

EventHandler::~EventHandler() {
}

}  // namespace openjdkjvmti