1 /* Copyright (C) 2016 The Android Open Source Project
2 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
3 *
4 * This file implements interfaces from the file jvmti.h. This implementation
5 * is licensed under the same terms as the file jvmti.h. The
6 * copyright and license information for the file jvmti.h follows.
7 *
8 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
9 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
10 *
11 * This code is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License version 2 only, as
13 * published by the Free Software Foundation. Oracle designates this
14 * particular file as subject to the "Classpath" exception as provided
15 * by Oracle in the LICENSE file that accompanied this code.
16 *
17 * This code is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 * version 2 for more details (a copy is included in the LICENSE file that
21 * accompanied this code).
22 *
23 * You should have received a copy of the GNU General Public License version
24 * 2 along with this work; if not, write to the Free Software Foundation,
25 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
26 *
27 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
28 * or visit www.oracle.com if you need additional information or have any
29 * questions.
30 */
31
32 #include "ti_stack.h"
33
34 #include <algorithm>
35 #include <initializer_list>
36 #include <list>
37 #include <unordered_map>
38 #include <vector>
39
40 #include "android-base/macros.h"
41 #include "android-base/thread_annotations.h"
42 #include "arch/context.h"
43 #include "art_field-inl.h"
44 #include "art_jvmti.h"
45 #include "art_method-inl.h"
46 #include "barrier.h"
47 #include "base/bit_utils.h"
48 #include "base/enums.h"
49 #include "base/locks.h"
50 #include "base/macros.h"
51 #include "base/mutex.h"
52 #include "deopt_manager.h"
53 #include "dex/code_item_accessors-inl.h"
54 #include "dex/dex_file.h"
55 #include "dex/dex_file_annotations.h"
56 #include "dex/dex_file_types.h"
57 #include "dex/dex_instruction-inl.h"
58 #include "dex/primitive.h"
59 #include "events.h"
60 #include "gc_root.h"
61 #include "handle_scope-inl.h"
62 #include "instrumentation.h"
63 #include "interpreter/shadow_frame-inl.h"
64 #include "interpreter/shadow_frame.h"
65 #include "jni/jni_env_ext.h"
66 #include "jni/jni_internal.h"
67 #include "jvalue-inl.h"
68 #include "jvalue.h"
69 #include "jvmti.h"
70 #include "mirror/class.h"
71 #include "mirror/dex_cache.h"
72 #include "nativehelper/scoped_local_ref.h"
73 #include "scoped_thread_state_change-inl.h"
74 #include "scoped_thread_state_change.h"
75 #include "stack.h"
76 #include "thread-current-inl.h"
77 #include "thread.h"
78 #include "thread_list.h"
79 #include "thread_pool.h"
80 #include "thread_state.h"
81 #include "ti_logging.h"
82 #include "ti_thread.h"
83 #include "well_known_classes-inl.h"
84
85 namespace openjdkjvmti {
86
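// Stack visitor that reports one jvmtiFrameInfo per Java frame to the FrameFn callback. The
// first 'start' frames are skipped and, when 'stop' is non-zero, visiting ends once 'stop'
// frames have been reported. Runtime (trampoline) methods are ignored entirely.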
87 template <typename FrameFn>
88 struct GetStackTraceVisitor : public art::StackVisitor {
89   GetStackTraceVisitor(art::Thread* thread_in,
90 size_t start_,
91 size_t stop_,
92 FrameFn fn_)
93 : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
94 fn(fn_),
95 start(start_),
96 stop(stop_) {}
97 GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
98 GetStackTraceVisitor(GetStackTraceVisitor&&) noexcept = default;
99
100   bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
101 art::ArtMethod* m = GetMethod();
102 if (m->IsRuntimeMethod()) {
103 return true;
104 }
105
106 if (start == 0) {
107 m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
108 jmethodID id = art::jni::EncodeArtMethod(m);
109
110 uint32_t dex_pc = GetDexPc(false);
111 jlong dex_location = (dex_pc == art::dex::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);
112
113 jvmtiFrameInfo info = { id, dex_location };
114 fn(info);
115
116 if (stop == 1) {
117 return false; // We're done.
118 } else if (stop > 0) {
119 stop--;
120 }
121 } else {
122 start--;
123 }
124
125 return true;
126 }
127
128 FrameFn fn;
129 size_t start;
130 size_t stop;
131 };
132
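// Returns the ShadowFrame for the frame this visitor is currently at, creating a debugger
// shadow frame if none exists yet (i.e. the frame is not currently interpreted).
// *created_frame tells callers whether a frame was created, which they use to decide whether
// the thread must be deoptimized before the new frame can take effect.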
133 art::ShadowFrame* FindFrameAtDepthVisitor::GetOrCreateShadowFrame(bool* created_frame) {
134 art::ShadowFrame* cur = GetCurrentShadowFrame();
135 if (cur == nullptr) {
136 *created_frame = true;
137 art::ArtMethod* method = GetMethod();
138 const uint16_t num_regs = method->DexInstructionData().RegistersSize();
139 cur = GetThread()->FindOrCreateDebuggerShadowFrame(GetFrameId(),
140 num_regs,
141 method,
142 GetDexPc());
143 DCHECK(cur != nullptr);
144 } else {
145 *created_frame = false;
146 }
147 return cur;
148 }
149
150 template <typename FrameFn>
151 GetStackTraceVisitor<FrameFn> MakeStackTraceVisitor(art::Thread* thread_in,
152 size_t start,
153 size_t stop,
154 FrameFn fn) {
155 return GetStackTraceVisitor<FrameFn>(thread_in, start, stop, fn);
156 }
157
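// Closure executed on the target thread via a checkpoint. Collects every remaining frame into
// a vector; used by GetStackTrace for negative start_depth values, where frames have to be
// counted from the bottom of the stack.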
158 struct GetStackTraceVectorClosure : public art::Closure {
159 public:
160   GetStackTraceVectorClosure(size_t start, size_t stop)
161 : start_input(start),
162 stop_input(stop),
163 start_result(0),
164 stop_result(0) {}
165
166   void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
167 auto frames_fn = [&](jvmtiFrameInfo info) {
168 frames.push_back(info);
169 };
170 auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
171 visitor.WalkStack(/* include_transitions= */ false);
172
173 start_result = visitor.start;
174 stop_result = visitor.stop;
175 }
176
177 const size_t start_input;
178 const size_t stop_input;
179
180 std::vector<jvmtiFrameInfo> frames;
181 size_t start_result;
182 size_t stop_result;
183 };
184
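// Copies collected frames into the caller's frame_buffer. For start_depth >= 0 the vector
// already begins at the requested depth and is copied as-is; for negative start_depth, up to
// max_frame_count frames are copied starting |start_depth| frames from the bottom.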
185 static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
186 jint start_depth,
187 size_t start_result,
188 jint max_frame_count,
189 jvmtiFrameInfo* frame_buffer,
190 jint* count_ptr) {
191 size_t collected_frames = frames.size();
192
193 // Assume we're here having collected something.
194 DCHECK_GT(max_frame_count, 0);
195
196 // Frames from the top.
197 if (start_depth >= 0) {
198 if (start_result != 0) {
199 // Not enough frames.
200 return ERR(ILLEGAL_ARGUMENT);
201 }
202 DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
203 if (frames.size() > 0) {
204 memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
205 }
206 *count_ptr = static_cast<jint>(frames.size());
207 return ERR(NONE);
208 }
209
210 // Frames from the bottom.
211 if (collected_frames < static_cast<size_t>(-start_depth)) {
212 return ERR(ILLEGAL_ARGUMENT);
213 }
214
215 size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
216 memcpy(frame_buffer,
217 &frames.data()[collected_frames + start_depth],
218 count * sizeof(jvmtiFrameInfo));
219 *count_ptr = static_cast<jint>(count);
220 return ERR(NONE);
221 }
222
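// Closure executed on the target thread via a checkpoint. Writes frames straight into the
// caller-provided buffer, skipping 'start_input' frames and writing at most 'stop_input'
// entries; only used on the start_depth >= 0 fast path.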
223 struct GetStackTraceDirectClosure : public art::Closure {
224 public:
225   GetStackTraceDirectClosure(jvmtiFrameInfo* frame_buffer_, size_t start, size_t stop)
226 : frame_buffer(frame_buffer_),
227 start_input(start),
228 stop_input(stop),
229 index(0) {
230 DCHECK_GE(start_input, 0u);
231 }
232
233   void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
234 auto frames_fn = [&](jvmtiFrameInfo info) {
235 frame_buffer[index] = info;
236 ++index;
237 };
238 auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
239 visitor.WalkStack(/* include_transitions= */ false);
240 }
241
242 jvmtiFrameInfo* frame_buffer;
243
244 const size_t start_input;
245 const size_t stop_input;
246
247 size_t index = 0;
248 };
249
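// Illustrative agent-side call, for reference only (not part of this file). It assumes 'jvmti'
// is a jvmtiEnv* obtained via JavaVM::GetEnv and 'thread' is a valid, alive jthread:
//
//   jvmtiFrameInfo frames[10];
//   jint count = 0;
//   jvmtiError err = jvmti->GetStackTrace(thread, /*start_depth=*/ 0, /*max_frame_count=*/ 10,
//                                         frames, &count);
//   // On success frames[0..count) holds the top of the stack, frames[0] being the newest frame.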
250 jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env,
251 jthread java_thread,
252 jint start_depth,
253 jint max_frame_count,
254 jvmtiFrameInfo* frame_buffer,
255 jint* count_ptr) {
256 // It is not great that we have to hold these locks for so long, but it is necessary to ensure
257 // that the thread isn't dying on us.
258 art::ScopedObjectAccess soa(art::Thread::Current());
259 art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
260
261 art::Thread* thread;
262 jvmtiError thread_error = ERR(INTERNAL);
263 if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
264 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
265 return thread_error;
266 }
267 DCHECK(thread != nullptr);
268
269 art::ThreadState state = thread->GetState();
270 if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
271 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
272 return ERR(THREAD_NOT_ALIVE);
273 }
274
275 if (max_frame_count < 0) {
276 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
277 return ERR(ILLEGAL_ARGUMENT);
278 }
279 if (frame_buffer == nullptr || count_ptr == nullptr) {
280 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
281 return ERR(NULL_POINTER);
282 }
283
284 if (max_frame_count == 0) {
285 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
286 *count_ptr = 0;
287 return ERR(NONE);
288 }
289
290 if (start_depth >= 0) {
291 // Fast path: Regular order of stack trace. Fill into the frame_buffer directly.
292 GetStackTraceDirectClosure closure(frame_buffer,
293 static_cast<size_t>(start_depth),
294 static_cast<size_t>(max_frame_count));
295 // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
296 if (!thread->RequestSynchronousCheckpoint(&closure)) {
297 return ERR(THREAD_NOT_ALIVE);
298 }
299 *count_ptr = static_cast<jint>(closure.index);
300 if (closure.index == 0) {
301 JVMTI_LOG(INFO, jvmti_env) << "The stack is not large enough for a start_depth of "
302 << start_depth << ".";
303 return ERR(ILLEGAL_ARGUMENT);
304 }
305 return ERR(NONE);
306 } else {
307 GetStackTraceVectorClosure closure(0, 0);
308 // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
309 if (!thread->RequestSynchronousCheckpoint(&closure)) {
310 return ERR(THREAD_NOT_ALIVE);
311 }
312
313 return TranslateFrameVector(closure.frames,
314 start_depth,
315 closure.start_result,
316 max_frame_count,
317 frame_buffer,
318 count_ptr);
319 }
320 }
321
322 template <typename Data>
323 struct GetAllStackTracesVectorClosure : public art::Closure {
324   GetAllStackTracesVectorClosure(size_t stop, Data* data_)
325 : barrier(0), stop_input(stop), data(data_) {}
326
327   void Run(art::Thread* thread) override
328 REQUIRES_SHARED(art::Locks::mutator_lock_)
329 REQUIRES(!data->mutex) {
330 art::Thread* self = art::Thread::Current();
331 Work(thread, self);
332 barrier.Pass(self);
333 }
334
335   void Work(art::Thread* thread, art::Thread* self)
336 REQUIRES_SHARED(art::Locks::mutator_lock_)
337 REQUIRES(!data->mutex) {
338 // Skip threads that are still starting.
339 if (thread->IsStillStarting()) {
340 return;
341 }
342
343 std::vector<jvmtiFrameInfo>* thread_frames = data->GetFrameStorageFor(self, thread);
344 if (thread_frames == nullptr) {
345 return;
346 }
347
348 // Now collect the data.
349 auto frames_fn = [&](jvmtiFrameInfo info) {
350 thread_frames->push_back(info);
351 };
352 auto visitor = MakeStackTraceVisitor(thread, 0u, stop_input, frames_fn);
353 visitor.WalkStack(/* include_transitions= */ false);
354 }
355
356 art::Barrier barrier;
357 const size_t stop_input;
358 Data* data;
359 };
360
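// Runs the stack-collection closure as a checkpoint on every registered thread and waits on the
// closure's barrier until all checkpoints have executed.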
361 template <typename Data>
362 static void RunCheckpointAndWait(Data* data, size_t max_frame_count)
363 REQUIRES_SHARED(art::Locks::mutator_lock_) {
364 // Note: requires the mutator lock as the checkpoint requires the mutator lock.
365 GetAllStackTracesVectorClosure<Data> closure(max_frame_count, data);
366 size_t barrier_count = art::Runtime::Current()->GetThreadList()->RunCheckpoint(&closure, nullptr);
367 if (barrier_count == 0) {
368 return;
369 }
370 art::Thread* self = art::Thread::Current();
371 art::ScopedThreadStateChange tsc(self, art::ThreadState::kWaitingForCheckPointsToRun);
372 closure.barrier.Increment(self, barrier_count);
373 }
374
375 jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
376 jint max_frame_count,
377 jvmtiStackInfo** stack_info_ptr,
378 jint* thread_count_ptr) {
379 if (max_frame_count < 0) {
380 return ERR(ILLEGAL_ARGUMENT);
381 }
382 if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
383 return ERR(NULL_POINTER);
384 }
385
386 struct AllStackTracesData {
387 AllStackTracesData() : mutex("GetAllStackTraces", art::LockLevel::kAbortLock) {}
388 ~AllStackTracesData() {
389 JNIEnv* jni_env = art::Thread::Current()->GetJniEnv();
390 for (jthread global_thread_ref : thread_peers) {
391 jni_env->DeleteGlobalRef(global_thread_ref);
392 }
393 }
394
395 std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
396 REQUIRES_SHARED(art::Locks::mutator_lock_)
397 REQUIRES(!mutex) {
398 art::MutexLock mu(self, mutex);
399
400 threads.push_back(thread);
401
402 jthread peer = art::Runtime::Current()->GetJavaVM()->AddGlobalRef(
403 self, thread->GetPeerFromOtherThread());
404 thread_peers.push_back(peer);
405
406 frames.emplace_back(new std::vector<jvmtiFrameInfo>());
407 return frames.back().get();
408 }
409
410 art::Mutex mutex;
411
412 // Storage. Only access directly after completion.
413
414 std::vector<art::Thread*> threads;
415 // "thread_peers" contains global references to their peers.
416 std::vector<jthread> thread_peers;
417
418 std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
419 };
420
421 AllStackTracesData data;
422 art::Thread* current = art::Thread::Current();
423 {
424 art::ScopedObjectAccess soa(current);
425 RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));
426 }
427
428 // Convert the data into our output format.
429
430 // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
431 // allocate one big chunk for this and the actual frames, which means we need
432 // to either be conservative or rearrange things later (the latter is implemented).
433 std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
434 std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
435 frame_infos.reserve(data.frames.size());
436
437 // Now run through and add data for each thread.
438 size_t sum_frames = 0;
439 for (size_t index = 0; index < data.frames.size(); ++index) {
440 jvmtiStackInfo& stack_info = stack_info_array.get()[index];
441 memset(&stack_info, 0, sizeof(jvmtiStackInfo));
442
443 const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();
444
445 // For the time being, set the thread to null. We'll fix it up in the second stage.
446 stack_info.thread = nullptr;
447 stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;
448
449 size_t collected_frames = thread_frames.size();
450 if (max_frame_count == 0 || collected_frames == 0) {
451 stack_info.frame_count = 0;
452 stack_info.frame_buffer = nullptr;
453 continue;
454 }
455 DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
456
457 jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
458 frame_infos.emplace_back(frame_info);
459
460 jint count;
461 jvmtiError translate_result = TranslateFrameVector(thread_frames,
462 0,
463 0,
464 static_cast<jint>(collected_frames),
465 frame_info,
466 &count);
467 DCHECK(translate_result == JVMTI_ERROR_NONE);
468 stack_info.frame_count = static_cast<jint>(collected_frames);
469 stack_info.frame_buffer = frame_info;
470 sum_frames += static_cast<size_t>(count);
471 }
472
473 // No errors, yet. Now put it all into an output buffer.
474 size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * data.frames.size(),
475 alignof(jvmtiFrameInfo));
476 size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
477 unsigned char* chunk_data;
478 jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
479 if (alloc_result != ERR(NONE)) {
480 return alloc_result;
481 }
482
483 jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
484 // First copy in all the basic data.
485 memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * data.frames.size());
486
487 // Now copy the frames and fix up the pointers.
488 jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
489 chunk_data + rounded_stack_info_size);
490 for (size_t i = 0; i < data.frames.size(); ++i) {
491 jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
492 jvmtiStackInfo& new_stack_info = stack_info[i];
493
494 // Translate the global ref into a local ref.
495 new_stack_info.thread =
496 static_cast<JNIEnv*>(current->GetJniEnv())->NewLocalRef(data.thread_peers[i]);
497
498 if (old_stack_info.frame_count > 0) {
499 // Only copy when there's data - leave the nullptr alone.
500 size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
501 memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
502 new_stack_info.frame_buffer = frame_info;
503 frame_info += old_stack_info.frame_count;
504 }
505 }
506
507 *stack_info_ptr = stack_info;
508 *thread_count_ptr = static_cast<jint>(data.frames.size());
509
510 return ERR(NONE);
511 }
512
513 jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
514 jint thread_count,
515 const jthread* thread_list,
516 jint max_frame_count,
517 jvmtiStackInfo** stack_info_ptr) {
518 if (max_frame_count < 0) {
519 return ERR(ILLEGAL_ARGUMENT);
520 }
521 if (thread_count < 0) {
522 return ERR(ILLEGAL_ARGUMENT);
523 }
524 if (thread_count == 0) {
525 *stack_info_ptr = nullptr;
526 return ERR(NONE);
527 }
528 if (thread_list == nullptr || stack_info_ptr == nullptr) {
529 return ERR(NULL_POINTER);
530 }
531
532 art::Thread* current = art::Thread::Current();
533 art::ScopedObjectAccess soa(current); // Now we know we have the shared lock.
534
535 struct SelectStackTracesData {
536 SelectStackTracesData() : mutex("GetSelectStackTraces", art::LockLevel::kAbortLock) {}
537
538 std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
539 REQUIRES_SHARED(art::Locks::mutator_lock_)
540 REQUIRES(!mutex) {
541 art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
542 for (size_t index = 0; index != handles.size(); ++index) {
543 if (peer == handles[index].Get()) {
544 // Found the thread.
545 art::MutexLock mu(self, mutex);
546
547 threads.push_back(thread);
548 thread_list_indices.push_back(index);
549
550 frames.emplace_back(new std::vector<jvmtiFrameInfo>());
551 return frames.back().get();
552 }
553 }
554 return nullptr;
555 }
556
557 art::Mutex mutex;
558
559 // Selection data.
560
561 std::vector<art::Handle<art::mirror::Object>> handles;
562
563 // Storage. Only access directly after completion.
564
565 std::vector<art::Thread*> threads;
566 std::vector<size_t> thread_list_indices;
567
568 std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
569 };
570
571 SelectStackTracesData data;
572
573 // Decode all threads to raw pointers. Put them into a handle scope to avoid any moving GC bugs.
574 art::VariableSizedHandleScope hs(current);
575 for (jint i = 0; i != thread_count; ++i) {
576 if (thread_list[i] == nullptr) {
577 return ERR(INVALID_THREAD);
578 }
579 art::ObjPtr<art::mirror::Object> thread = soa.Decode<art::mirror::Object>(thread_list[i]);
580 if (!thread->InstanceOf(art::WellKnownClasses::java_lang_Thread.Get())) {
581 return ERR(INVALID_THREAD);
582 }
583 data.handles.push_back(hs.NewHandle(thread));
584 }
585
586 RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));
587
588 // Convert the data into our output format.
589
590 // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
591 // allocate one big chunk for this and the actual frames, which means we need
592 // to either be conservative or rearrange things later (the latter is implemented).
593 std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
594 std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
595 frame_infos.reserve(data.frames.size());
596
597 // Now run through and add data for each thread.
598 size_t sum_frames = 0;
599 for (size_t index = 0; index < data.frames.size(); ++index) {
600 jvmtiStackInfo& stack_info = stack_info_array.get()[index];
601 memset(&stack_info, 0, sizeof(jvmtiStackInfo));
602
603 art::Thread* self = data.threads[index];
604 const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();
605
606 // For the time being, set the thread to null. We don't have good ScopedLocalRef
607 // infrastructure.
608 DCHECK(self->GetPeerFromOtherThread() != nullptr);
609 stack_info.thread = nullptr;
610 stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;
611
612 size_t collected_frames = thread_frames.size();
613 if (max_frame_count == 0 || collected_frames == 0) {
614 stack_info.frame_count = 0;
615 stack_info.frame_buffer = nullptr;
616 continue;
617 }
618 DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
619
620 jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
621 frame_infos.emplace_back(frame_info);
622
623 jint count;
624 jvmtiError translate_result = TranslateFrameVector(thread_frames,
625 0,
626 0,
627 static_cast<jint>(collected_frames),
628 frame_info,
629 &count);
630 DCHECK(translate_result == JVMTI_ERROR_NONE);
631 stack_info.frame_count = static_cast<jint>(collected_frames);
632 stack_info.frame_buffer = frame_info;
633 sum_frames += static_cast<size_t>(count);
634 }
635
636   // No errors, yet. Now put it all into an output buffer. Note that the stack_info array is
637   // sized by thread_count here, which may differ from data.frames.size().
638 size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * thread_count,
639 alignof(jvmtiFrameInfo));
640 size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
641 unsigned char* chunk_data;
642 jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
643 if (alloc_result != ERR(NONE)) {
644 return alloc_result;
645 }
646
647 jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
648 jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
649 chunk_data + rounded_stack_info_size);
650
651 for (size_t i = 0; i < static_cast<size_t>(thread_count); ++i) {
652 // Check whether we found a running thread for this.
653 // Note: For simplicity, and with the expectation that the list is usually small, use a simple
654 // search. (The list is *not* sorted!)
655 auto it = std::find(data.thread_list_indices.begin(), data.thread_list_indices.end(), i);
656 if (it == data.thread_list_indices.end()) {
657 // No native thread. Must be new or dead. We need to fill out the stack info now.
658 // (Need to read the Java "started" field to know whether this is starting or terminated.)
659 art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread_list[i]);
660 art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
661 art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
662 CHECK(started_field != nullptr);
663 bool started = started_field->GetBoolean(peer) != 0;
664 constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
665 constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
666 JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
667 stack_info[i].thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
668 stack_info[i].state = started ? kTerminatedState : kStartedState;
669 stack_info[i].frame_count = 0;
670 stack_info[i].frame_buffer = nullptr;
671 } else {
672 // Had a native thread and frames.
673 size_t f_index = it - data.thread_list_indices.begin();
674
675 jvmtiStackInfo& old_stack_info = stack_info_array.get()[f_index];
676 jvmtiStackInfo& new_stack_info = stack_info[i];
677
678 memcpy(&new_stack_info, &old_stack_info, sizeof(jvmtiStackInfo));
679 new_stack_info.thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
680 if (old_stack_info.frame_count > 0) {
681 // Only copy when there's data - leave the nullptr alone.
682 size_t frames_size =
683 static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
684 memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
685 new_stack_info.frame_buffer = frame_info;
686 frame_info += old_stack_info.frame_count;
687 }
688 }
689 }
690
691 *stack_info_ptr = stack_info;
692
693 return ERR(NONE);
694 }
695
696 struct GetFrameCountClosure : public art::Closure {
697 public:
698   GetFrameCountClosure() : count(0) {}
699
700   void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
701 // This is not StackVisitor::ComputeNumFrames, as runtime methods and transitions must not be
702 // counted.
703 art::StackVisitor::WalkStack(
704 [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
705 art::ArtMethod* m = stack_visitor->GetMethod();
706 if (m != nullptr && !m->IsRuntimeMethod()) {
707 count++;
708 }
709 return true;
710 },
711 self,
712 /* context= */ nullptr,
713 art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
714 }
715
716 size_t count;
717 };
718
719 jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
720 jthread java_thread,
721 jint* count_ptr) {
722 // It is not great that we have to hold these locks for so long, but it is necessary to ensure
723 // that the thread isn't dying on us.
724 art::ScopedObjectAccess soa(art::Thread::Current());
725 art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
726
727 art::Thread* thread;
728 jvmtiError thread_error = ERR(INTERNAL);
729 if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
730 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
731 return thread_error;
732 }
733
734 DCHECK(thread != nullptr);
735 art::ThreadState state = thread->GetState();
736 if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
737 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
738 return ERR(THREAD_NOT_ALIVE);
739 }
740
741 if (count_ptr == nullptr) {
742 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
743 return ERR(NULL_POINTER);
744 }
745
746 GetFrameCountClosure closure;
747 // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
748 if (!thread->RequestSynchronousCheckpoint(&closure)) {
749 return ERR(THREAD_NOT_ALIVE);
750 }
751
752 *count_ptr = closure.count;
753 return ERR(NONE);
754 }
755
756 struct GetLocationClosure : public art::Closure {
757 public:
758   explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}
759
760   void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
761 // Walks up the stack 'n' callers.
762 size_t count = 0u;
763 art::StackVisitor::WalkStack(
764 [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
765 art::ArtMethod* m = stack_visitor->GetMethod();
766 if (m != nullptr && !m->IsRuntimeMethod()) {
767 DCHECK(method == nullptr);
768 if (count == n) {
769 method = m;
770 dex_pc = stack_visitor->GetDexPc(/*abort_on_failure=*/false);
771 return false;
772 }
773 count++;
774 }
775 return true;
776 },
777 self,
778 /* context= */ nullptr,
779 art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
780 }
781
782 const size_t n;
783 art::ArtMethod* method;
784 uint32_t dex_pc;
785 };
786
787 jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
788 jthread java_thread,
789 jint depth,
790 jmethodID* method_ptr,
791 jlocation* location_ptr) {
792 // It is not great that we have to hold these locks for so long, but it is necessary to ensure
793 // that the thread isn't dying on us.
794 art::ScopedObjectAccess soa(art::Thread::Current());
795 art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
796
797 art::Thread* thread;
798 jvmtiError thread_error = ERR(INTERNAL);
799 if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
800 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
801 return thread_error;
802 }
803 DCHECK(thread != nullptr);
804
805 art::ThreadState state = thread->GetState();
806 if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
807 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
808 return ERR(THREAD_NOT_ALIVE);
809 }
810
811 if (depth < 0) {
812 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
813 return ERR(ILLEGAL_ARGUMENT);
814 }
815 if (method_ptr == nullptr || location_ptr == nullptr) {
816 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
817 return ERR(NULL_POINTER);
818 }
819
820 GetLocationClosure closure(static_cast<size_t>(depth));
821 // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
822 if (!thread->RequestSynchronousCheckpoint(&closure)) {
823 return ERR(THREAD_NOT_ALIVE);
824 }
825
826 if (closure.method == nullptr) {
827 return ERR(NO_MORE_FRAMES);
828 }
829
830 *method_ptr = art::jni::EncodeArtMethod(closure.method);
831 if (closure.method->IsNative() || closure.method->IsProxyMethod()) {
832 *location_ptr = -1;
833 } else {
834 if (closure.dex_pc == art::dex::kDexNoIndex) {
835 return ERR(INTERNAL);
836 }
837 *location_ptr = static_cast<jlocation>(closure.dex_pc);
838 }
839
840 return ERR(NONE);
841 }
842
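// Collects the monitors owned by a thread: VisitFrame records monitors locked by each Java
// frame together with the stack depth at which they were acquired, while VisitRoot picks up
// monitors held through JNI, recorded with a depth of -1. Duplicates are filtered out.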
843 struct MonitorVisitor : public art::StackVisitor, public art::SingleRootVisitor {
844   // We need a context because VisitLocks needs it to retrieve the monitor objects.
845 explicit MonitorVisitor(art::Thread* thread)
846       REQUIRES_SHARED(art::Locks::mutator_lock_)
847 : art::StackVisitor(thread,
848 art::Context::Create(),
849 art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
850 hs(art::Thread::Current()),
851 current_stack_depth(0) {}
852
853   ~MonitorVisitor() {
854 delete context_;
855 }
856
857   bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
858 art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
859 if (!GetMethod()->IsRuntimeMethod()) {
860 art::Monitor::VisitLocks(this, AppendOwnedMonitors, this);
861 ++current_stack_depth;
862 }
863 return true;
864 }
865
866   static void AppendOwnedMonitors(art::ObjPtr<art::mirror::Object> owned_monitor, void* arg)
867 REQUIRES_SHARED(art::Locks::mutator_lock_) {
868 art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
869 MonitorVisitor* visitor = reinterpret_cast<MonitorVisitor*>(arg);
870 // Filter out duplicates.
871 for (const art::Handle<art::mirror::Object>& monitor : visitor->monitors) {
872 if (monitor.Get() == owned_monitor) {
873 return;
874 }
875 }
876 visitor->monitors.push_back(visitor->hs.NewHandle(owned_monitor));
877 visitor->stack_depths.push_back(visitor->current_stack_depth);
878 }
879
880   void VisitRoot(art::mirror::Object* obj, const art::RootInfo& info ATTRIBUTE_UNUSED)
881 override REQUIRES_SHARED(art::Locks::mutator_lock_) {
882 for (const art::Handle<art::mirror::Object>& m : monitors) {
883 if (m.Get() == obj) {
884 return;
885 }
886 }
887 monitors.push_back(hs.NewHandle(obj));
888 stack_depths.push_back(-1);
889 }
890
891 art::VariableSizedHandleScope hs;
892 jint current_stack_depth;
893 std::vector<art::Handle<art::mirror::Object>> monitors;
894 std::vector<jint> stack_depths;
895 };
896
897 template<typename Fn>
898 struct MonitorInfoClosure : public art::Closure {
899 public:
900   explicit MonitorInfoClosure(Fn handle_results)
901 : err_(OK), handle_results_(handle_results) {}
902
903   void Run(art::Thread* target) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
904 art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
905 // Find the monitors on the stack.
906 MonitorVisitor visitor(target);
907 visitor.WalkStack(/* include_transitions= */ false);
908 // Find any other monitors, including ones acquired in native code.
909 art::RootInfo root_info(art::kRootVMInternal);
910 target->GetJniEnv()->VisitMonitorRoots(&visitor, root_info);
911 err_ = handle_results_(visitor);
912 }
913
914   jvmtiError GetError() {
915 return err_;
916 }
917
918 private:
919 jvmtiError err_;
920 Fn handle_results_;
921 };
922
923
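// Shared driver for GetOwnedMonitorInfo and GetOwnedMonitorStackDepthInfo: locates the target
// thread, runs MonitorInfoClosure on it (through a synchronous checkpoint for other threads,
// directly when the target is the current thread) and returns the error from handle_results.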
924 template <typename Fn>
925 static jvmtiError GetOwnedMonitorInfoCommon(const art::ScopedObjectAccessAlreadyRunnable& soa,
926 jthread thread,
927 Fn handle_results)
928 REQUIRES_SHARED(art::Locks::mutator_lock_) {
929 art::Thread* self = art::Thread::Current();
930 MonitorInfoClosure<Fn> closure(handle_results);
931 bool called_method = false;
932 {
933 art::Locks::thread_list_lock_->ExclusiveLock(self);
934 art::Thread* target = nullptr;
935 jvmtiError err = ERR(INTERNAL);
936 if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
937 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
938 return err;
939 }
940 if (target != self) {
941 called_method = true;
942 // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
943 // Since this deals with object references we need to avoid going to sleep.
944 art::ScopedAssertNoThreadSuspension sants("Getting owned monitor usage");
945 if (!target->RequestSynchronousCheckpoint(&closure, art::ThreadState::kRunnable)) {
946 return ERR(THREAD_NOT_ALIVE);
947 }
948 } else {
949 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
950 }
951 }
952 // Cannot call the closure on the current thread if we have thread_list_lock since we need to call
953 // into the verifier which can cause the current thread to suspend for gc. Suspending would be a
954 // bad thing to do if we hold the ThreadListLock. For other threads since we are running it on a
955 // checkpoint we are fine but if the thread is the current one we need to drop the mutex first.
956 if (!called_method) {
957 closure.Run(self);
958 }
959 return closure.GetError();
960 }
961
962 jvmtiError StackUtil::GetOwnedMonitorStackDepthInfo(jvmtiEnv* env,
963 jthread thread,
964 jint* info_cnt,
965 jvmtiMonitorStackDepthInfo** info_ptr) {
966 if (info_cnt == nullptr || info_ptr == nullptr) {
967 return ERR(NULL_POINTER);
968 }
969 art::ScopedObjectAccess soa(art::Thread::Current());
970 std::vector<art::GcRoot<art::mirror::Object>> mons;
971 std::vector<uint32_t> depths;
972 auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
973 for (size_t i = 0; i < visitor.monitors.size(); i++) {
974 mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
975 depths.push_back(visitor.stack_depths[i]);
976 }
977 return OK;
978 };
979 jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
980 if (err != OK) {
981 return err;
982 }
983 auto nbytes = sizeof(jvmtiMonitorStackDepthInfo) * mons.size();
984 err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(info_ptr));
985 if (err != OK) {
986 return err;
987 }
988 *info_cnt = mons.size();
989 for (uint32_t i = 0; i < mons.size(); i++) {
990 (*info_ptr)[i] = {
991 soa.AddLocalReference<jobject>(mons[i].Read()),
992 static_cast<jint>(depths[i])
993 };
994 }
995 return err;
996 }
997
998 jvmtiError StackUtil::GetOwnedMonitorInfo(jvmtiEnv* env,
999 jthread thread,
1000 jint* owned_monitor_count_ptr,
1001 jobject** owned_monitors_ptr) {
1002 if (owned_monitor_count_ptr == nullptr || owned_monitors_ptr == nullptr) {
1003 return ERR(NULL_POINTER);
1004 }
1005 art::ScopedObjectAccess soa(art::Thread::Current());
1006 std::vector<art::GcRoot<art::mirror::Object>> mons;
1007 auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
1008 for (size_t i = 0; i < visitor.monitors.size(); i++) {
1009 mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
1010 }
1011 return OK;
1012 };
1013 jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
1014 if (err != OK) {
1015 return err;
1016 }
1017 auto nbytes = sizeof(jobject) * mons.size();
1018 err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(owned_monitors_ptr));
1019 if (err != OK) {
1020 return err;
1021 }
1022 *owned_monitor_count_ptr = mons.size();
1023 for (uint32_t i = 0; i < mons.size(); i++) {
1024 (*owned_monitors_ptr)[i] = soa.AddLocalReference<jobject>(mons[i].Read());
1025 }
1026 return err;
1027 }
1028
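// Arranges for a FramePop event when the frame at 'depth' eventually returns. The target must
// be suspended by user code (unless it is the current thread); a shadow frame is created for
// the target frame and marked notify-pop, and the thread is deoptimized if that frame was not
// already running in the interpreter.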
1029 jvmtiError StackUtil::NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth) {
1030 if (depth < 0) {
1031 return ERR(ILLEGAL_ARGUMENT);
1032 }
1033 ArtJvmTiEnv* tienv = ArtJvmTiEnv::AsArtJvmTiEnv(env);
1034 art::Thread* self = art::Thread::Current();
1035 art::Thread* target;
1036
1037 ScopedNoUserCodeSuspension snucs(self);
1038 // From now on we know we cannot get suspended by user-code.
1039 // NB This does a SuspendCheck (during thread state change) so we need to make
1040 // sure we don't have the 'suspend_lock' locked here.
1041 art::ScopedObjectAccess soa(self);
1042 art::Locks::thread_list_lock_->ExclusiveLock(self);
1043 jvmtiError err = ERR(INTERNAL);
1044 if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
1045 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1046 return err;
1047 }
1048 if (target != self) {
1049 // TODO This is part of the spec but we could easily avoid needing to do it.
1050 // We would just put all the logic into a sync-checkpoint.
1051 art::Locks::thread_suspend_count_lock_->ExclusiveLock(self);
1052 if (target->GetUserCodeSuspendCount() == 0) {
1053 art::Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
1054 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1055 return ERR(THREAD_NOT_SUSPENDED);
1056 }
1057 art::Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
1058 }
1059 // We hold the user_code_suspension_lock_ so the target thread is staying
1060 // suspended until we are done (unless it's 'self' in which case we don't care
1061 // since we aren't going to be returning).
1062 // TODO We could implement this using a synchronous checkpoint and not bother
1063 // with any of the suspension stuff. The spec does specifically say to return
1064 // THREAD_NOT_SUSPENDED though. Find the requested stack frame.
1065 std::unique_ptr<art::Context> context(art::Context::Create());
1066 FindFrameAtDepthVisitor visitor(target, context.get(), depth);
1067 visitor.WalkStack();
1068 if (!visitor.FoundFrame()) {
1069 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1070 return ERR(NO_MORE_FRAMES);
1071 }
1072 art::ArtMethod* method = visitor.GetMethod();
1073 if (method->IsNative()) {
1074 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1075 return ERR(OPAQUE_FRAME);
1076 }
1077 // From here we are sure to succeed.
1078 bool needs_instrument = false;
1079 // Get/create a shadow frame
1080 art::ShadowFrame* shadow_frame =
1081 visitor.GetOrCreateShadowFrame(&needs_instrument);
1082 {
1083 art::WriterMutexLock lk(self, tienv->event_info_mutex_);
1084 if (LIKELY(!shadow_frame->NeedsNotifyPop())) {
1085 // Ensure we won't miss exceptions being thrown if we get jit-compiled. We
1086 // only do this for the first NotifyPopFrame.
1087 target->IncrementForceInterpreterCount();
1088
1089 // Mark shadow frame as needs_notify_pop_
1090 shadow_frame->SetNotifyPop(true);
1091 }
1092 tienv->notify_frames.insert(shadow_frame);
1093 }
1094   // Make sure we will go to the interpreter and use the shadow frames.
1095 if (needs_instrument) {
1096 art::FunctionClosure fc([](art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_) {
1097 DeoptManager::Get()->DeoptimizeThread(self);
1098 });
1099 target->RequestSynchronousCheckpoint(&fc);
1100 } else {
1101 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1102 }
1103 return OK;
1104 }
1105
1106 namespace {
1107
1108 enum class NonStandardExitType {
1109 kPopFrame,
1110 kForceReturn,
1111 };
1112
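// RAII helper shared by PopFrame and ForceEarlyReturn. The constructor becomes runnable, finds
// the top two frames of the (user-code-suspended) target thread, validates them with
// CheckFunctions and ensures shadow frames exist for them; on failure 'result_' is set and the
// caller is still responsible for releasing thread_list_lock_. The destructor drops the mutator
// lock and the user-code-suspension guard.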
1113 template<NonStandardExitType kExitType>
1114 class NonStandardExitFrames {
1115 public:
1116   NonStandardExitFrames(art::Thread* self, jvmtiEnv* env, jthread thread)
1117 REQUIRES(!art::Locks::thread_suspend_count_lock_)
1118 ACQUIRE_SHARED(art::Locks::mutator_lock_)
1119 ACQUIRE(art::Locks::thread_list_lock_, art::Locks::user_code_suspension_lock_)
1120 : snucs_(self) {
1121 // We keep the user-code-suspend-count lock.
1122 art::Locks::user_code_suspension_lock_->AssertExclusiveHeld(self);
1123
1124 // From now on we know we cannot get suspended by user-code.
1125 // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
1126 // have the 'suspend_lock' locked here.
1127 old_state_ = self->TransitionFromSuspendedToRunnable();
1128 art::ScopedObjectAccessUnchecked soau(self);
1129
1130 art::Locks::thread_list_lock_->ExclusiveLock(self);
1131
1132 if (!ThreadUtil::GetAliveNativeThread(thread, soau, &target_, &result_)) {
1133 return;
1134 }
1135 {
1136 art::MutexLock tscl_mu(self, *art::Locks::thread_suspend_count_lock_);
1137 if (target_ != self && target_->GetUserCodeSuspendCount() == 0) {
1138 // We cannot be the current thread for this function.
1139 result_ = ERR(THREAD_NOT_SUSPENDED);
1140 return;
1141 }
1142 }
1143 JvmtiGlobalTLSData* tls_data = ThreadUtil::GetGlobalTLSData(target_);
1144 constexpr art::StackVisitor::StackWalkKind kWalkKind =
1145 art::StackVisitor::StackWalkKind::kIncludeInlinedFrames;
1146 if (tls_data != nullptr &&
1147 tls_data->disable_pop_frame_depth != JvmtiGlobalTLSData::kNoDisallowedPopFrame &&
1148 tls_data->disable_pop_frame_depth ==
1149 art::StackVisitor::ComputeNumFrames(target_, kWalkKind)) {
1150 JVMTI_LOG(WARNING, env) << "Disallowing frame pop due to in-progress class-load/prepare. "
1151 << "Frame at depth " << tls_data->disable_pop_frame_depth << " was "
1152 << "marked as un-poppable by the jvmti plugin. See b/117615146 for "
1153 << "more information.";
1154 result_ = ERR(OPAQUE_FRAME);
1155 return;
1156 }
1157 // We hold the user_code_suspension_lock_ so the target thread is staying suspended until we are
1158 // done.
1159 std::unique_ptr<art::Context> context(art::Context::Create());
1160 FindFrameAtDepthVisitor final_frame(target_, context.get(), 0);
1161 FindFrameAtDepthVisitor penultimate_frame(target_, context.get(), 1);
1162 final_frame.WalkStack();
1163 penultimate_frame.WalkStack();
1164
1165 if (!final_frame.FoundFrame() || !penultimate_frame.FoundFrame()) {
1166 // Cannot do it if there is only one frame!
1167 JVMTI_LOG(INFO, env) << "Can not pop final frame off of a stack";
1168 result_ = ERR(NO_MORE_FRAMES);
1169 return;
1170 }
1171
1172 art::ArtMethod* called_method = final_frame.GetMethod();
1173 art::ArtMethod* calling_method = penultimate_frame.GetMethod();
1174 if (!CheckFunctions(env, calling_method, called_method)) {
1175 return;
1176 }
1177 DCHECK(!called_method->IsNative()) << called_method->PrettyMethod();
1178
1179 // From here we are sure to succeed.
1180 result_ = OK;
1181
1182 // Get/create a shadow frame
1183 final_frame_ = final_frame.GetOrCreateShadowFrame(&created_final_frame_);
1184 penultimate_frame_ =
1185 (calling_method->IsNative()
1186 ? nullptr
1187 : penultimate_frame.GetOrCreateShadowFrame(&created_penultimate_frame_));
1188
1189 final_frame_id_ = final_frame.GetFrameId();
1190 penultimate_frame_id_ = penultimate_frame.GetFrameId();
1191
1192 CHECK_NE(final_frame_, penultimate_frame_) << "Frames at different depths not different!";
1193 }
1194
1195 bool CheckFunctions(jvmtiEnv* env, art::ArtMethod* calling, art::ArtMethod* called)
1196 REQUIRES(art::Locks::thread_list_lock_, art::Locks::user_code_suspension_lock_)
1197 REQUIRES_SHARED(art::Locks::mutator_lock_);
1198
1199   ~NonStandardExitFrames() RELEASE_SHARED(art::Locks::mutator_lock_)
1200 REQUIRES(!art::Locks::thread_list_lock_)
1201 RELEASE(art::Locks::user_code_suspension_lock_) {
1202 art::Thread* self = art::Thread::Current();
1203 DCHECK_EQ(old_state_, art::ThreadState::kNative)
1204 << "Unexpected thread state on entering PopFrame!";
1205 self->TransitionFromRunnableToSuspended(old_state_);
1206 }
1207
1208 ScopedNoUserCodeSuspension snucs_;
1209 art::ShadowFrame* final_frame_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = nullptr;
1210 art::ShadowFrame* penultimate_frame_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = nullptr;
1211 bool created_final_frame_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = false;
1212 bool created_penultimate_frame_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = false;
1213 uint32_t final_frame_id_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = -1;
1214 uint32_t penultimate_frame_id_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = -1;
1215 art::Thread* target_ GUARDED_BY(art::Locks::thread_list_lock_) = nullptr;
1216 art::ThreadState old_state_ = art::ThreadState::kTerminated;
1217 jvmtiError result_ = ERR(INTERNAL);
1218 };
1219
1220 template <>
1221 bool NonStandardExitFrames<NonStandardExitType::kForceReturn>::CheckFunctions(
1222 jvmtiEnv* env, art::ArtMethod* calling ATTRIBUTE_UNUSED, art::ArtMethod* called) {
1223 if (UNLIKELY(called->IsNative())) {
1224 result_ = ERR(OPAQUE_FRAME);
1225 JVMTI_LOG(INFO, env) << "Cannot force early return from " << called->PrettyMethod()
1226 << " because it is native.";
1227 return false;
1228 } else {
1229 return true;
1230 }
1231 }
1232
1233 template <>
1234 bool NonStandardExitFrames<NonStandardExitType::kPopFrame>::CheckFunctions(
1235 jvmtiEnv* env, art::ArtMethod* calling, art::ArtMethod* called) {
1236 if (UNLIKELY(calling->IsNative() || called->IsNative())) {
1237 result_ = ERR(OPAQUE_FRAME);
1238 JVMTI_LOG(INFO, env) << "Cannot force early return from " << called->PrettyMethod() << " to "
1239 << calling->PrettyMethod() << " because at least one of them is native.";
1240 return false;
1241 } else {
1242 return true;
1243 }
1244 }
1245
1246 class SetupMethodExitEvents {
1247 public:
1248   SetupMethodExitEvents(art::Thread* self,
1249 EventHandler* event_handler,
1250 jthread target) REQUIRES(!art::Locks::mutator_lock_,
1251 !art::Locks::user_code_suspension_lock_,
1252 !art::Locks::thread_list_lock_)
1253 : self_(self), event_handler_(event_handler), target_(target) {
1254 DCHECK(target != nullptr);
1255 art::Locks::mutator_lock_->AssertNotHeld(self_);
1256 art::Locks::user_code_suspension_lock_->AssertNotHeld(self_);
1257 art::Locks::thread_list_lock_->AssertNotHeld(self_);
1258 event_handler_->SetInternalEvent(
1259 target_, ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue, JVMTI_ENABLE);
1260 }
1261
1262 ~SetupMethodExitEvents() REQUIRES(!art::Locks::mutator_lock_,
1263 !art::Locks::user_code_suspension_lock_,
1264 !art::Locks::thread_list_lock_) {
1265 art::Locks::mutator_lock_->AssertNotHeld(self_);
1266 art::Locks::user_code_suspension_lock_->AssertNotHeld(self_);
1267 art::Locks::thread_list_lock_->AssertNotHeld(self_);
1268 if (failed_) {
1269 event_handler_->SetInternalEvent(
1270 target_, ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue, JVMTI_DISABLE);
1271 }
1272 }
1273
1274   void NotifyFailure() {
1275 failed_ = true;
1276 }
1277
1278 private:
1279 art::Thread* self_;
1280 EventHandler* event_handler_;
1281 jthread target_;
1282 bool failed_ = false;
1283 };
1284
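// Queues a delayed MethodExit event carrying the forced return value. Primitive values are
// packed into a jvalue directly, the jobject specialization stores a new global reference to
// the object, and the nullptr_t specialization (used for void methods) passes a zeroed jvalue.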
1285 template <typename T>
1286 void AddDelayedMethodExitEvent(EventHandler* handler, art::ShadowFrame* frame, T value)
1287 REQUIRES_SHARED(art::Locks::mutator_lock_)
1288 REQUIRES(art::Locks::user_code_suspension_lock_, art::Locks::thread_list_lock_);
1289
1290 template <typename T>
1291 void AddDelayedMethodExitEvent(EventHandler* handler, art::ShadowFrame* frame, T value) {
1292 art::JValue val = art::JValue::FromPrimitive(value);
1293 jvalue jval{ .j = val.GetJ() };
1294 handler->AddDelayedNonStandardExitEvent(frame, false, jval);
1295 }
1296
1297 template <>
1298 void AddDelayedMethodExitEvent<std::nullptr_t>(EventHandler* handler,
1299 art::ShadowFrame* frame,
1300 std::nullptr_t null_val ATTRIBUTE_UNUSED) {
1301 jvalue jval;
1302 memset(&jval, 0, sizeof(jval));
1303 handler->AddDelayedNonStandardExitEvent(frame, false, jval);
1304 }
1305
1306 template <>
1307 void AddDelayedMethodExitEvent<jobject>(EventHandler* handler,
1308 art::ShadowFrame* frame,
1309 jobject obj) {
1310 jvalue jval{ .l = art::Thread::Current()->GetJniEnv()->NewGlobalRef(obj) };
1311 handler->AddDelayedNonStandardExitEvent(frame, true, jval);
1312 }
1313
1314 template <typename T>
1315 bool ValidReturnType(art::Thread* self, art::ObjPtr<art::mirror::Class> return_type, T value)
1316 REQUIRES_SHARED(art::Locks::mutator_lock_)
1317 REQUIRES(art::Locks::user_code_suspension_lock_, art::Locks::thread_list_lock_);
1318
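// ValidReturnType checks that a forced return value is compatible with the method's declared
// return type: each SIMPLE_VALID_RETURN_TYPE specialization accepts a primitive value only for
// the listed Primitive::Types (jint also covers char, boolean, short and byte), and the jobject
// specialization below checks assignability.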
1319 #define SIMPLE_VALID_RETURN_TYPE(type, ...) \
1320 template <> \
1321 bool ValidReturnType<type>(art::Thread * self ATTRIBUTE_UNUSED, \
1322 art::ObjPtr<art::mirror::Class> return_type, \
1323 type value ATTRIBUTE_UNUSED) { \
1324 static constexpr std::initializer_list<art::Primitive::Type> types{ __VA_ARGS__ }; \
1325 return std::find(types.begin(), types.end(), return_type->GetPrimitiveType()) != types.end(); \
1326 }
1327
1328 SIMPLE_VALID_RETURN_TYPE(jlong, art::Primitive::kPrimLong);
1329 SIMPLE_VALID_RETURN_TYPE(jfloat, art::Primitive::kPrimFloat);
1330 SIMPLE_VALID_RETURN_TYPE(jdouble, art::Primitive::kPrimDouble);
1331 SIMPLE_VALID_RETURN_TYPE(nullptr_t, art::Primitive::kPrimVoid);
1332 SIMPLE_VALID_RETURN_TYPE(jint,
1333 art::Primitive::kPrimInt,
1334 art::Primitive::kPrimChar,
1335 art::Primitive::kPrimBoolean,
1336 art::Primitive::kPrimShort,
1337 art::Primitive::kPrimByte);
1338 #undef SIMPLE_VALID_RETURN_TYPE
1339
1340 template <>
1341 bool ValidReturnType<jobject>(art::Thread* self,
1342 art::ObjPtr<art::mirror::Class> return_type,
1343 jobject return_value) {
1344 if (return_type->IsPrimitive()) {
1345 return false;
1346 }
1347 if (return_value == nullptr) {
1348 // Null can be used for anything.
1349 return true;
1350 }
1351 return return_type->IsAssignableFrom(self->DecodeJObject(return_value)->GetClass());
1352 }
1353
1354 } // namespace
1355
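// Pops the topmost frame of 'thread' without completing it: the final frame is marked to be
// force-popped with its MethodExit events suppressed, and the calling frame is marked to
// re-execute its current invoke instruction once control returns to it. Newly created shadow
// frames require deoptimizing the target thread.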
1356 jvmtiError StackUtil::PopFrame(jvmtiEnv* env, jthread thread) {
1357 art::Thread* self = art::Thread::Current();
1358 NonStandardExitFrames<NonStandardExitType::kPopFrame> frames(self, env, thread);
1359 if (frames.result_ != OK) {
1360 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1361 return frames.result_;
1362 }
1363 // Tell the shadow-frame to return immediately and skip all exit events.
1364 frames.penultimate_frame_->SetForceRetryInstruction(true);
1365 frames.final_frame_->SetForcePopFrame(true);
1366 frames.final_frame_->SetSkipMethodExitEvents(true);
1367 if (frames.created_final_frame_ || frames.created_penultimate_frame_) {
1368 art::FunctionClosure fc([](art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_){
1369 DeoptManager::Get()->DeoptimizeThread(self);
1370 });
1371 frames.target_->RequestSynchronousCheckpoint(&fc);
1372 } else {
1373 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1374 }
1375 return OK;
1376 }
1377
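// Forces the topmost frame of 'thread' to return 'value' immediately: an internal event is
// enabled so the return value can be installed, the frame is marked for a forced pop, and a
// delayed MethodExit event carrying 'value' is queued. The value is type-checked against the
// method's resolved return type first.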
1378 template <typename T>
1379 jvmtiError
1380 StackUtil::ForceEarlyReturn(jvmtiEnv* env, EventHandler* event_handler, jthread thread, T value) {
1381 art::Thread* self = art::Thread::Current();
1382 // We don't want to use the null == current-thread idiom since for events (that we use internally
1383 // to implement force-early-return) we instead have null == all threads. Instead just get the
1384 // current jthread if needed.
1385 ScopedLocalRef<jthread> cur_thread(self->GetJniEnv(), nullptr);
1386 if (UNLIKELY(thread == nullptr)) {
1387 art::ScopedObjectAccess soa(self);
1388 cur_thread.reset(soa.AddLocalReference<jthread>(self->GetPeer()));
1389 thread = cur_thread.get();
1390 }
1391 // This sets up the exit events we implement early return using before we have the locks and
1392 // thanks to destructor ordering will tear them down if something goes wrong.
1393 SetupMethodExitEvents smee(self, event_handler, thread);
1394 NonStandardExitFrames<NonStandardExitType::kForceReturn> frames(self, env, thread);
1395 if (frames.result_ != OK) {
1396 smee.NotifyFailure();
1397 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1398 return frames.result_;
1399 } else if (!ValidReturnType<T>(
1400 self, frames.final_frame_->GetMethod()->ResolveReturnType(), value)) {
1401 smee.NotifyFailure();
1402 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1403 return ERR(TYPE_MISMATCH);
1404 } else if (frames.final_frame_->GetForcePopFrame()) {
1405 // TODO We should really support this.
1406 smee.NotifyFailure();
1407 std::string thread_name;
1408 frames.target_->GetThreadName(thread_name);
1409 JVMTI_LOG(WARNING, env) << "PopFrame or force-return already pending on thread " << thread_name;
1410 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1411 return ERR(OPAQUE_FRAME);
1412 }
1413 // Tell the shadow-frame to return immediately and skip all exit events.
1414 frames.final_frame_->SetForcePopFrame(true);
1415 AddDelayedMethodExitEvent<T>(event_handler, frames.final_frame_, value);
1416 if (frames.created_final_frame_ || frames.created_penultimate_frame_) {
1417 art::FunctionClosure fc([](art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_){
1418 DeoptManager::Get()->DeoptimizeThread(self);
1419 });
1420 frames.target_->RequestSynchronousCheckpoint(&fc);
1421 } else {
1422 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1423 }
1424 return OK;
1425 }
1426
1427 // Instantiate the ForceEarlyReturn templates.
1428 template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jint);
1429 template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jlong);
1430 template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jfloat);
1431 template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jdouble);
1432 template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jobject);
1433 template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, nullptr_t);
1434
1435 } // namespace openjdkjvmti
1436