1 /* Copyright (C) 2016 The Android Open Source Project
2 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
3 *
4 * This file implements interfaces from the file jvmti.h. This implementation
5 * is licensed under the same terms as the file jvmti.h. The
6 * copyright and license information for the file jvmti.h follows.
7 *
8 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
9 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
10 *
11 * This code is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License version 2 only, as
13 * published by the Free Software Foundation. Oracle designates this
14 * particular file as subject to the "Classpath" exception as provided
15 * by Oracle in the LICENSE file that accompanied this code.
16 *
17 * This code is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 * version 2 for more details (a copy is included in the LICENSE file that
21 * accompanied this code).
22 *
23 * You should have received a copy of the GNU General Public License version
24 * 2 along with this work; if not, write to the Free Software Foundation,
25 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
26 *
27 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
28 * or visit www.oracle.com if you need additional information or have any
29 * questions.
30 */
31
32 #include "ti_stack.h"
33
34 #include <algorithm>
35 #include <list>
36 #include <unordered_map>
37 #include <vector>
38
39 #include "arch/context.h"
40 #include "art_field-inl.h"
42 #include "art_jvmti.h"
43 #include "art_method-inl.h"
44 #include "barrier.h"
45 #include "base/bit_utils.h"
46 #include "base/enums.h"
47 #include "base/mutex.h"
48 #include "deopt_manager.h"
49 #include "dex/code_item_accessors-inl.h"
50 #include "dex/dex_file.h"
51 #include "dex/dex_file_annotations.h"
52 #include "dex/dex_file_types.h"
53 #include "gc_root.h"
54 #include "handle_scope-inl.h"
55 #include "jni/jni_env_ext.h"
56 #include "jni/jni_internal.h"
57 #include "mirror/class.h"
58 #include "mirror/dex_cache.h"
59 #include "nativehelper/scoped_local_ref.h"
60 #include "scoped_thread_state_change-inl.h"
61 #include "stack.h"
62 #include "ti_logging.h"
63 #include "ti_thread.h"
64 #include "thread-current-inl.h"
65 #include "thread_list.h"
66 #include "thread_pool.h"
68 #include "well_known_classes.h"
69
70 namespace openjdkjvmti {
71
72 template <typename FrameFn>
73 struct GetStackTraceVisitor : public art::StackVisitor {
74   GetStackTraceVisitor(art::Thread* thread_in,
75 size_t start_,
76 size_t stop_,
77 FrameFn fn_)
78 : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
79 fn(fn_),
80 start(start_),
81 stop(stop_) {}
82 GetStackTraceVisitor(const GetStackTraceVisitor&) = default;
83 GetStackTraceVisitor(GetStackTraceVisitor&&) noexcept = default;
84
85   bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
86 art::ArtMethod* m = GetMethod();
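    // Runtime methods (stubs and trampolines) are internal to ART and are not reported as
    // Java frames, so skip them.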
87 if (m->IsRuntimeMethod()) {
88 return true;
89 }
90
91 if (start == 0) {
92 m = m->GetInterfaceMethodIfProxy(art::kRuntimePointerSize);
93 jmethodID id = art::jni::EncodeArtMethod(m);
94
95 uint32_t dex_pc = GetDexPc(false);
96 jlong dex_location = (dex_pc == art::dex::kDexNoIndex) ? -1 : static_cast<jlong>(dex_pc);
97
98 jvmtiFrameInfo info = { id, dex_location };
99 fn(info);
100
101 if (stop == 1) {
102 return false; // We're done.
103 } else if (stop > 0) {
104 stop--;
105 }
106 } else {
107 start--;
108 }
109
110 return true;
111 }
112
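  // fn is invoked once per reported frame. start counts frames still to be skipped before
  // reporting begins; stop counts frames still to be reported, where 0 means no limit.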
113 FrameFn fn;
114 size_t start;
115 size_t stop;
116 };
117
118 art::ShadowFrame* FindFrameAtDepthVisitor::GetOrCreateShadowFrame(bool* created_frame) {
119 art::ShadowFrame* cur = GetCurrentShadowFrame();
120 if (cur == nullptr) {
121 *created_frame = true;
122 art::ArtMethod* method = GetMethod();
123 const uint16_t num_regs = method->DexInstructionData().RegistersSize();
124 cur = GetThread()->FindOrCreateDebuggerShadowFrame(GetFrameId(),
125 num_regs,
126 method,
127 GetDexPc());
128 DCHECK(cur != nullptr);
129 } else {
130 *created_frame = false;
131 }
132 return cur;
133 }
134
135 template <typename FrameFn>
136 GetStackTraceVisitor<FrameFn> MakeStackTraceVisitor(art::Thread* thread_in,
137 size_t start,
138 size_t stop,
139 FrameFn fn) {
140 return GetStackTraceVisitor<FrameFn>(thread_in, start, stop, fn);
141 }
142
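// Checkpoint closure that collects the walked frames into a vector. Used when the whole stack
// is needed up front, e.g. for a negative start_depth that counts frames from the bottom.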
143 struct GetStackTraceVectorClosure : public art::Closure {
144 public:
145   GetStackTraceVectorClosure(size_t start, size_t stop)
146 : start_input(start),
147 stop_input(stop),
148 start_result(0),
149 stop_result(0) {}
150
151   void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
152 auto frames_fn = [&](jvmtiFrameInfo info) {
153 frames.push_back(info);
154 };
155 auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
156 visitor.WalkStack(/* include_transitions= */ false);
157
158 start_result = visitor.start;
159 stop_result = visitor.stop;
160 }
161
162 const size_t start_input;
163 const size_t stop_input;
164
165 std::vector<jvmtiFrameInfo> frames;
166 size_t start_result;
167 size_t stop_result;
168 };
169
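// Copies frames collected by a stack walk into the caller's frame_buffer. A non-zero
// start_result means the walk ran out of frames before reaching the requested start_depth.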
170 static jvmtiError TranslateFrameVector(const std::vector<jvmtiFrameInfo>& frames,
171 jint start_depth,
172 size_t start_result,
173 jint max_frame_count,
174 jvmtiFrameInfo* frame_buffer,
175 jint* count_ptr) {
176 size_t collected_frames = frames.size();
177
178 // Assume we're here having collected something.
179 DCHECK_GT(max_frame_count, 0);
180
181 // Frames from the top.
182 if (start_depth >= 0) {
183 if (start_result != 0) {
184 // Not enough frames.
185 return ERR(ILLEGAL_ARGUMENT);
186 }
187 DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
188 if (frames.size() > 0) {
189 memcpy(frame_buffer, frames.data(), collected_frames * sizeof(jvmtiFrameInfo));
190 }
191 *count_ptr = static_cast<jint>(frames.size());
192 return ERR(NONE);
193 }
194
195 // Frames from the bottom.
196 if (collected_frames < static_cast<size_t>(-start_depth)) {
197 return ERR(ILLEGAL_ARGUMENT);
198 }
199
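  // start_depth is negative here: copy the last -start_depth frames (bounded by max_frame_count),
  // beginning at index collected_frames + start_depth.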
200 size_t count = std::min(static_cast<size_t>(-start_depth), static_cast<size_t>(max_frame_count));
201 memcpy(frame_buffer,
202 &frames.data()[collected_frames + start_depth],
203 count * sizeof(jvmtiFrameInfo));
204 *count_ptr = static_cast<jint>(count);
205 return ERR(NONE);
206 }
207
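// Checkpoint closure that writes frames straight into a caller-provided buffer, avoiding an
// intermediate vector. This is the fast path for a non-negative start_depth.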
208 struct GetStackTraceDirectClosure : public art::Closure {
209 public:
210   GetStackTraceDirectClosure(jvmtiFrameInfo* frame_buffer_, size_t start, size_t stop)
211 : frame_buffer(frame_buffer_),
212 start_input(start),
213 stop_input(stop),
214 index(0) {
215 DCHECK_GE(start_input, 0u);
216 }
217
218   void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
219 auto frames_fn = [&](jvmtiFrameInfo info) {
220 frame_buffer[index] = info;
221 ++index;
222 };
223 auto visitor = MakeStackTraceVisitor(self, start_input, stop_input, frames_fn);
224 visitor.WalkStack(/* include_transitions= */ false);
225 }
226
227 jvmtiFrameInfo* frame_buffer;
228
229 const size_t start_input;
230 const size_t stop_input;
231
232 size_t index = 0;
233 };
234
235 jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env,
236 jthread java_thread,
237 jint start_depth,
238 jint max_frame_count,
239 jvmtiFrameInfo* frame_buffer,
240 jint* count_ptr) {
241 // It is not great that we have to hold these locks for so long, but it is necessary to ensure
242 // that the thread isn't dying on us.
243 art::ScopedObjectAccess soa(art::Thread::Current());
244 art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
245
246 art::Thread* thread;
247 jvmtiError thread_error = ERR(INTERNAL);
248 if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
249 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
250 return thread_error;
251 }
252 DCHECK(thread != nullptr);
253
254 art::ThreadState state = thread->GetState();
255 if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
256 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
257 return ERR(THREAD_NOT_ALIVE);
258 }
259
260 if (max_frame_count < 0) {
261 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
262 return ERR(ILLEGAL_ARGUMENT);
263 }
264 if (frame_buffer == nullptr || count_ptr == nullptr) {
265 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
266 return ERR(NULL_POINTER);
267 }
268
269 if (max_frame_count == 0) {
270 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
271 *count_ptr = 0;
272 return ERR(NONE);
273 }
274
275 if (start_depth >= 0) {
276 // Fast path: Regular order of stack trace. Fill into the frame_buffer directly.
277 GetStackTraceDirectClosure closure(frame_buffer,
278 static_cast<size_t>(start_depth),
279 static_cast<size_t>(max_frame_count));
280 // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
281 if (!thread->RequestSynchronousCheckpoint(&closure)) {
282 return ERR(THREAD_NOT_ALIVE);
283 }
284 *count_ptr = static_cast<jint>(closure.index);
285 if (closure.index == 0) {
286 JVMTI_LOG(INFO, jvmti_env) << "The stack is not large enough for a start_depth of "
287 << start_depth << ".";
288 return ERR(ILLEGAL_ARGUMENT);
289 }
290 return ERR(NONE);
291 } else {
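    // Negative start_depth: collect the whole stack, then let TranslateFrameVector select the
    // frames counted from the bottom.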
292 GetStackTraceVectorClosure closure(0, 0);
293 // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
294 if (!thread->RequestSynchronousCheckpoint(&closure)) {
295 return ERR(THREAD_NOT_ALIVE);
296 }
297
298 return TranslateFrameVector(closure.frames,
299 start_depth,
300 closure.start_result,
301 max_frame_count,
302 frame_buffer,
303 count_ptr);
304 }
305 }
306
307 template <typename Data>
308 struct GetAllStackTracesVectorClosure : public art::Closure {
309   GetAllStackTracesVectorClosure(size_t stop, Data* data_)
310 : barrier(0), stop_input(stop), data(data_) {}
311
312   void Run(art::Thread* thread) override
313 REQUIRES_SHARED(art::Locks::mutator_lock_)
314 REQUIRES(!data->mutex) {
315 art::Thread* self = art::Thread::Current();
316 Work(thread, self);
317 barrier.Pass(self);
318 }
319
320   void Work(art::Thread* thread, art::Thread* self)
321 REQUIRES_SHARED(art::Locks::mutator_lock_)
322 REQUIRES(!data->mutex) {
323 // Skip threads that are still starting.
324 if (thread->IsStillStarting()) {
325 return;
326 }
327
328 std::vector<jvmtiFrameInfo>* thread_frames = data->GetFrameStorageFor(self, thread);
329 if (thread_frames == nullptr) {
330 return;
331 }
332
333 // Now collect the data.
334 auto frames_fn = [&](jvmtiFrameInfo info) {
335 thread_frames->push_back(info);
336 };
337 auto visitor = MakeStackTraceVisitor(thread, 0u, stop_input, frames_fn);
338 visitor.WalkStack(/* include_transitions= */ false);
339 }
340
341 art::Barrier barrier;
342 const size_t stop_input;
343 Data* data;
344 };
345
346 template <typename Data>
347 static void RunCheckpointAndWait(Data* data, size_t max_frame_count)
348 REQUIRES_SHARED(art::Locks::mutator_lock_) {
349 // Note: requires the mutator lock as the checkpoint requires the mutator lock.
350 GetAllStackTracesVectorClosure<Data> closure(max_frame_count, data);
351 size_t barrier_count = art::Runtime::Current()->GetThreadList()->RunCheckpoint(&closure, nullptr);
352 if (barrier_count == 0) {
353 return;
354 }
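  // Wait for all checkpointed threads to pass the barrier so that 'data' is fully populated
  // before the caller reads it.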
355 art::Thread* self = art::Thread::Current();
356 art::ScopedThreadStateChange tsc(self, art::ThreadState::kWaitingForCheckPointsToRun);
357 closure.barrier.Increment(self, barrier_count);
358 }
359
360 jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
361 jint max_frame_count,
362 jvmtiStackInfo** stack_info_ptr,
363 jint* thread_count_ptr) {
364 if (max_frame_count < 0) {
365 return ERR(ILLEGAL_ARGUMENT);
366 }
367 if (stack_info_ptr == nullptr || thread_count_ptr == nullptr) {
368 return ERR(NULL_POINTER);
369 }
370
371 struct AllStackTracesData {
372 AllStackTracesData() : mutex("GetAllStackTraces", art::LockLevel::kAbortLock) {}
373 ~AllStackTracesData() {
374 JNIEnv* jni_env = art::Thread::Current()->GetJniEnv();
375 for (jthread global_thread_ref : thread_peers) {
376 jni_env->DeleteGlobalRef(global_thread_ref);
377 }
378 }
379
380 std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
381 REQUIRES_SHARED(art::Locks::mutator_lock_)
382 REQUIRES(!mutex) {
383 art::MutexLock mu(self, mutex);
384
385 threads.push_back(thread);
386
387 jthread peer = art::Runtime::Current()->GetJavaVM()->AddGlobalRef(
388 self, thread->GetPeerFromOtherThread());
389 thread_peers.push_back(peer);
390
391 frames.emplace_back(new std::vector<jvmtiFrameInfo>());
392 return frames.back().get();
393 }
394
395 art::Mutex mutex;
396
397 // Storage. Only access directly after completion.
398
399 std::vector<art::Thread*> threads;
400 // "thread_peers" contains global references to their peers.
401 std::vector<jthread> thread_peers;
402
403 std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
404 };
405
406 AllStackTracesData data;
407 art::Thread* current = art::Thread::Current();
408 {
409 art::ScopedObjectAccess soa(current);
410 RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));
411 }
412
413 // Convert the data into our output format.
414
415 // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
416 // allocate one big chunk for this and the actual frames, which means we need
417 // to either be conservative or rearrange things later (the latter is implemented).
418 std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
419 std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
420 frame_infos.reserve(data.frames.size());
421
422 // Now run through and add data for each thread.
423 size_t sum_frames = 0;
424 for (size_t index = 0; index < data.frames.size(); ++index) {
425 jvmtiStackInfo& stack_info = stack_info_array.get()[index];
426 memset(&stack_info, 0, sizeof(jvmtiStackInfo));
427
428 const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();
429
430 // For the time being, set the thread to null. We'll fix it up in the second stage.
431 stack_info.thread = nullptr;
432 stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;
433
434 size_t collected_frames = thread_frames.size();
435 if (max_frame_count == 0 || collected_frames == 0) {
436 stack_info.frame_count = 0;
437 stack_info.frame_buffer = nullptr;
438 continue;
439 }
440 DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
441
442 jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
443 frame_infos.emplace_back(frame_info);
444
445 jint count;
446 jvmtiError translate_result = TranslateFrameVector(thread_frames,
447 0,
448 0,
449 static_cast<jint>(collected_frames),
450 frame_info,
451 &count);
452 DCHECK(translate_result == JVMTI_ERROR_NONE);
453 stack_info.frame_count = static_cast<jint>(collected_frames);
454 stack_info.frame_buffer = frame_info;
455 sum_frames += static_cast<size_t>(count);
456 }
457
458   // No errors yet. Now put it all into an output buffer.
459 size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * data.frames.size(),
460 alignof(jvmtiFrameInfo));
461 size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
462 unsigned char* chunk_data;
463 jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
464 if (alloc_result != ERR(NONE)) {
465 return alloc_result;
466 }
467
468 jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
469 // First copy in all the basic data.
470 memcpy(stack_info, stack_info_array.get(), sizeof(jvmtiStackInfo) * data.frames.size());
471
472 // Now copy the frames and fix up the pointers.
473 jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
474 chunk_data + rounded_stack_info_size);
475 for (size_t i = 0; i < data.frames.size(); ++i) {
476 jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
477 jvmtiStackInfo& new_stack_info = stack_info[i];
478
479 // Translate the global ref into a local ref.
480 new_stack_info.thread =
481 static_cast<JNIEnv*>(current->GetJniEnv())->NewLocalRef(data.thread_peers[i]);
482
483 if (old_stack_info.frame_count > 0) {
484 // Only copy when there's data - leave the nullptr alone.
485 size_t frames_size = static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
486 memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
487 new_stack_info.frame_buffer = frame_info;
488 frame_info += old_stack_info.frame_count;
489 }
490 }
491
492 *stack_info_ptr = stack_info;
493 *thread_count_ptr = static_cast<jint>(data.frames.size());
494
495 return ERR(NONE);
496 }
497
498 jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
499 jint thread_count,
500 const jthread* thread_list,
501 jint max_frame_count,
502 jvmtiStackInfo** stack_info_ptr) {
503 if (max_frame_count < 0) {
504 return ERR(ILLEGAL_ARGUMENT);
505 }
506 if (thread_count < 0) {
507 return ERR(ILLEGAL_ARGUMENT);
508 }
509 if (thread_count == 0) {
510 *stack_info_ptr = nullptr;
511 return ERR(NONE);
512 }
513 if (thread_list == nullptr || stack_info_ptr == nullptr) {
514 return ERR(NULL_POINTER);
515 }
516
517 art::Thread* current = art::Thread::Current();
518 art::ScopedObjectAccess soa(current); // Now we know we have the shared lock.
519
520 struct SelectStackTracesData {
521 SelectStackTracesData() : mutex("GetSelectStackTraces", art::LockLevel::kAbortLock) {}
522
523 std::vector<jvmtiFrameInfo>* GetFrameStorageFor(art::Thread* self, art::Thread* thread)
524 REQUIRES_SHARED(art::Locks::mutator_lock_)
525 REQUIRES(!mutex) {
526 art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
527 for (size_t index = 0; index != handles.size(); ++index) {
528 if (peer == handles[index].Get()) {
529 // Found the thread.
530 art::MutexLock mu(self, mutex);
531
532 threads.push_back(thread);
533 thread_list_indices.push_back(index);
534
535 frames.emplace_back(new std::vector<jvmtiFrameInfo>());
536 return frames.back().get();
537 }
538 }
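      // The thread's peer is not in the requested thread list; ignore it.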
539 return nullptr;
540 }
541
542 art::Mutex mutex;
543
544 // Selection data.
545
546 std::vector<art::Handle<art::mirror::Object>> handles;
547
548 // Storage. Only access directly after completion.
549
550 std::vector<art::Thread*> threads;
551 std::vector<size_t> thread_list_indices;
552
553 std::vector<std::unique_ptr<std::vector<jvmtiFrameInfo>>> frames;
554 };
555
556 SelectStackTracesData data;
557
558 // Decode all threads to raw pointers. Put them into a handle scope to avoid any moving GC bugs.
559 art::VariableSizedHandleScope hs(current);
560 for (jint i = 0; i != thread_count; ++i) {
561 if (thread_list[i] == nullptr) {
562 return ERR(INVALID_THREAD);
563 }
564 if (!soa.Env()->IsInstanceOf(thread_list[i], art::WellKnownClasses::java_lang_Thread)) {
565 return ERR(INVALID_THREAD);
566 }
567 data.handles.push_back(hs.NewHandle(soa.Decode<art::mirror::Object>(thread_list[i])));
568 }
569
570 RunCheckpointAndWait(&data, static_cast<size_t>(max_frame_count));
571
572 // Convert the data into our output format.
573
574 // Note: we use an array of jvmtiStackInfo for convenience. The spec says we need to
575 // allocate one big chunk for this and the actual frames, which means we need
576 // to either be conservative or rearrange things later (the latter is implemented).
577 std::unique_ptr<jvmtiStackInfo[]> stack_info_array(new jvmtiStackInfo[data.frames.size()]);
578 std::vector<std::unique_ptr<jvmtiFrameInfo[]>> frame_infos;
579 frame_infos.reserve(data.frames.size());
580
581 // Now run through and add data for each thread.
582 size_t sum_frames = 0;
583 for (size_t index = 0; index < data.frames.size(); ++index) {
584 jvmtiStackInfo& stack_info = stack_info_array.get()[index];
585 memset(&stack_info, 0, sizeof(jvmtiStackInfo));
586
587 art::Thread* self = data.threads[index];
588 const std::vector<jvmtiFrameInfo>& thread_frames = *data.frames[index].get();
589
590 // For the time being, set the thread to null. We don't have good ScopedLocalRef
591 // infrastructure.
592 DCHECK(self->GetPeerFromOtherThread() != nullptr);
593 stack_info.thread = nullptr;
594 stack_info.state = JVMTI_THREAD_STATE_SUSPENDED;
595
596 size_t collected_frames = thread_frames.size();
597 if (max_frame_count == 0 || collected_frames == 0) {
598 stack_info.frame_count = 0;
599 stack_info.frame_buffer = nullptr;
600 continue;
601 }
602 DCHECK_LE(collected_frames, static_cast<size_t>(max_frame_count));
603
604 jvmtiFrameInfo* frame_info = new jvmtiFrameInfo[collected_frames];
605 frame_infos.emplace_back(frame_info);
606
607 jint count;
608 jvmtiError translate_result = TranslateFrameVector(thread_frames,
609 0,
610 0,
611 static_cast<jint>(collected_frames),
612 frame_info,
613 &count);
614 DCHECK(translate_result == JVMTI_ERROR_NONE);
615 stack_info.frame_count = static_cast<jint>(collected_frames);
616 stack_info.frame_buffer = frame_info;
617 sum_frames += static_cast<size_t>(count);
618 }
619
620   // No errors yet. Now put it all into an output buffer. Note that we allocate thread_count
621   // stack infos, which may differ from frames.size().
622 size_t rounded_stack_info_size = art::RoundUp(sizeof(jvmtiStackInfo) * thread_count,
623 alignof(jvmtiFrameInfo));
624 size_t chunk_size = rounded_stack_info_size + sum_frames * sizeof(jvmtiFrameInfo);
625 unsigned char* chunk_data;
626 jvmtiError alloc_result = env->Allocate(chunk_size, &chunk_data);
627 if (alloc_result != ERR(NONE)) {
628 return alloc_result;
629 }
630
631 jvmtiStackInfo* stack_info = reinterpret_cast<jvmtiStackInfo*>(chunk_data);
632 jvmtiFrameInfo* frame_info = reinterpret_cast<jvmtiFrameInfo*>(
633 chunk_data + rounded_stack_info_size);
634
635 for (size_t i = 0; i < static_cast<size_t>(thread_count); ++i) {
636 // Check whether we found a running thread for this.
637 // Note: For simplicity, and with the expectation that the list is usually small, use a simple
638 // search. (The list is *not* sorted!)
639 auto it = std::find(data.thread_list_indices.begin(), data.thread_list_indices.end(), i);
640 if (it == data.thread_list_indices.end()) {
641 // No native thread. Must be new or dead. We need to fill out the stack info now.
642 // (Need to read the Java "started" field to know whether this is starting or terminated.)
643 art::ObjPtr<art::mirror::Object> peer = soa.Decode<art::mirror::Object>(thread_list[i]);
644 art::ObjPtr<art::mirror::Class> klass = peer->GetClass();
645 art::ArtField* started_field = klass->FindDeclaredInstanceField("started", "Z");
646 CHECK(started_field != nullptr);
647 bool started = started_field->GetBoolean(peer) != 0;
648 constexpr jint kStartedState = JVMTI_JAVA_LANG_THREAD_STATE_NEW;
649 constexpr jint kTerminatedState = JVMTI_THREAD_STATE_TERMINATED |
650 JVMTI_JAVA_LANG_THREAD_STATE_TERMINATED;
651 stack_info[i].thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
652 stack_info[i].state = started ? kTerminatedState : kStartedState;
653 stack_info[i].frame_count = 0;
654 stack_info[i].frame_buffer = nullptr;
655 } else {
656 // Had a native thread and frames.
657 size_t f_index = it - data.thread_list_indices.begin();
658
659 jvmtiStackInfo& old_stack_info = stack_info_array.get()[f_index];
660 jvmtiStackInfo& new_stack_info = stack_info[i];
661
662 memcpy(&new_stack_info, &old_stack_info, sizeof(jvmtiStackInfo));
663 new_stack_info.thread = reinterpret_cast<JNIEnv*>(soa.Env())->NewLocalRef(thread_list[i]);
664 if (old_stack_info.frame_count > 0) {
665 // Only copy when there's data - leave the nullptr alone.
666 size_t frames_size =
667 static_cast<size_t>(old_stack_info.frame_count) * sizeof(jvmtiFrameInfo);
668 memcpy(frame_info, old_stack_info.frame_buffer, frames_size);
669 new_stack_info.frame_buffer = frame_info;
670 frame_info += old_stack_info.frame_count;
671 }
672 }
673 }
674
675 *stack_info_ptr = stack_info;
676
677 return ERR(NONE);
678 }
679
680 struct GetFrameCountClosure : public art::Closure {
681 public:
682   GetFrameCountClosure() : count(0) {}
683
684   void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
685 // This is not StackVisitor::ComputeNumFrames, as runtime methods and transitions must not be
686 // counted.
687 art::StackVisitor::WalkStack(
688 [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
689 art::ArtMethod* m = stack_visitor->GetMethod();
690 if (m != nullptr && !m->IsRuntimeMethod()) {
691 count++;
692 }
693 return true;
694 },
695 self,
696 /* context= */ nullptr,
697 art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
698 }
699
700 size_t count;
701 };
702
703 jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
704 jthread java_thread,
705 jint* count_ptr) {
706 // It is not great that we have to hold these locks for so long, but it is necessary to ensure
707 // that the thread isn't dying on us.
708 art::ScopedObjectAccess soa(art::Thread::Current());
709 art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
710
711 art::Thread* thread;
712 jvmtiError thread_error = ERR(INTERNAL);
713 if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
714 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
715 return thread_error;
716 }
717
718 DCHECK(thread != nullptr);
719 art::ThreadState state = thread->GetState();
720 if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
721 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
722 return ERR(THREAD_NOT_ALIVE);
723 }
724
725 if (count_ptr == nullptr) {
726 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
727 return ERR(NULL_POINTER);
728 }
729
730 GetFrameCountClosure closure;
731 // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
732 if (!thread->RequestSynchronousCheckpoint(&closure)) {
733 return ERR(THREAD_NOT_ALIVE);
734 }
735
736 *count_ptr = closure.count;
737 return ERR(NONE);
738 }
739
740 struct GetLocationClosure : public art::Closure {
741 public:
742   explicit GetLocationClosure(size_t n_in) : n(n_in), method(nullptr), dex_pc(0) {}
743
744   void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
745 // Walks up the stack 'n' callers.
746 size_t count = 0u;
747 art::StackVisitor::WalkStack(
748 [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
749 art::ArtMethod* m = stack_visitor->GetMethod();
750 if (m != nullptr && !m->IsRuntimeMethod()) {
751 DCHECK(method == nullptr);
752 if (count == n) {
753 method = m;
754 dex_pc = stack_visitor->GetDexPc(/*abort_on_failure=*/false);
755 return false;
756 }
757 count++;
758 }
759 return true;
760 },
761 self,
762 /* context= */ nullptr,
763 art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
764 }
765
766 const size_t n;
767 art::ArtMethod* method;
768 uint32_t dex_pc;
769 };
770
771 jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
772 jthread java_thread,
773 jint depth,
774 jmethodID* method_ptr,
775 jlocation* location_ptr) {
776 // It is not great that we have to hold these locks for so long, but it is necessary to ensure
777 // that the thread isn't dying on us.
778 art::ScopedObjectAccess soa(art::Thread::Current());
779 art::Locks::thread_list_lock_->ExclusiveLock(soa.Self());
780
781 art::Thread* thread;
782 jvmtiError thread_error = ERR(INTERNAL);
783 if (!ThreadUtil::GetAliveNativeThread(java_thread, soa, &thread, &thread_error)) {
784 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
785 return thread_error;
786 }
787 DCHECK(thread != nullptr);
788
789 art::ThreadState state = thread->GetState();
790 if (state == art::ThreadState::kStarting || thread->IsStillStarting()) {
791 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
792 return ERR(THREAD_NOT_ALIVE);
793 }
794
795 if (depth < 0) {
796 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
797 return ERR(ILLEGAL_ARGUMENT);
798 }
799 if (method_ptr == nullptr || location_ptr == nullptr) {
800 art::Locks::thread_list_lock_->ExclusiveUnlock(soa.Self());
801 return ERR(NULL_POINTER);
802 }
803
804 GetLocationClosure closure(static_cast<size_t>(depth));
805 // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
806 if (!thread->RequestSynchronousCheckpoint(&closure)) {
807 return ERR(THREAD_NOT_ALIVE);
808 }
809
810 if (closure.method == nullptr) {
811 return ERR(NO_MORE_FRAMES);
812 }
813
814 *method_ptr = art::jni::EncodeArtMethod(closure.method);
815 if (closure.method->IsNative() || closure.method->IsProxyMethod()) {
816 *location_ptr = -1;
817 } else {
818 if (closure.dex_pc == art::dex::kDexNoIndex) {
819 return ERR(INTERNAL);
820 }
821 *location_ptr = static_cast<jlocation>(closure.dex_pc);
822 }
823
824 return ERR(NONE);
825 }
826
827 struct MonitorVisitor : public art::StackVisitor, public art::SingleRootVisitor {
828   // We need a context because VisitLocks needs it to retrieve the monitor objects.
829 explicit MonitorVisitor(art::Thread* thread)
830       REQUIRES_SHARED(art::Locks::mutator_lock_)
831 : art::StackVisitor(thread,
832 art::Context::Create(),
833 art::StackVisitor::StackWalkKind::kIncludeInlinedFrames),
834 hs(art::Thread::Current()),
835 current_stack_depth(0) {}
836
837   ~MonitorVisitor() {
838 delete context_;
839 }
840
841   bool VisitFrame() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
842 art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
843 if (!GetMethod()->IsRuntimeMethod()) {
844 art::Monitor::VisitLocks(this, AppendOwnedMonitors, this);
845 ++current_stack_depth;
846 }
847 return true;
848 }
849
850   static void AppendOwnedMonitors(art::ObjPtr<art::mirror::Object> owned_monitor, void* arg)
851 REQUIRES_SHARED(art::Locks::mutator_lock_) {
852 art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
853 MonitorVisitor* visitor = reinterpret_cast<MonitorVisitor*>(arg);
854 // Filter out duplicates.
855 for (const art::Handle<art::mirror::Object>& monitor : visitor->monitors) {
856 if (monitor.Get() == owned_monitor) {
857 return;
858 }
859 }
860 visitor->monitors.push_back(visitor->hs.NewHandle(owned_monitor));
861 visitor->stack_depths.push_back(visitor->current_stack_depth);
862 }
863
864   void VisitRoot(art::mirror::Object* obj, const art::RootInfo& info ATTRIBUTE_UNUSED)
865 override REQUIRES_SHARED(art::Locks::mutator_lock_) {
866 for (const art::Handle<art::mirror::Object>& m : monitors) {
867 if (m.Get() == obj) {
868 return;
869 }
870 }
871 monitors.push_back(hs.NewHandle(obj));
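    // This monitor was only found via the JNI monitor roots, so there is no Java stack frame to
    // associate with it; record a depth of -1.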
872 stack_depths.push_back(-1);
873 }
874
875 art::VariableSizedHandleScope hs;
876 jint current_stack_depth;
877 std::vector<art::Handle<art::mirror::Object>> monitors;
878 std::vector<jint> stack_depths;
879 };
880
881 template<typename Fn>
882 struct MonitorInfoClosure : public art::Closure {
883 public:
884   explicit MonitorInfoClosure(Fn handle_results)
885 : err_(OK), handle_results_(handle_results) {}
886
887   void Run(art::Thread* target) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
888 art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
889 // Find the monitors on the stack.
890 MonitorVisitor visitor(target);
891 visitor.WalkStack(/* include_transitions= */ false);
892 // Find any other monitors, including ones acquired in native code.
893 art::RootInfo root_info(art::kRootVMInternal);
894 target->GetJniEnv()->VisitMonitorRoots(&visitor, root_info);
895 err_ = handle_results_(visitor);
896 }
897
898   jvmtiError GetError() {
899 return err_;
900 }
901
902 private:
903 jvmtiError err_;
904 Fn handle_results_;
905 };
906
907
908 template <typename Fn>
909 static jvmtiError GetOwnedMonitorInfoCommon(const art::ScopedObjectAccessAlreadyRunnable& soa,
910 jthread thread,
911 Fn handle_results)
912 REQUIRES_SHARED(art::Locks::mutator_lock_) {
913 art::Thread* self = art::Thread::Current();
914 MonitorInfoClosure<Fn> closure(handle_results);
915 bool called_method = false;
916 {
917 art::Locks::thread_list_lock_->ExclusiveLock(self);
918 art::Thread* target = nullptr;
919 jvmtiError err = ERR(INTERNAL);
920 if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
921 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
922 return err;
923 }
924 if (target != self) {
925 called_method = true;
926 // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
927 // Since this deals with object references we need to avoid going to sleep.
928 art::ScopedAssertNoThreadSuspension sants("Getting owned monitor usage");
929 if (!target->RequestSynchronousCheckpoint(&closure, art::ThreadState::kRunnable)) {
930 return ERR(THREAD_NOT_ALIVE);
931 }
932 } else {
933 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
934 }
935 }
936   // We cannot run the closure on the current thread while holding the thread_list_lock_, since it
937   // may call into the verifier, which can cause the current thread to suspend for GC; suspending
938   // while holding the ThreadListLock would be a bad thing to do. Other threads run the closure on
939   // a checkpoint, so they are fine, but for the current thread we need to drop the mutex first.
940 if (!called_method) {
941 closure.Run(self);
942 }
943 return closure.GetError();
944 }
945
946 jvmtiError StackUtil::GetOwnedMonitorStackDepthInfo(jvmtiEnv* env,
947 jthread thread,
948 jint* info_cnt,
949 jvmtiMonitorStackDepthInfo** info_ptr) {
950 if (info_cnt == nullptr || info_ptr == nullptr) {
951 return ERR(NULL_POINTER);
952 }
953 art::ScopedObjectAccess soa(art::Thread::Current());
954 std::vector<art::GcRoot<art::mirror::Object>> mons;
955 std::vector<uint32_t> depths;
956 auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
957 for (size_t i = 0; i < visitor.monitors.size(); i++) {
958 mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
959 depths.push_back(visitor.stack_depths[i]);
960 }
961 return OK;
962 };
963 jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
964 if (err != OK) {
965 return err;
966 }
967 auto nbytes = sizeof(jvmtiMonitorStackDepthInfo) * mons.size();
968 err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(info_ptr));
969 if (err != OK) {
970 return err;
971 }
972 *info_cnt = mons.size();
973 for (uint32_t i = 0; i < mons.size(); i++) {
974 (*info_ptr)[i] = {
975 soa.AddLocalReference<jobject>(mons[i].Read()),
976 static_cast<jint>(depths[i])
977 };
978 }
979 return err;
980 }
981
982 jvmtiError StackUtil::GetOwnedMonitorInfo(jvmtiEnv* env,
983 jthread thread,
984 jint* owned_monitor_count_ptr,
985 jobject** owned_monitors_ptr) {
986 if (owned_monitor_count_ptr == nullptr || owned_monitors_ptr == nullptr) {
987 return ERR(NULL_POINTER);
988 }
989 art::ScopedObjectAccess soa(art::Thread::Current());
990 std::vector<art::GcRoot<art::mirror::Object>> mons;
991 auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
992 for (size_t i = 0; i < visitor.monitors.size(); i++) {
993 mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
994 }
995 return OK;
996 };
997 jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
998 if (err != OK) {
999 return err;
1000 }
1001 auto nbytes = sizeof(jobject) * mons.size();
1002 err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(owned_monitors_ptr));
1003 if (err != OK) {
1004 return err;
1005 }
1006 *owned_monitor_count_ptr = mons.size();
1007 for (uint32_t i = 0; i < mons.size(); i++) {
1008 (*owned_monitors_ptr)[i] = soa.AddLocalReference<jobject>(mons[i].Read());
1009 }
1010 return err;
1011 }
1012
1013 jvmtiError StackUtil::NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth) {
1014 if (depth < 0) {
1015 return ERR(ILLEGAL_ARGUMENT);
1016 }
1017 ArtJvmTiEnv* tienv = ArtJvmTiEnv::AsArtJvmTiEnv(env);
1018 art::Thread* self = art::Thread::Current();
1019 art::Thread* target;
1020
1021 ScopedNoUserCodeSuspension snucs(self);
1022 // From now on we know we cannot get suspended by user-code.
1023 // NB This does a SuspendCheck (during thread state change) so we need to make
1024 // sure we don't have the 'suspend_lock' locked here.
1025 art::ScopedObjectAccess soa(self);
1026 art::Locks::thread_list_lock_->ExclusiveLock(self);
1027 jvmtiError err = ERR(INTERNAL);
1028 if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
1029 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1030 return err;
1031 }
1032 if (target != self) {
1033 // TODO This is part of the spec but we could easily avoid needing to do it.
1034 // We would just put all the logic into a sync-checkpoint.
1035 art::Locks::thread_suspend_count_lock_->ExclusiveLock(self);
1036 if (target->GetUserCodeSuspendCount() == 0) {
1037 art::Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
1038 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1039 return ERR(THREAD_NOT_SUSPENDED);
1040 }
1041 art::Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
1042 }
1043 // We hold the user_code_suspension_lock_ so the target thread is staying
1044 // suspended until we are done (unless it's 'self' in which case we don't care
1045 // since we aren't going to be returning).
1046 // TODO We could implement this using a synchronous checkpoint and not bother
1047 // with any of the suspension stuff. The spec does specifically say to return
1048 // THREAD_NOT_SUSPENDED though. Find the requested stack frame.
1049 std::unique_ptr<art::Context> context(art::Context::Create());
1050 FindFrameAtDepthVisitor visitor(target, context.get(), depth);
1051 visitor.WalkStack();
1052 if (!visitor.FoundFrame()) {
1053 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1054 return ERR(NO_MORE_FRAMES);
1055 }
1056 art::ArtMethod* method = visitor.GetMethod();
1057 if (method->IsNative()) {
1058 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1059 return ERR(OPAQUE_FRAME);
1060 }
1061 // From here we are sure to succeed.
1062 bool needs_instrument = false;
1063 // Get/create a shadow frame
1064 art::ShadowFrame* shadow_frame =
1065 visitor.GetOrCreateShadowFrame(&needs_instrument);
1066 {
1067 art::WriterMutexLock lk(self, tienv->event_info_mutex_);
1068 if (LIKELY(!shadow_frame->NeedsNotifyPop())) {
1069 // Ensure we won't miss exceptions being thrown if we get jit-compiled. We
1070 // only do this for the first NotifyPopFrame.
1071 target->IncrementForceInterpreterCount();
1072
1073 // Mark shadow frame as needs_notify_pop_
1074 shadow_frame->SetNotifyPop(true);
1075 }
1076 tienv->notify_frames.insert(shadow_frame);
1077 }
1078   // Make sure we will go to the interpreter and use the shadow frames.
1079 if (needs_instrument) {
1080 art::FunctionClosure fc([](art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_) {
1081 DeoptManager::Get()->DeoptimizeThread(self);
1082 });
1083 target->RequestSynchronousCheckpoint(&fc);
1084 } else {
1085 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1086 }
1087 return OK;
1088 }
1089
1090 jvmtiError StackUtil::PopFrame(jvmtiEnv* env, jthread thread) {
1091 art::Thread* self = art::Thread::Current();
1092 art::Thread* target;
1093
1094 ScopedNoUserCodeSuspension snucs(self);
1095 // From now on we know we cannot get suspended by user-code.
1096 // NB This does a SuspendCheck (during thread state change) so we need to make
1097 // sure we don't have the 'suspend_lock' locked here.
1098 art::ScopedObjectAccess soa(self);
1099 art::Locks::thread_list_lock_->ExclusiveLock(self);
1100 jvmtiError err = ERR(INTERNAL);
1101 if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
1102 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1103 return err;
1104 }
1105 {
1106 art::Locks::thread_suspend_count_lock_->ExclusiveLock(self);
1107 if (target == self || target->GetUserCodeSuspendCount() == 0) {
1108 // We cannot be the current thread for this function.
1109 art::Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
1110 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1111 return ERR(THREAD_NOT_SUSPENDED);
1112 }
1113 art::Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
1114 }
1115 JvmtiGlobalTLSData* tls_data = ThreadUtil::GetGlobalTLSData(target);
1116 constexpr art::StackVisitor::StackWalkKind kWalkKind =
1117 art::StackVisitor::StackWalkKind::kIncludeInlinedFrames;
1118 if (tls_data != nullptr &&
1119 tls_data->disable_pop_frame_depth !=
1120 JvmtiGlobalTLSData::kNoDisallowedPopFrame &&
1121 tls_data->disable_pop_frame_depth ==
1122 art::StackVisitor::ComputeNumFrames(target, kWalkKind)) {
1123 JVMTI_LOG(WARNING, env)
1124 << "Disallowing frame pop due to in-progress class-load/prepare. "
1125 << "Frame at depth " << tls_data->disable_pop_frame_depth << " was "
1126 << "marked as un-poppable by the jvmti plugin. See b/117615146 for "
1127 << "more information.";
1128 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1129 return ERR(OPAQUE_FRAME);
1130 }
1131 // We hold the user_code_suspension_lock_ so the target thread is staying
1132 // suspended until we are done.
1133 std::unique_ptr<art::Context> context(art::Context::Create());
1134 FindFrameAtDepthVisitor final_frame(target, context.get(), 0);
1135 FindFrameAtDepthVisitor penultimate_frame(target, context.get(), 1);
1136 final_frame.WalkStack();
1137 penultimate_frame.WalkStack();
1138
1139 if (!final_frame.FoundFrame() || !penultimate_frame.FoundFrame()) {
1140 // Cannot do it if there is only one frame!
1141 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1142 return ERR(NO_MORE_FRAMES);
1143 }
1144
1145 art::ArtMethod* called_method = final_frame.GetMethod();
1146 art::ArtMethod* calling_method = penultimate_frame.GetMethod();
1147 if (calling_method->IsNative() || called_method->IsNative()) {
1148 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1149 return ERR(OPAQUE_FRAME);
1150 }
1151 // From here we are sure to succeed.
1152
1153 // Get/create a shadow frame
1154 bool created_final_frame = false;
1155 bool created_penultimate_frame = false;
1156 art::ShadowFrame* called_shadow_frame =
1157 final_frame.GetOrCreateShadowFrame(&created_final_frame);
1158 art::ShadowFrame* calling_shadow_frame =
1159 penultimate_frame.GetOrCreateShadowFrame(&created_penultimate_frame);
1160
1161 CHECK_NE(called_shadow_frame, calling_shadow_frame)
1162 << "Frames at different depths not different!";
1163
1164 // Tell the shadow-frame to return immediately and skip all exit events.
1165 called_shadow_frame->SetForcePopFrame(true);
1166 calling_shadow_frame->SetForceRetryInstruction(true);
1167
1168   // Make sure we will go to the interpreter and use the shadow frames. The
1169 // early return for the final frame will force everything to the interpreter
1170 // so we only need to instrument if it was not present.
1171 if (created_final_frame) {
1172 art::FunctionClosure fc([](art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_) {
1173 DeoptManager::Get()->DeoptimizeThread(self);
1174 });
1175 target->RequestSynchronousCheckpoint(&fc);
1176 } else {
1177 art::Locks::thread_list_lock_->ExclusiveUnlock(self);
1178 }
1179 return OK;
1180 }
1181
1182 } // namespace openjdkjvmti
1183