/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "base/logging.h"
#include "base/mutex.h"
#include "debugger.h"
#include "jni_internal.h"
#include "scoped_fast_native_object_access.h"
#include "ScopedLocalRef.h"
#include "ScopedPrimitiveArray.h"
#include "stack.h"
#include "thread_list.h"

namespace art {

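// Enables or disables recording of recent allocations for DDMS allocation tracking.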
static void DdmVmInternal_enableRecentAllocations(JNIEnv*, jclass, jboolean enable) {
  Dbg::SetAllocTrackingEnabled(enable);
}

static jbyteArray DdmVmInternal_getRecentAllocations(JNIEnv* env, jclass) {
  ScopedFastNativeObjectAccess soa(env);
  return Dbg::GetRecentAllocations();
}

static jboolean DdmVmInternal_getRecentAllocationStatus(JNIEnv*, jclass) {
  return Dbg::IsAllocTrackingEnabled();
}

/*
 * Get a stack trace as an array of StackTraceElement objects.  Returns
 * NULL on failure, e.g. if the threadId couldn't be found.
 */
static jobjectArray DdmVmInternal_getStackTraceById(JNIEnv* env, jclass, jint thin_lock_id) {
  jobjectArray trace = nullptr;
  Thread* const self = Thread::Current();
  if (static_cast<uint32_t>(thin_lock_id) == self->GetThreadId()) {
    // No need to suspend ourselves to build the stack trace.
    ScopedObjectAccess soa(env);
    jobject internal_trace = self->CreateInternalStackTrace<false>(soa);
    trace = Thread::InternalStackTraceToStackTraceElementArray(soa, internal_trace);
  } else {
    ThreadList* thread_list = Runtime::Current()->GetThreadList();
    bool timed_out;

    // Check for a valid thread id.
    if (thin_lock_id == ThreadList::kInvalidThreadId) {
      return nullptr;
    }

    // Suspend the thread to build its stack trace.
    Thread* thread;
    {
      // Take the suspend thread lock to avoid races with threads trying to suspend this one.
      MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
      thread = thread_list->SuspendThreadByThreadId(thin_lock_id, false, &timed_out);
    }
    if (thread != nullptr) {
      {
        ScopedObjectAccess soa(env);
        jobject internal_trace = thread->CreateInternalStackTrace<false>(soa);
        trace = Thread::InternalStackTraceToStackTraceElementArray(soa, internal_trace);
      }
      // Restart the suspended thread.
      thread_list->Resume(thread, false);
    } else {
      if (timed_out) {
        LOG(ERROR) << "Trying to get thread's stack by id failed as the thread failed to suspend "
            "within a generous timeout.";
      }
    }
  }
  return trace;
}

static void ThreadCountCallback(Thread*, void* context) {
  uint16_t& count = *reinterpret_cast<uint16_t*>(context);
  ++count;
}

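// The THST header is 4 bytes and each per-thread record is 18 bytes
// (4 + 1 + 4 + 4 + 4 + 1); see ThreadStatsGetterCallback below.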
static const int kThstBytesPerEntry = 18;
static const int kThstHeaderLen = 4;

static void ThreadStatsGetterCallback(Thread* t, void* context) {
  /*
   * Generate the contents of a THST chunk.  The data encompasses all known
   * threads.
   *
   * Response has:
   *  (1b) header len
   *  (1b) bytes per entry
   *  (2b) thread count
   * Then, for each thread:
   *  (4b) thread id
   *  (1b) thread status
   *  (4b) tid
   *  (4b) utime
   *  (4b) stime
   *  (1b) is daemon?
   *
   * The length fields exist in anticipation of adding additional fields
   * without wanting to break ddms or bump the full protocol version.  I don't
   * think it warrants full versioning.  They might be extraneous and could
   * be removed from a future version.
   */
  char native_thread_state;
  int utime;
  int stime;
  int task_cpu;
  GetTaskStats(t->GetTid(), &native_thread_state, &utime, &stime, &task_cpu);

  std::vector<uint8_t>& bytes = *reinterpret_cast<std::vector<uint8_t>*>(context);
  JDWP::Append4BE(bytes, t->GetThreadId());
  JDWP::Append1BE(bytes, Dbg::ToJdwpThreadStatus(t->GetState()));
  JDWP::Append4BE(bytes, t->GetTid());
  JDWP::Append4BE(bytes, utime);
  JDWP::Append4BE(bytes, stime);
  JDWP::Append1BE(bytes, t->IsDaemon());
}
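
// Illustrative sketch only, not part of the runtime: shows how a DDMS-side
// reader could decode one THST record laid out above. The names
// (ThstEntrySketch, ReadBE32Sketch, DecodeThstEntrySketch) are hypothetical,
// and the code assumes big-endian reads mirroring the Append*BE calls used
// when the chunk is built.
struct ThstEntrySketch {
  uint32_t thread_id;   // (4b) VM thread id
  uint8_t status;       // (1b) JDWP thread status
  uint32_t tid;         // (4b) native tid
  uint32_t utime;       // (4b) user time
  uint32_t stime;       // (4b) system time
  uint8_t is_daemon;    // (1b) daemon flag
};

static inline uint32_t ReadBE32Sketch(const uint8_t* p) {
  return (static_cast<uint32_t>(p[0]) << 24) | (static_cast<uint32_t>(p[1]) << 16) |
         (static_cast<uint32_t>(p[2]) << 8) | static_cast<uint32_t>(p[3]);
}

// |p| must point at the start of one 18-byte record (kThstBytesPerEntry).
static inline ThstEntrySketch DecodeThstEntrySketch(const uint8_t* p) {
  ThstEntrySketch e;
  e.thread_id = ReadBE32Sketch(p);   // bytes 0-3
  e.status = p[4];                   // byte 4
  e.tid = ReadBE32Sketch(p + 5);     // bytes 5-8
  e.utime = ReadBE32Sketch(p + 9);   // bytes 9-12
  e.stime = ReadBE32Sketch(p + 13);  // bytes 13-16
  e.is_daemon = p[17];               // byte 17
  return e;
}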

// Builds the THST payload: the header described above, then one fixed-size
// record per live thread, collected while holding the thread list lock.
static jbyteArray DdmVmInternal_getThreadStats(JNIEnv* env, jclass) {
  std::vector<uint8_t> bytes;
  Thread* self = static_cast<JNIEnvExt*>(env)->self;
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    ThreadList* thread_list = Runtime::Current()->GetThreadList();

    uint16_t thread_count = 0;
    thread_list->ForEach(ThreadCountCallback, &thread_count);

    JDWP::Append1BE(bytes, kThstHeaderLen);
    JDWP::Append1BE(bytes, kThstBytesPerEntry);
    JDWP::Append2BE(bytes, thread_count);

    thread_list->ForEach(ThreadStatsGetterCallback, &bytes);
  }

  jbyteArray result = env->NewByteArray(bytes.size());
  if (result != nullptr) {
    env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0]));
  }
  return result;
}

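// Heap-info (HPIF) and heap-segment (HPSG/NHSG) notifications are forwarded
// directly to the debugger support code.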
static jboolean DdmVmInternal_heapInfoNotify(JNIEnv* env, jclass, jint when) {
  ScopedFastNativeObjectAccess soa(env);
  return Dbg::DdmHandleHpifChunk(static_cast<Dbg::HpifWhen>(when));
}

static jboolean DdmVmInternal_heapSegmentNotify(JNIEnv*, jclass, jint when, jint what, jboolean native) {
  return Dbg::DdmHandleHpsgNhsgChunk(static_cast<Dbg::HpsgWhen>(when), static_cast<Dbg::HpsgWhat>(what), native);
}

static void DdmVmInternal_threadNotify(JNIEnv*, jclass, jboolean enable) {
  Dbg::DdmSetThreadNotification(enable);
}

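// A leading '!' in the signature marks the method as fast native, matching the
// ScopedFastNativeObjectAccess used in its implementation above.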
static JNINativeMethod gMethods[] = {
  NATIVE_METHOD(DdmVmInternal, enableRecentAllocations, "(Z)V"),
  NATIVE_METHOD(DdmVmInternal, getRecentAllocations, "!()[B"),
  NATIVE_METHOD(DdmVmInternal, getRecentAllocationStatus, "!()Z"),
  NATIVE_METHOD(DdmVmInternal, getStackTraceById, "(I)[Ljava/lang/StackTraceElement;"),
  NATIVE_METHOD(DdmVmInternal, getThreadStats, "()[B"),
  NATIVE_METHOD(DdmVmInternal, heapInfoNotify, "!(I)Z"),
  NATIVE_METHOD(DdmVmInternal, heapSegmentNotify, "(IIZ)Z"),
  NATIVE_METHOD(DdmVmInternal, threadNotify, "(Z)V"),
};

void register_org_apache_harmony_dalvik_ddmc_DdmVmInternal(JNIEnv* env) {
  REGISTER_NATIVE_METHODS("org/apache/harmony/dalvik/ddmc/DdmVmInternal");
}

}  // namespace art