/*
 * Copyright (C) 2024 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "trace_profile.h"

#include "android-base/stringprintf.h"
#include "arch/context.h"
#include "art_method-inl.h"
#include "base/leb128.h"
#include "base/mutex.h"
#include "base/unix_file/fd_file.h"
#include "com_android_art_flags.h"
#include "dex/descriptors_names.h"
#include "gc/task_processor.h"
#include "oat/oat_quick_method_header.h"
#include "runtime.h"
#include "stack.h"
#include "thread-current-inl.h"
#include "thread.h"
#include "thread_list.h"
#include "trace.h"
#include "trace_common.h"

namespace art_flags = com::android::art::flags;

namespace art HIDDEN {

using android::base::StringPrintf;

// This specifies the maximum number of bytes we need for encoding one entry. Each entry just
// consists of an SLEB-encoded value of the method and action encoding, which is at most
// sizeof(uintptr_t) bytes.
static constexpr size_t kMaxBytesPerTraceEntry = sizeof(uintptr_t);

static constexpr size_t kMaxEntriesAfterFlush = kAlwaysOnTraceBufSize / 2;

// We don't handle buffer overflows when processing the raw trace entries. We have a maximum of
// kAlwaysOnTraceBufSize raw entries and we need a maximum of kMaxBytesPerTraceEntry to encode
// each entry. To avoid overflow, we ensure that there are at least kMinBufSizeForEncodedData
// bytes of free space in the buffer.
static constexpr size_t kMinBufSizeForEncodedData = kAlwaysOnTraceBufSize * kMaxBytesPerTraceEntry;

static constexpr size_t kProfileMagicValue = 0x4C4F4D54;

// TODO(mythria): 10 is a randomly chosen value. Tune it if required.
static constexpr size_t kBufSizeForEncodedData = kMinBufSizeForEncodedData * 10;

static constexpr size_t kAlwaysOnTraceHeaderSize = 12;
static constexpr size_t kAlwaysOnMethodInfoHeaderSize = 11;
static constexpr size_t kAlwaysOnThreadInfoHeaderSize = 7;
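
// Sizes of the fixed headers emitted when dumping encoded data (see DumpLongRunningMethodBuffer
// and DumpThreadMethodInfo below):
//   kAlwaysOnTraceHeaderSize (12):      1-byte entry-header tag, 4-byte thread id, 3-byte record
//                                       count and 4-byte payload size. DumpBuffer reserves the
//                                       same 12 bytes but fills only the tag, thread id and
//                                       record count.
//   kAlwaysOnMethodInfoHeaderSize (11): 1-byte method-info tag, 8-byte method pointer and 2-byte
//                                       method-info line length.
//   kAlwaysOnThreadInfoHeaderSize (7):  1-byte thread-info tag, 4-byte thread id and 2-byte
//                                       thread-name length.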

bool TraceProfiler::profile_in_progress_ = false;

TraceData* TraceProfiler::trace_data_ = nullptr;

void TraceData::AddTracedThread(Thread* thread) {
  MutexLock mu(Thread::Current(), trace_data_lock_);
  size_t thread_id = thread->GetTid();
  if (traced_threads_.find(thread_id) != traced_threads_.end()) {
    return;
  }

  std::string thread_name;
  thread->GetThreadName(thread_name);
  traced_threads_.emplace(thread_id, thread_name);
}

void TraceData::MaybeWaitForTraceDumpToFinish() {
  if (!trace_dump_in_progress_) {
    return;
  }
  trace_dump_condition_.Wait(Thread::Current());
}

void TraceData::SignalTraceDumpComplete() {
  trace_dump_in_progress_ = false;
  trace_dump_condition_.Broadcast(Thread::Current());
}

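// Appends |size| bytes of already-encoded trace data to the in-memory store for long running
// methods. Data accumulates in fixed-size buffers of kBufSizeForEncodedData bytes; when the
// current buffer fills up, it is moved into overflow_buffers_ and a fresh buffer is started, so
// a single append may be split across two buffers.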
void TraceData::AppendToLongRunningMethods(const uint8_t* buffer, size_t size) {
  MutexLock mu(Thread::Current(), trace_data_lock_);
  if (curr_buffer_ == nullptr) {
    curr_buffer_.reset(new uint8_t[kBufSizeForEncodedData]);
    curr_index_ = 0;
  }
  if (curr_index_ + size <= kBufSizeForEncodedData) {
    memcpy(curr_buffer_.get() + curr_index_, buffer, size);
    curr_index_ += size;
  } else {
    size_t remaining_bytes = kBufSizeForEncodedData - curr_index_;
    if (remaining_bytes != 0) {
      memcpy(curr_buffer_.get() + curr_index_, buffer, remaining_bytes);
    }
    overflow_buffers_.push_back(std::move(curr_buffer_));
    curr_buffer_.reset(new uint8_t[kBufSizeForEncodedData]);
    memcpy(curr_buffer_.get(), buffer + remaining_bytes, size - remaining_bytes);
    // Track how much of the fresh buffer is already occupied by the spilled-over data.
    curr_index_ = size - remaining_bytes;
  }
}

void TraceProfiler::AllocateBuffer(Thread* thread) {
  if (!art_flags::always_enable_profile_code()) {
    return;
  }

  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::trace_lock_);
  if (!profile_in_progress_) {
    return;
  }

  auto buffer = new uintptr_t[kAlwaysOnTraceBufSize];
  size_t index = kAlwaysOnTraceBufSize;
  if (trace_data_->GetTraceType() == LowOverheadTraceType::kAllMethods) {
    memset(buffer, 0, kAlwaysOnTraceBufSize * sizeof(uintptr_t));
  } else {
    DCHECK(trace_data_->GetTraceType() == LowOverheadTraceType::kLongRunningMethods);
    // For long running methods add a placeholder method exit entry. This avoids
    // additional checks on method exits to see if the previous entry is valid.
    index--;
    buffer[index] = 0x1;
  }
  thread->SetMethodTraceBuffer(buffer, index);
}

LowOverheadTraceType TraceProfiler::GetTraceType() {
  MutexLock mu(Thread::Current(), *Locks::trace_lock_);
  // The low-overhead trace entry points are configured based on the trace type. When trace_data_
  // is null, no low-overhead tracing is running, so we use nop entry points.
  if (trace_data_ == nullptr) {
    return LowOverheadTraceType::kNone;
  }

  return trace_data_->GetTraceType();
}

namespace {
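// Seeds |method_trace_buffer| with synthetic method-entry events for the methods already on
// |thread|'s stack when long-running-method tracing starts, so that their eventual exits can be
// matched against an entry. All seeded entries share a single timestamp and are followed by a
// placeholder method-exit entry.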
void RecordMethodsOnThreadStack(Thread* thread, uintptr_t* method_trace_buffer)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  struct MethodEntryStackVisitor final : public StackVisitor {
    MethodEntryStackVisitor(Thread* thread_in, Context* context)
        : StackVisitor(thread_in, context, StackVisitor::StackWalkKind::kSkipInlinedFrames) {}

    bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
      ArtMethod* m = GetMethod();
      if (m != nullptr && !m->IsRuntimeMethod()) {
        if (GetCurrentShadowFrame() != nullptr) {
          // TODO(mythria): Support low-overhead tracing for the switch interpreter.
        } else {
          const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
          if (method_header == nullptr) {
            // TODO(mythria): Consider low-overhead tracing support for the GenericJni stubs.
          } else {
            // Ignore nterp methods. We don't support recording trace events in nterp.
            if (!method_header->IsNterpMethodHeader()) {
              stack_methods_.push_back(m);
            }
          }
        }
      }
      return true;
    }

    std::vector<ArtMethod*> stack_methods_;
  };

  std::unique_ptr<Context> context(Context::Create());
  MethodEntryStackVisitor visitor(thread, context.get());
  visitor.WalkStack(true);

  // Create method entry events for all methods currently on the thread's stack.
  uint64_t init_ts = TimestampCounter::GetTimestamp();
  // Set the lsb to 0 to indicate method entry.
  init_ts = init_ts & ~1;
  size_t index = kAlwaysOnTraceBufSize - 1;
  for (auto smi = visitor.stack_methods_.rbegin(); smi != visitor.stack_methods_.rend(); smi++) {
    method_trace_buffer[index--] = reinterpret_cast<uintptr_t>(*smi);
    method_trace_buffer[index--] = init_ts;

    if (index < kMaxEntriesAfterFlush) {
      // To keep the implementation simple, ignore methods deep down the stack. If the call stack
      // unwinds beyond this point then we will see method exits without corresponding method
      // entries.
      break;
    }
  }

  // Record a placeholder method exit event into the buffer so that we record method exits for
  // the methods that are currently on the stack.
  method_trace_buffer[index] = 0x1;
  thread->SetMethodTraceBuffer(method_trace_buffer, index);
}

// Records the thread and method info.
void DumpThreadMethodInfo(const std::unordered_map<size_t, std::string>& traced_threads,
                          const std::unordered_set<ArtMethod*>& traced_methods,
                          std::ostringstream& os) REQUIRES_SHARED(Locks::mutator_lock_) {
  // Dump data about thread information.
  for (const auto& it : traced_threads) {
    uint8_t thread_header[kAlwaysOnThreadInfoHeaderSize];
    thread_header[0] = kThreadInfoHeaderV2;
    Append4LE(thread_header + 1, it.first);
    Append2LE(thread_header + 5, it.second.length());
    os.write(reinterpret_cast<char*>(thread_header), kAlwaysOnThreadInfoHeaderSize);
    os.write(it.second.c_str(), it.second.length());
  }

  // Dump data about method information.
  for (ArtMethod* method : traced_methods) {
    std::string method_line = GetMethodInfoLine(method);
    uint16_t method_line_length = static_cast<uint16_t>(method_line.length());
    uint8_t method_header[kAlwaysOnMethodInfoHeaderSize];
    method_header[0] = kMethodInfoHeaderV2;
    Append8LE(method_header + 1, reinterpret_cast<uint64_t>(method));
    Append2LE(method_header + 9, method_line_length);
    os.write(reinterpret_cast<char*>(method_header), kAlwaysOnMethodInfoHeaderSize);
    os.write(method_line.c_str(), method_line_length);
  }
}
}  // namespace

class TraceStopTask : public gc::HeapTask {
 public:
  explicit TraceStopTask(uint64_t target_run_time) : gc::HeapTask(target_run_time) {}

  void Run([[maybe_unused]] Thread* self) override { TraceProfiler::TraceTimeElapsed(); }
};

static class LongRunningMethodsTraceStartCheckpoint final : public Closure {
 public:
  void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
    auto buffer = new uintptr_t[kAlwaysOnTraceBufSize];
    // Record methods that are currently on stack.
    RecordMethodsOnThreadStack(thread, buffer);
    thread->UpdateTlsLowOverheadTraceEntrypoints(LowOverheadTraceType::kLongRunningMethods);
  }
} long_running_methods_checkpoint_;

static class AllMethodsTraceStartCheckpoint final : public Closure {
 public:
  void Run(Thread* thread) override {
    auto buffer = new uintptr_t[kAlwaysOnTraceBufSize];
    memset(buffer, 0, kAlwaysOnTraceBufSize * sizeof(uintptr_t));
    thread->UpdateTlsLowOverheadTraceEntrypoints(LowOverheadTraceType::kAllMethods);
    thread->SetMethodTraceBuffer(buffer, kAlwaysOnTraceBufSize);
  }
} all_methods_checkpoint_;

void TraceProfiler::Start(LowOverheadTraceType trace_type, uint64_t trace_duration_ns) {
  if (!art_flags::always_enable_profile_code()) {
    LOG(ERROR) << "Feature not supported. Please build with ART_ALWAYS_ENABLE_PROFILE_CODE.";
    return;
  }

  TimestampCounter::InitializeTimestampCounters();

  Runtime* runtime = Runtime::Current();
  Thread* self = Thread::Current();
  uint64_t new_end_time = 0;
  bool add_trace_end_task = false;
  {
    MutexLock mu(self, *Locks::trace_lock_);
    if (Trace::IsTracingEnabledLocked()) {
      LOG(ERROR) << "Cannot start a low-overhead trace when regular tracing is in progress";
      return;
    }

    if (profile_in_progress_) {
      // We allow overlapping starts only when collecting long running methods.
      // If a trace of a different type is in progress, we ignore the request.
      if (trace_type == LowOverheadTraceType::kAllMethods ||
          trace_data_->GetTraceType() != trace_type) {
        LOG(ERROR) << "Profile already in progress. Ignoring this request";
        return;
      }

      // For long running methods, just update the end time if there's a trace already in progress.
      new_end_time = NanoTime() + trace_duration_ns;
      if (trace_data_->GetTraceEndTime() < new_end_time) {
        trace_data_->SetTraceEndTime(new_end_time);
        add_trace_end_task = true;
      }
    } else {
      profile_in_progress_ = true;
      trace_data_ = new TraceData(trace_type);

      if (trace_type == LowOverheadTraceType::kAllMethods) {
        runtime->GetThreadList()->RunCheckpoint(&all_methods_checkpoint_);
      } else {
        runtime->GetThreadList()->RunCheckpoint(&long_running_methods_checkpoint_);
      }

      if (trace_type == LowOverheadTraceType::kLongRunningMethods) {
        new_end_time = NanoTime() + trace_duration_ns;
        add_trace_end_task = true;
        trace_data_->SetTraceEndTime(new_end_time);
      }
    }
  }

  if (add_trace_end_task) {
    // Add a task that stops the tracing after trace_duration.
    runtime->GetHeap()->AddHeapTask(new TraceStopTask(new_end_time));
  }
}

void TraceProfiler::Start() {
  TraceProfiler::Start(LowOverheadTraceType::kAllMethods, /* trace_duration_ns= */ 0);
}

void TraceProfiler::Stop() {
  if (!art_flags::always_enable_profile_code()) {
    LOG(ERROR) << "Feature not supported. Please build with ART_ALWAYS_ENABLE_PROFILE_CODE.";
    return;
  }

  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::trace_lock_);
  TraceProfiler::StopLocked();
}

void TraceProfiler::StopLocked() {
  if (!profile_in_progress_) {
    LOG(ERROR) << "No profile in progress but a stop was requested";
    return;
  }

  // We should not delete trace_data_ while there is an ongoing trace dump, so wait for any
  // in-progress trace dump to finish.
  trace_data_->MaybeWaitForTraceDumpToFinish();

  static FunctionClosure reset_buffer([](Thread* thread) {
    auto buffer = thread->GetMethodTraceBuffer();
    if (buffer != nullptr) {
      delete[] buffer;
      thread->SetMethodTraceBuffer(/* buffer= */ nullptr, /* offset= */ 0);
    }
    thread->UpdateTlsLowOverheadTraceEntrypoints(LowOverheadTraceType::kNone);
  });

  Runtime::Current()->GetThreadList()->RunCheckpoint(&reset_buffer);
  profile_in_progress_ = false;
  DCHECK_NE(trace_data_, nullptr);
  delete trace_data_;
  trace_data_ = nullptr;
}

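// Encodes the raw entries of a kAllMethods per-thread buffer into |buffer|. Raw entries are
// consumed from the highest index (oldest) downwards; each word packs an ArtMethod pointer with
// a TraceAction in its low bits. Every record is emitted as the SLEB128 delta against the
// previous word, which stays small when consecutive methods have nearby addresses. Method exits
// carry no method pointer and are encoded as 1 (or 0 when the previous record was also an exit),
// just enough to set the lsb. For example (hypothetical addresses, method-enter action assumed
// to be 0): entering 0x7000, entering 0x7040, then exiting both encodes as the deltas
// 0x7000, 0x40, 1, 0. Methods seen are also collected into |methods|.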
size_t TraceProfiler::DumpBuffer(uint32_t thread_id,
                                 uintptr_t* method_trace_entries,
                                 uint8_t* buffer,
                                 std::unordered_set<ArtMethod*>& methods) {
  // Encode the header at the end once we compute the number of records.
  uint8_t* curr_buffer_ptr = buffer + kAlwaysOnTraceHeaderSize;

  int num_records = 0;
  uintptr_t prev_method_action_encoding = 0;
  int prev_action = -1;
  for (size_t i = kAlwaysOnTraceBufSize - 1; i > 0; i -= 1) {
    uintptr_t method_action_encoding = method_trace_entries[i];
    // A 0 value indicates the rest of the entries are empty.
    if (method_action_encoding == 0) {
      break;
    }

    int action = method_action_encoding & ~kMaskTraceAction;
    int64_t diff;
    if (action == TraceAction::kTraceMethodEnter) {
      diff = method_action_encoding - prev_method_action_encoding;

      ArtMethod* method = reinterpret_cast<ArtMethod*>(method_action_encoding & kMaskTraceAction);
      methods.insert(method);
    } else {
      // On a method exit, we don't record any information about the method. We just need a 1 in
      // the lsb; the method can be derived from the last method that entered. To keep the encoded
      // value small, just add the smallest value that makes the lsb 1.
      if (prev_action == TraceAction::kTraceMethodExit) {
        diff = 0;
      } else {
        diff = 1;
      }
    }
    curr_buffer_ptr = EncodeSignedLeb128(curr_buffer_ptr, diff);
    num_records++;
    prev_method_action_encoding = method_action_encoding;
    prev_action = action;
  }

  // Fill in the header information:
  // 1 byte of header identifier
  // 4 bytes of thread_id
  // 3 bytes of number of records
  buffer[0] = kEntryHeaderV2;
  Append4LE(buffer + 1, thread_id);
  Append3LE(buffer + 5, num_records);
  return curr_buffer_ptr - buffer;
}

void TraceProfiler::Dump(int fd) {
  if (!art_flags::always_enable_profile_code()) {
    LOG(ERROR) << "Feature not supported. Please build with ART_ALWAYS_ENABLE_PROFILE_CODE.";
    return;
  }

  std::unique_ptr<File> trace_file(new File(fd, /*check_usage=*/true));
  std::ostringstream os;
  Dump(std::move(trace_file), os);
}

void TraceProfiler::Dump(const char* filename) {
  if (!art_flags::always_enable_profile_code()) {
    LOG(ERROR) << "Feature not supported. Please build with ART_ALWAYS_ENABLE_PROFILE_CODE.";
    return;
  }

  std::unique_ptr<File> trace_file(OS::CreateEmptyFileWriteOnly(filename));
  if (trace_file == nullptr) {
    PLOG(ERROR) << "Unable to open trace file " << filename;
    return;
  }

  std::ostringstream os;
  Dump(std::move(trace_file), os);
}

void TraceProfiler::Dump(std::unique_ptr<File>&& trace_file, std::ostringstream& os) {
  Thread* self = Thread::Current();
  Runtime* runtime = Runtime::Current();

  size_t threads_running_checkpoint = 0;
  std::unique_ptr<TraceDumpCheckpoint> checkpoint;
  {
    MutexLock mu(self, *Locks::trace_lock_);
    if (!profile_in_progress_ || trace_data_->IsTraceDumpInProgress()) {
      if (trace_file != nullptr && !trace_file->Close()) {
        PLOG(WARNING) << "Failed to close file.";
      }
      return;
    }

    trace_data_->SetTraceDumpInProgress();

    // Collect long running methods from all the threads.
    checkpoint.reset(new TraceDumpCheckpoint(trace_data_, trace_file));
    threads_running_checkpoint = runtime->GetThreadList()->RunCheckpoint(checkpoint.get());
  }

  // Wait for all threads to dump their data.
  if (threads_running_checkpoint != 0) {
    checkpoint->WaitForThreadsToRunThroughCheckpoint(threads_running_checkpoint);
  }
  checkpoint->FinishTraceDump(os);

  if (trace_file != nullptr) {
    std::string info = os.str();
    if (!trace_file->WriteFully(info.c_str(), info.length())) {
      PLOG(WARNING) << "Failed writing information to file";
    }

    if (!trace_file->Close()) {
      PLOG(WARNING) << "Failed to close file.";
    }
  }
}

void TraceProfiler::ReleaseThreadBuffer(Thread* self) {
  if (!IsTraceProfileInProgress()) {
    return;
  }
  // TODO(mythria): Maybe it's good to cache these and dump them when requested. For now just
  // release the buffer when a thread is exiting.
  auto buffer = self->GetMethodTraceBuffer();
  delete[] buffer;
  self->SetMethodTraceBuffer(nullptr, 0);
}

bool TraceProfiler::IsTraceProfileInProgress() {
  return profile_in_progress_;
}

void TraceProfiler::StartTraceLongRunningMethods(uint64_t trace_duration_ns) {
  TraceProfiler::Start(LowOverheadTraceType::kLongRunningMethods, trace_duration_ns);
}

void TraceProfiler::TraceTimeElapsed() {
  MutexLock mu(Thread::Current(), *Locks::trace_lock_);
  DCHECK_IMPLIES(profile_in_progress_, trace_data_ != nullptr);
  if (!profile_in_progress_ || trace_data_->GetTraceEndTime() > NanoTime()) {
    // The end duration was extended by another start, so just ignore this task.
    return;
  }
  TraceProfiler::StopLocked();
}

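// Encodes the raw entries of a kLongRunningMethods per-thread buffer into |buffer|. Raw events
// are written from the end of the buffer towards the start, so they are consumed here from the
// highest index (oldest) down to |end_trace_entries|. A method entry occupies two words: the
// ArtMethod pointer followed by a timestamp with the lsb cleared. A method exit is a single
// timestamp word with the lsb set, and the literal value 0x1 is a placeholder exit that is
// skipped. Each event is emitted as an SLEB128 delta of (timestamp << 1 | is_exit) against the
// previous event; method entries additionally emit an SLEB128 delta of the method pointer.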
size_t TraceProfiler::DumpLongRunningMethodBuffer(uint32_t thread_id,
                                                  uintptr_t* method_trace_entries,
                                                  uintptr_t* end_trace_entries,
                                                  uint8_t* buffer,
                                                  std::unordered_set<ArtMethod*>& methods) {
  // Encode the header at the end once we compute the number of records.
  uint8_t* curr_buffer_ptr = buffer + kAlwaysOnTraceHeaderSize;

  int num_records = 0;
  uintptr_t prev_time_action_encoding = 0;
  uintptr_t prev_method_ptr = 0;
  int64_t end_index = end_trace_entries - method_trace_entries;
  for (int64_t i = kAlwaysOnTraceBufSize; i > end_index;) {
    uintptr_t event = method_trace_entries[--i];
    if (event == 0x1) {
      // This is a placeholder event; ignore it.
      continue;
    }

    bool is_method_exit = event & 0x1;
    uint64_t event_time;
    uintptr_t method_ptr;
    if (is_method_exit) {
      // Method exit. We only have a timestamp here.
      event_time = TimestampCounter::GetNanoTime(event & ~0x1);
    } else {
      // Method entry.
      method_ptr = event;
      event_time = TimestampCounter::GetNanoTime(method_trace_entries[--i] & ~0x1);
    }

    uint64_t time_action_encoding = event_time << 1;
    if (is_method_exit) {
      time_action_encoding |= 1;
    }
    int64_t time_action_diff = time_action_encoding - prev_time_action_encoding;
    curr_buffer_ptr = EncodeSignedLeb128(curr_buffer_ptr, time_action_diff);
    prev_time_action_encoding = time_action_encoding;

    if (!is_method_exit) {
      int64_t method_diff = method_ptr - prev_method_ptr;
      ArtMethod* method = reinterpret_cast<ArtMethod*>(method_ptr);
      methods.insert(method);
      prev_method_ptr = method_ptr;
      curr_buffer_ptr = EncodeSignedLeb128(curr_buffer_ptr, method_diff);
    }
    num_records++;
  }

  // Fill in the header information:
  // 1 byte of header identifier
  // 4 bytes of thread_id
  // 3 bytes of number of records
  // 4 bytes of the size of the data
  buffer[0] = kEntryHeaderV2;
  Append4LE(buffer + 1, thread_id);
  Append3LE(buffer + 5, num_records);
  size_t size = curr_buffer_ptr - buffer;
  Append4LE(buffer + 8, size - kAlwaysOnTraceHeaderSize);
  return curr_buffer_ptr - buffer;
}

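// Makes room in |thread|'s trace buffer and records a new event for |method|. Entries older
// than the most recent method exit are encoded and flushed to the long-running-methods store;
// the remaining (still undetermined) entries are moved back to the end of the buffer and the new
// entry or exit is recorded on top. When an exit arrives while the matching entry is still the
// newest record, the pair is dropped entirely if the elapsed time is below
// kLongRunningMethodThreshold, which is how short-running methods are filtered out.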
void TraceProfiler::FlushBufferAndRecordTraceEvent(ArtMethod* method,
                                                   Thread* thread,
                                                   bool is_entry) {
  uint64_t timestamp = TimestampCounter::GetTimestamp();
  std::unordered_set<ArtMethod*> traced_methods;
  uintptr_t* method_trace_entries = thread->GetMethodTraceBuffer();
  DCHECK(method_trace_entries != nullptr);
  uintptr_t** method_trace_curr_ptr = thread->GetTraceBufferCurrEntryPtr();

  // Find the last method exit event. We can flush all the entries before this event. We cannot
  // flush the remaining events because we haven't determined if they are long running or not.
  uintptr_t* processed_events_ptr = nullptr;
  for (uintptr_t* ptr = *method_trace_curr_ptr;
       ptr < method_trace_entries + kAlwaysOnTraceBufSize;) {
    if (*ptr & 0x1) {
      // Method exit. We need to keep the events up to and including this method exit.
      processed_events_ptr = ptr + 1;
      break;
    }
    ptr += 2;
  }

  size_t num_occupied_entries = (processed_events_ptr - *method_trace_curr_ptr);
  size_t index = kAlwaysOnTraceBufSize;

  std::unique_ptr<uint8_t[]> buffer_ptr(new uint8_t[kBufSizeForEncodedData]);
  size_t num_bytes;
  if (num_occupied_entries > kMaxEntriesAfterFlush) {
    // If we don't have sufficient space, just record a placeholder exit and flush all the
    // existing events. We have accurate timestamps to filter out these events in a
    // post-processing step. This would happen only when we have very deeply nested code
    // (~1024 levels).
    num_bytes = DumpLongRunningMethodBuffer(thread->GetTid(),
                                            method_trace_entries,
                                            *method_trace_curr_ptr,
                                            buffer_ptr.get(),
                                            traced_methods);

    // Encode a placeholder exit event. This will be ignored when dumping the methods.
    method_trace_entries[--index] = 0x1;
  } else {
    // Flush all the entries up to the method exit event.
    num_bytes = DumpLongRunningMethodBuffer(thread->GetTid(),
                                            method_trace_entries,
                                            processed_events_ptr,
                                            buffer_ptr.get(),
                                            traced_methods);

    // Move the remaining events to the start of the buffer.
    for (uintptr_t* ptr = processed_events_ptr - 1; ptr >= *method_trace_curr_ptr; ptr--) {
      method_trace_entries[--index] = *ptr;
    }
  }

  // Record the new event.
  if (is_entry) {
    method_trace_entries[--index] = reinterpret_cast<uintptr_t>(method);
    method_trace_entries[--index] = timestamp & ~1;
  } else {
    if (method_trace_entries[index] & 0x1) {
      method_trace_entries[--index] = timestamp | 1;
    } else {
      size_t prev_timestamp = method_trace_entries[index];
      if (timestamp - prev_timestamp < kLongRunningMethodThreshold) {
        index += 2;
        DCHECK_LT(index, kAlwaysOnTraceBufSize);
      } else {
        method_trace_entries[--index] = timestamp | 1;
      }
    }
  }
  *method_trace_curr_ptr = method_trace_entries + index;

  MutexLock mu(Thread::Current(), *Locks::trace_lock_);
  trace_data_->AppendToLongRunningMethods(buffer_ptr.get(), num_bytes);
  trace_data_->AddTracedMethods(traced_methods);
  trace_data_->AddTracedThread(thread);
}

std::string TraceProfiler::GetLongRunningMethodsString() {
  if (!art_flags::always_enable_profile_code()) {
    return std::string();
  }

  std::ostringstream os;
  Dump(std::unique_ptr<File>(), os);
  return os.str();
}

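// Runs on each thread during a trace dump. Encodes the thread's trace buffer and either streams
// it directly to the trace file (when one was provided) or appends it to the in-memory store,
// records the thread and the methods it referenced, and finally passes the checkpoint barrier.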
void TraceDumpCheckpoint::Run(Thread* thread) {
  auto method_trace_entries = thread->GetMethodTraceBuffer();
  if (method_trace_entries != nullptr) {
    std::unordered_set<ArtMethod*> traced_methods;
    if (trace_data_->GetTraceType() == LowOverheadTraceType::kLongRunningMethods) {
      uintptr_t* method_trace_curr_ptr = *(thread->GetTraceBufferCurrEntryPtr());
      std::unique_ptr<uint8_t[]> buffer_ptr(new uint8_t[kBufSizeForEncodedData]);
      size_t num_bytes = TraceProfiler::DumpLongRunningMethodBuffer(thread->GetTid(),
                                                                    method_trace_entries,
                                                                    method_trace_curr_ptr,
                                                                    buffer_ptr.get(),
                                                                    traced_methods);
      MutexLock mu(Thread::Current(), trace_file_lock_);
      if (trace_file_ != nullptr) {
        if (!trace_file_->WriteFully(buffer_ptr.get(), num_bytes)) {
          PLOG(WARNING) << "Failed streaming a tracing event.";
        }
      } else {
        trace_data_->AppendToLongRunningMethods(buffer_ptr.get(), num_bytes);
      }
    } else {
      std::unique_ptr<uint8_t[]> buffer_ptr(new uint8_t[kBufSizeForEncodedData]);
      size_t num_bytes = TraceProfiler::DumpBuffer(
          thread->GetTid(), method_trace_entries, buffer_ptr.get(), traced_methods);
      MutexLock mu(Thread::Current(), trace_file_lock_);
      if (!trace_file_->WriteFully(buffer_ptr.get(), num_bytes)) {
        PLOG(WARNING) << "Failed streaming a tracing event.";
      }
    }
    trace_data_->AddTracedThread(thread);
    trace_data_->AddTracedMethods(traced_methods);
  }
  barrier_.Pass(Thread::Current());
}

void TraceDumpCheckpoint::WaitForThreadsToRunThroughCheckpoint(size_t threads_running_checkpoint) {
  Thread* self = Thread::Current();
  ScopedThreadStateChange tsc(self, ThreadState::kWaitingForCheckPointsToRun);
  barrier_.Increment(self, threads_running_checkpoint);
}

void TraceDumpCheckpoint::FinishTraceDump(std::ostringstream& os) {
  // Dump all the data.
  trace_data_->DumpData(os);

  // Any trace stop requests are blocked while a dump is in progress, so broadcast the completion
  // condition for any waiting requests.
  MutexLock mu(Thread::Current(), *Locks::trace_lock_);
  trace_data_->SignalTraceDumpComplete();
}

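// Writes out everything accumulated for this trace: first the encoded event data (all filled
// overflow buffers followed by the partially filled current buffer), then the method info
// records (and, for all-methods traces, the thread info records) needed to decode it.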
void TraceData::DumpData(std::ostringstream& os) {
  std::unordered_set<ArtMethod*> methods;
  std::unordered_map<size_t, std::string> threads;
  {
    // We cannot dump method information while holding trace_lock_, since we have to also
    // acquire a mutator lock. Take a snapshot of thread and method information.
    MutexLock mu(Thread::Current(), trace_data_lock_);
    if (curr_buffer_ != nullptr) {
      for (size_t i = 0; i < overflow_buffers_.size(); i++) {
        os.write(reinterpret_cast<char*>(overflow_buffers_[i].get()), kBufSizeForEncodedData);
      }

      os.write(reinterpret_cast<char*>(curr_buffer_.get()), curr_index_);
    }

    methods = traced_methods_;
    if (trace_type_ != LowOverheadTraceType::kLongRunningMethods) {
      threads = traced_threads_;
    }
  }

  // Dump the information about the traced methods and threads.
  {
    ScopedObjectAccess soa(Thread::Current());
    DumpThreadMethodInfo(threads, methods, os);
  }
}

}  // namespace art