/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_TRACE_H_
#define ART_RUNTIME_TRACE_H_

#include <bitset>
#include <map>
#include <memory>
#include <ostream>
#include <set>
#include <string>
#include <unordered_map>
#include <vector>

#include "base/atomic.h"
#include "base/locks.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "base/os.h"
#include "base/safe_map.h"
#include "class_linker.h"
#include "instrumentation.h"
#include "runtime_globals.h"
#include "thread_pool.h"

namespace unix_file {
class FdFile;
}  // namespace unix_file

namespace art HIDDEN {

class ArtField;
class ArtMethod;
class DexFile;
class ShadowFrame;
class Thread;

struct MethodTraceRecord;

using DexIndexBitSet = std::bitset<65536>;

enum TracingMode {
  kTracingInactive,
  kMethodTracingActive,  // Trace activity synchronous with method progress.
  kSampleProfilingActive,  // Trace activity captured by sampling thread.
};
std::ostream& operator<<(std::ostream& os, TracingMode rhs);

// File format:
//     header
//     record 0
//     record 1
//     ...
//
// Header format:
//     u4  magic ('SLOW')
//     u2  version
//     u2  offset to data
//     u8  start date/time in usec
//     u2  record size in bytes (version >= 2 only)
//     ... padding to 32 bytes
//
// Record format v1:
//     u1  thread ID
//     u4  method ID | method action
//     u4  time delta since start, in usec
//
// Record format v2:
//     u2  thread ID
//     u4  method ID | method action
//     u4  time delta since start, in usec
//
// Record format v3:
//     u2  thread ID
//     u4  method ID | method action
//     u4  time delta since start, in usec
//     u4  wall time since start, in usec (when clock == "dual" only)
//
// 32 bits of microseconds is 70 minutes.
//
// All values are stored in little-endian order.
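//
// For illustration, the fixed part of the header can be decoded with a small sketch like
// the one below (assumes a little-endian host; ReadU2/ReadU4/ReadU8 are hypothetical
// helpers that read unsigned little-endian values at a byte offset):
//
//     uint32_t magic = ReadU4(header, 0);        // expected to be 'SLOW'
//     uint16_t version = ReadU2(header, 4);
//     uint16_t data_offset = ReadU2(header, 6);  // offset to record 0
//     uint64_t start_usec = ReadU8(header, 8);
//     uint16_t record_size = (version >= 2) ? ReadU2(header, 16) : 0;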

enum TraceAction {
    kTraceMethodEnter = 0x00,       // method entry
    kTraceMethodExit = 0x01,        // method exit
    kTraceUnroll = 0x02,            // method exited by exception unrolling
    // 0x03 currently unused
    kTraceMethodActionMask = 0x03,  // two bits
};

enum class TraceOutputMode {
    kFile,
    kDDMS,
    kStreaming
};

// We need 3 entries to store 64-bit timestamp counter as two 32-bit values on 32-bit architectures.
static constexpr uint32_t kNumEntriesForWallClock =
    (kRuntimePointerSize == PointerSize::k64) ? 2 : 3;
static constexpr uint32_t kNumEntriesForDualClock = kNumEntriesForWallClock + 1;

// These define offsets in bytes for the individual fields of a trace entry. These are used by the
// JITed code when storing a trace entry.
static constexpr int32_t kMethodOffsetInBytes = 0;
static constexpr int32_t kTimestampOffsetInBytes = 1 * static_cast<uint32_t>(kRuntimePointerSize);
// On 32-bit architectures we store 64-bit timestamp as two 32-bit values.
// kHighTimestampOffsetInBytes is only relevant on 32-bit architectures.
static constexpr int32_t kHighTimestampOffsetInBytes =
    2 * static_cast<uint32_t>(kRuntimePointerSize);

static constexpr uintptr_t kMaskTraceAction = ~0b11;
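
// For illustration, a trace entry packs the method pointer and the TraceAction into a
// single word: the low two bits hold the action and the remaining bits hold the method.
// A sketch of encoding and decoding (assumes ArtMethod* is at least 4-byte aligned):
//
//     uintptr_t entry = reinterpret_cast<uintptr_t>(method) | kTraceMethodEnter;
//     ArtMethod* decoded = reinterpret_cast<ArtMethod*>(entry & kMaskTraceAction);
//     TraceAction action = static_cast<TraceAction>(entry & kTraceMethodActionMask);
//
// In a per-thread buffer the packed word sits at kMethodOffsetInBytes and the timestamp
// at kTimestampOffsetInBytes (with its high 32 bits at kHighTimestampOffsetInBytes on
// 32-bit architectures).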

class TraceWriterThreadPool : public ThreadPool {
 public:
  static TraceWriterThreadPool* Create(const char* name) {
    TraceWriterThreadPool* pool = new TraceWriterThreadPool(name);
    pool->CreateThreads();
    return pool;
  }

  uintptr_t* FinishTaskAndClaimBuffer(size_t tid);

 private:
  explicit TraceWriterThreadPool(const char* name)
      : ThreadPool(name,
                   /* num_threads= */ 1,
                   /* create_peers= */ false,
                   /* worker_stack_size= */ ThreadPoolWorker::kDefaultStackSize) {}
};

class TraceWriter {
 public:
  TraceWriter(File* trace_file,
              TraceOutputMode output_mode,
              TraceClockSource clock_source,
              size_t buffer_size,
              int num_trace_buffers,
              int trace_format_version,
              uint32_t clock_overhead_ns);

  // This encodes all the events in the per-thread trace buffer and writes it to the trace file /
  // buffer. This acquires the streaming lock to prevent other threads from writing concurrently.
  // These flushes must be serialized across threads because each method is encoded with a unique
  // id that is assigned when the method is first seen in the recorded events.
  void FlushBuffer(Thread* thread, bool is_sync, bool free_buffer)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!trace_writer_lock_);

  // This is called when the per-thread buffer is full and a new entry needs to be recorded. This
  // returns a pointer to the new buffer where the entries should be recorded.
  // In streaming mode, we just flush the per-thread buffer. The buffer is flushed asynchronously
  // on a thread pool worker. This creates a new buffer, updates the per-thread buffer pointer
  // and returns a pointer to the newly created buffer.
  // In non-streaming mode, buffers from all threads are flushed to see if there's enough room
  // in the centralized buffer before recording new entries. We just flush these buffers
  // synchronously and reuse the existing buffer. Since this mode is mostly deprecated, we keep
  // the implementation simple here.
  uintptr_t* PrepareBufferForNewEntries(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!trace_writer_lock_);

  // Flushes all per-thread buffers and also writes a summary entry.
  void FinishTracing(int flags, bool flush_entries) REQUIRES(!trace_writer_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void PreProcessTraceForMethodInfos(uintptr_t* buffer,
                                     size_t num_entries,
                                     std::unordered_map<ArtMethod*, std::string>& method_infos)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!trace_writer_lock_);

  // Flush buffer to the file (for streaming) or to the common buffer (for non-streaming). In the
  // non-streaming case it returns false if all the contents couldn't be flushed.
  void FlushBuffer(uintptr_t* buffer,
                   size_t num_entries,
                   size_t tid,
                   const std::unordered_map<ArtMethod*, std::string>& method_infos)
      REQUIRES(!trace_writer_lock_);

  // This is called when we see the first entry from a thread, to record information about the
  // thread.
  void RecordThreadInfo(Thread* thread) REQUIRES(!trace_writer_lock_);

  // Records information about all methods in the newly loaded class in the buffer. If the buffer
  // doesn't have enough space to record the entry, then it adds a task to flush the buffer
  // contents and uses a new buffer to record the information.
  // buffer points to the buffer used to record the method info and offset is the offset in the
  // buffer at which to start recording. If *buffer is nullptr then a new one is allocated and
  // *buffer is updated to point to the newly allocated one.
  void RecordMethodInfoV2(mirror::Class* klass, uint8_t** buffer, size_t* offset)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!trace_writer_lock_);
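
  // For illustration, a hedged sketch of the calling pattern described above (the real call
  // sites live in the runtime, not in this header):
  //
  //     uint8_t* buffer = nullptr;  // RecordMethodInfoV2 allocates one when *buffer is null
  //     size_t offset = 0;
  //     RecordMethodInfoV2(klass, &buffer, &offset);
  //     // ... the filled buffer is later handed off, e.g. via AddMethodInfoWriteTask().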

  bool HasOverflow() { return overflow_; }
  TraceOutputMode GetOutputMode() { return trace_output_mode_; }
  size_t GetBufferSize() { return buffer_size_; }

  // Performs the initialization for the buffer pool. This allocates the buffer pool and marks
  // all buffers as free by storing 0 as the owner tid.
  void InitializeTraceBuffers();

  // Releases the trace buffer and signals any waiting threads about a free buffer.
  void ReleaseBuffer(int index);

  // Release the trace buffer of the thread. This is called to release the buffer without flushing
  // the entries. See the comment in ThreadList::Unregister for a more detailed explanation.
  void ReleaseBufferForThread(Thread* self);

  // Tries to find a free buffer (one whose owner tid is 0) in the pool. If there are no free
  // buffers, it waits for one. To prevent deadlocks, we only wait if the number of pending tasks
  // is greater than the number of waiting threads. Allocates a new buffer if it isn't safe to
  // wait.
  uintptr_t* AcquireTraceBuffer(size_t tid) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!trace_writer_lock_);

  // Returns the index corresponding to the start of the current_buffer. We allocate one large
  // buffer and assign parts of it to each thread.
  int GetMethodTraceIndex(uintptr_t* current_buffer);

  int GetTraceFormatVersion() { return trace_format_version_; }

  // Ensures that there are no threads suspended waiting for a free buffer. It signals threads
  // waiting for a free buffer and waits for all the threads to respond to the signal.
  void StopTracing();

  // Adds a task to write method info to the file. The buffer is already in the right format, so
  // this just adds a new task that takes ownership of the buffer, and returns a new buffer that
  // can be used. If release is set to true then it doesn't fetch a new buffer.
  uint8_t* AddMethodInfoWriteTask(uint8_t* buffer, size_t offset, size_t tid, bool release)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!trace_writer_lock_);

  // Writes buffer contents to the file.
  void WriteToFile(uint8_t* buffer, size_t offset);

 private:
  void ReadValuesFromRecord(uintptr_t* method_trace_entries,
                            size_t record_index,
                            MethodTraceRecord& record,
                            bool has_thread_cpu_clock,
                            bool has_wall_clock);

  void FlushEntriesFormatV2(uintptr_t* method_trace_entries,
                            size_t tid,
                            size_t num_records,
                            size_t* current_index,
                            uint8_t* init_buffer_ptr) REQUIRES(trace_writer_lock_);

  void FlushEntriesFormatV1(uintptr_t* method_trace_entries,
                            size_t tid,
                            const std::unordered_map<ArtMethod*, std::string>& method_infos,
                            size_t end_offset,
                            size_t* current_index,
                            uint8_t* buffer_ptr) REQUIRES(trace_writer_lock_);
  // Get a 32-bit id for the method and specify if the method hasn't been seen before. If this is
  // the first time we see this method, record information (like method name, declaring class,
  // etc.) about the method.
  std::pair<uint32_t, bool> GetMethodEncoding(ArtMethod* method) REQUIRES(trace_writer_lock_);
  bool HasMethodEncoding(ArtMethod* method) REQUIRES(trace_writer_lock_);

  // Get a 16-bit id for the thread. We don't want to use thread ids directly since they can be
  // larger than 16 bits.
  uint16_t GetThreadEncoding(pid_t thread_id) REQUIRES(trace_writer_lock_);
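
  // For illustration, a hedged sketch of how these encodings are meant to be used when
  // flushing a buffer (names and flow here are illustrative only):
  //
  //     std::pair<uint32_t, bool> encoding = GetMethodEncoding(method);
  //     uint32_t method_id = encoding.first;     // compact id written into the trace records
  //     bool first_time_seen = encoding.second;  // if true, also emit the method info line
  //     uint16_t thread_id16 = GetThreadEncoding(tid);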

  // Get the information about the method.
  std::string GetMethodLine(const std::string& method_line, uint32_t method_id);
  std::string GetMethodInfoLine(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  // Helper function to record method information when processing the events. This is used by the
  // streaming output mode. Non-streaming modes dump the method and thread lists at the end of
  // tracing.
  void RecordMethodInfoV1(const std::string& method_line, uint64_t method_id)
      REQUIRES(trace_writer_lock_);

  // Encodes the trace event. This assumes that there is enough space reserved to encode the entry.
  void EncodeEventEntry(uint8_t* ptr,
                        uint16_t thread_id,
                        uint32_t method_index,
                        TraceAction action,
                        uint32_t thread_clock_diff,
                        uint32_t wall_clock_diff) REQUIRES(trace_writer_lock_);

  // Encodes the header for the events block. This assumes that there is enough space reserved to
  // encode the entry.
  void EncodeEventBlockHeader(uint8_t* ptr,
                              uint32_t thread_id,
                              uint64_t method_index,
                              uint32_t init_thread_clock_time,
                              uint32_t init_wall_clock_time,
                              uint16_t num_records) REQUIRES(trace_writer_lock_);

  // Ensures there is sufficient space in the buffer to record the required_size. If there is not
  // sufficient space, the current contents of the buffer are written to the file and
  // current_index is reset to 0. This doesn't check if buffer_size is big enough to hold the
  // required size.
  void EnsureSpace(uint8_t* buffer,
                   size_t* current_index,
                   size_t buffer_size,
                   size_t required_size);
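
  // For illustration, the behavior described above amounts to roughly the following sketch
  // (not the actual implementation):
  //
  //     if (*current_index + required_size > buffer_size) {
  //       WriteToFile(buffer, *current_index);
  //       *current_index = 0;
  //     }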

  // Flush tracing buffers from all the threads.
  void FlushAllThreadBuffers() REQUIRES(!Locks::thread_list_lock_) REQUIRES(!trace_writer_lock_);


  // Methods to output traced methods and threads.
  void DumpMethodList(std::ostream& os) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!trace_writer_lock_);
  void DumpThreadList(std::ostream& os) REQUIRES(!Locks::thread_list_lock_, !trace_writer_lock_);

  // File to write trace data out to, null if direct to ddms.
  std::unique_ptr<File> trace_file_;

  // The kind of output for this tracing.
  const TraceOutputMode trace_output_mode_;

  // The clock source for this tracing.
  const TraceClockSource clock_source_;

  // Map of thread ids and names. This is used only in non-streaming mode, since we have to dump
  // information about all threads in one block. In streaming mode, thread info is recorded
  // directly in the file when we see the first event from the thread.
  SafeMap<uint16_t, std::string> threads_list_;

  // Map from ArtMethod* to index.
  std::unordered_map<ArtMethod*, uint32_t> art_method_id_map_ GUARDED_BY(trace_writer_lock_);
  uint32_t current_method_index_ = 0;

  // Map from thread_id to a 16-bit identifier.
  std::unordered_map<pid_t, uint16_t> thread_id_map_ GUARDED_BY(trace_writer_lock_);
  uint16_t current_thread_index_;

  // Buffer used when generating trace data from the raw entries.
  // In streaming mode, the trace data is flushed to the file when the per-thread buffer gets full.
  // In non-streaming mode, this data is flushed at the end of tracing. If the buffer gets full,
  // we stop tracing and subsequent trace events are ignored. The size of this buffer is
  // specified by the user in non-streaming mode.
  std::unique_ptr<uint8_t[]> buf_;

  // The current offset into buf_. Accessed only in a SuspendAll scope when flushing data from
  // the thread-local buffers to buf_.
  size_t cur_offset_ GUARDED_BY(trace_writer_lock_);

  // Size of buf_.
  const size_t buffer_size_;

  // Version of the trace output format.
  const int trace_format_version_;

  // Time the trace was created.
  const uint64_t start_time_;

  // Did we overflow the buffer recording traces?
  bool overflow_;

  // Total number of records flushed to file.
  size_t num_records_;

  // Clock overhead.
  const uint32_t clock_overhead_ns_;

  std::vector<std::atomic<size_t>> owner_tids_;
  std::unique_ptr<uintptr_t[]> trace_buffer_;

  Mutex buffer_pool_lock_;
  ConditionVariable buffer_available_ GUARDED_BY(buffer_pool_lock_);
  ConditionVariable num_waiters_zero_cond_ GUARDED_BY(buffer_pool_lock_);
  std::atomic<size_t> num_waiters_for_buffer_;
  std::atomic<bool> finish_tracing_ = false;

  // Lock to protect common data structures accessed from multiple threads, such as
  // art_method_id_map_ and thread_id_map_.
  Mutex trace_writer_lock_;

  // Thread pool to flush the trace entries to the file.
  std::unique_ptr<TraceWriterThreadPool> thread_pool_;
};

// Class for recording event traces. Trace data is either collected
// synchronously during execution (TracingMode::kMethodTracingActive),
// or by a separate sampling thread (TracingMode::kSampleProfilingActive).
class Trace final : public instrumentation::InstrumentationListener, public ClassLoadCallback {
 public:
  enum TraceFlag {
    kTraceCountAllocs = 0x001,
    kTraceClockSourceWallClock = 0x010,
    kTraceClockSourceThreadCpu = 0x100,
  };

  static const int kFormatV1 = 0;
  static const int kFormatV2 = 1;
  static const int kTraceFormatVersionFlagMask = 0b110;
  static const int kTraceFormatVersionShift = 1;
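
  // For illustration, the trace format version and clock sources are carried in the flags
  // word passed to Start(); a sketch of decoding them:
  //
  //     int version = (flags & kTraceFormatVersionFlagMask) >> kTraceFormatVersionShift;
  //     bool use_wall_clock = (flags & kTraceClockSourceWallClock) != 0;
  //     bool use_thread_cpu_clock = (flags & kTraceClockSourceThreadCpu) != 0;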

  enum class TraceMode {
    kMethodTracing,
    kSampling
  };

  static void SetDefaultClockSource(TraceClockSource clock_source);

  static void Start(const char* trace_filename,
                    size_t buffer_size,
                    int flags,
                    TraceOutputMode output_mode,
                    TraceMode trace_mode,
                    int interval_us)
      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_,
               !Locks::trace_lock_);
  static void Start(int trace_fd,
                    size_t buffer_size,
                    int flags,
                    TraceOutputMode output_mode,
                    TraceMode trace_mode,
                    int interval_us)
      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_,
               !Locks::trace_lock_);
  static void Start(std::unique_ptr<unix_file::FdFile>&& file,
                    size_t buffer_size,
                    int flags,
                    TraceOutputMode output_mode,
                    TraceMode trace_mode,
                    int interval_us)
      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_,
               !Locks::trace_lock_);
  static void StartDDMS(size_t buffer_size,
                        int flags,
                        TraceMode trace_mode,
                        int interval_us)
      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_,
               !Locks::trace_lock_);

  // Stop tracing. This will finish the trace and write it to file/send it via DDMS.
  static void Stop()
      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_);
  // Abort tracing. This will just stop tracing and *not* write/send the collected data.
  static void Abort()
      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_);
  static void Shutdown()
      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_);
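
  // For illustration, a minimal usage sketch (the path, buffer size, and flag values here are
  // hypothetical, not recommendations):
  //
  //     Trace::Start("/data/local/tmp/method.trace",  // hypothetical output file
  //                  8 * 1024 * 1024,                 // buffer_size in bytes
  //                  0,                               // flags: format v1, default clock source
  //                  TraceOutputMode::kFile,
  //                  TraceMode::kMethodTracing,
  //                  /* interval_us= */ 0);           // sampling interval, unused here
  //     // ... run the code to be profiled ...
  //     Trace::Stop();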

  static TracingMode GetMethodTracingMode() REQUIRES(!Locks::trace_lock_);

  // Flush the per-thread buffer. This is called when the thread is about to detach.
  static void FlushThreadBuffer(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::trace_lock_) NO_THREAD_SAFETY_ANALYSIS;

  // Release the per-thread buffer without flushing any entries. This is used when a new trace
  // buffer is allocated while the thread is terminating. See ThreadList::Unregister for more
  // details.
  static void ReleaseThreadBuffer(Thread* thread)
      REQUIRES(!Locks::trace_lock_) NO_THREAD_SAFETY_ANALYSIS;

  // Removes any listeners installed for method tracing. This is used in the non-streaming case
  // when we no longer record any events once the buffer is full. In other cases listeners are
  // removed only when tracing stops. This is expected to be called in a SuspendAll scope.
  static void RemoveListeners() REQUIRES(Locks::mutator_lock_);

  void MeasureClockOverhead();
  uint32_t GetClockOverheadNanoSeconds();

  void CompareAndUpdateStackTrace(Thread* thread, std::vector<ArtMethod*>* stack_trace)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // InstrumentationListener implementation.
  void MethodEntered(Thread* thread, ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_) override;
  void MethodExited(Thread* thread,
                    ArtMethod* method,
                    instrumentation::OptionalFrame frame,
                    JValue& return_value) REQUIRES_SHARED(Locks::mutator_lock_) override;
  void MethodUnwind(Thread* thread, ArtMethod* method, uint32_t dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_) override;
  void DexPcMoved(Thread* thread,
                  Handle<mirror::Object> this_object,
                  ArtMethod* method,
                  uint32_t new_dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) override;
  void FieldRead(Thread* thread,
                 Handle<mirror::Object> this_object,
                 ArtMethod* method,
                 uint32_t dex_pc,
                 ArtField* field) REQUIRES_SHARED(Locks::mutator_lock_) override;
  void FieldWritten(Thread* thread,
                    Handle<mirror::Object> this_object,
                    ArtMethod* method,
                    uint32_t dex_pc,
                    ArtField* field,
                    const JValue& field_value) REQUIRES_SHARED(Locks::mutator_lock_) override;
  void ExceptionThrown(Thread* thread, Handle<mirror::Throwable> exception_object)
      REQUIRES_SHARED(Locks::mutator_lock_) override;
  void ExceptionHandled(Thread* thread, Handle<mirror::Throwable> exception_object)
      REQUIRES_SHARED(Locks::mutator_lock_) override;
  void Branch(Thread* thread, ArtMethod* method, uint32_t dex_pc, int32_t dex_pc_offset)
      REQUIRES_SHARED(Locks::mutator_lock_) override;
  void WatchedFramePop(Thread* thread, const ShadowFrame& frame)
      REQUIRES_SHARED(Locks::mutator_lock_) override;

  // ClassLoadCallback implementation.
  void ClassLoad([[maybe_unused]] Handle<mirror::Class> klass)
      REQUIRES_SHARED(Locks::mutator_lock_) override {}
  void ClassPrepare(Handle<mirror::Class> temp_klass, Handle<mirror::Class> klass)
      REQUIRES_SHARED(Locks::mutator_lock_) override;

  TraceClockSource GetClockSource() { return clock_source_; }

  // Reuse an old stack trace if it exists, otherwise allocate a new one.
  static std::vector<ArtMethod*>* AllocStackTrace();
  // Clear and store an old stack trace for later use.
  static void FreeStackTrace(std::vector<ArtMethod*>* stack_trace);

  static TraceOutputMode GetOutputMode() REQUIRES(!Locks::trace_lock_);
  static TraceMode GetMode() REQUIRES(!Locks::trace_lock_);
  static size_t GetBufferSize() REQUIRES(!Locks::trace_lock_);
  static int GetFlags() REQUIRES(!Locks::trace_lock_);
  static int GetIntervalInMillis() REQUIRES(!Locks::trace_lock_);

  // Used by the class linker to prevent class unloading.
  static bool IsTracingEnabled() REQUIRES(!Locks::trace_lock_);

  // Callback for each class prepare event to record information about the newly created methods.
  static void ClassPrepare(Handle<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);

  TraceWriter* GetTraceWriter() { return trace_writer_.get(); }

 private:
  Trace(File* trace_file,
        size_t buffer_size,
        int flags,
        TraceOutputMode output_mode,
        TraceMode trace_mode);

  // The sampling interval in microseconds is passed as an argument.
  static void* RunSamplingThread(void* arg) REQUIRES(!Locks::trace_lock_);

  static void StopTracing(bool flush_entries)
      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_)
      // There is an annoying issue with static functions that create a new object and call into
      // that object that causes them to not be able to tell that we don't currently hold the lock.
      // This causes the negative annotations to produce false positives. TODO: Figure out
      // how to annotate this.
      NO_THREAD_SAFETY_ANALYSIS;

  void ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint64_t* timestamp_counter);

  void LogMethodTraceEvent(Thread* thread,
                           ArtMethod* method,
                           TraceAction action,
                           uint32_t thread_clock_diff,
                           uint64_t timestamp_counter) REQUIRES_SHARED(Locks::mutator_lock_);

  // Singleton instance of the Trace or null when no method tracing is active.
  static Trace* volatile the_trace_ GUARDED_BY(Locks::trace_lock_);

  // The default profiler clock source.
  static TraceClockSource default_clock_source_;

  // Sampling thread, non-zero when sampling.
  static pthread_t sampling_pthread_;

  // Used to remember an unused stack trace to avoid re-allocation during sampling.
  static std::unique_ptr<std::vector<ArtMethod*>> temp_stack_trace_;

  // Flags enabling extra tracing of things such as alloc counts.
  const int flags_;

  // The tracing method.
  const TraceMode trace_mode_;

  const TraceClockSource clock_source_;

  // Sampling interval of the sampling profiler.
  int interval_us_;

  // A flag to indicate to the sampling thread whether to stop tracing.
  bool stop_tracing_;

  std::unique_ptr<TraceWriter> trace_writer_;

  DISALLOW_COPY_AND_ASSIGN(Trace);
};

}  // namespace art

#endif  // ART_RUNTIME_TRACE_H_