// Copyright 2015 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/profiler/stack_sampling_profiler.h"

#include <algorithm>
#include <cmath>
#include <map>
#include <optional>
#include <utility>

#include "base/atomic_sequence_num.h"
#include "base/atomicops.h"
#include "base/functional/bind.h"
#include "base/functional/callback.h"
#include "base/functional/callback_helpers.h"
#include "base/location.h"
#include "base/memory/ptr_util.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/singleton.h"
#include "base/profiler/profiler_buildflags.h"
#include "base/profiler/stack_buffer.h"
#include "base/profiler/stack_sampler.h"
#include "base/profiler/stack_unwind_data.h"
#include "base/profiler/unwinder.h"
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
#include "base/thread_annotations.h"
#include "base/threading/thread.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
#include "base/trace_event/base_tracing.h"
#include "build/build_config.h"

#if BUILDFLAG(IS_WIN)
#include "base/win/static_constants.h"
#endif

#if BUILDFLAG(IS_APPLE)
#include "base/mac/mac_util.h"
#endif

namespace base {

// Allows StackSamplingProfiler to recall a thread which should already pretty
// much be dead (thus it should be a fast Join()).
class ScopedAllowThreadRecallForStackSamplingProfiler
    : public ScopedAllowBaseSyncPrimitivesOutsideBlockingScope {};

namespace {

// This value is used to initialize the WaitableEvent object. This MUST BE set
// to MANUAL for correct operation of the IsSignaled() call in Start(). See the
// comment there for why.
constexpr WaitableEvent::ResetPolicy kResetPolicy =
    WaitableEvent::ResetPolicy::MANUAL;

// This value is used when there is no collection in progress and thus no ID
// for referencing the active collection to the SamplingThread.
const int kNullProfilerId = -1;

TimeTicks GetNextSampleTimeImpl(TimeTicks scheduled_current_sample_time,
                                TimeDelta sampling_interval,
                                TimeTicks now) {
  // Schedule the next sample at the next sampling_interval-aligned time in
  // the future that's sufficiently far from the current sample. In the
  // general case this will be one sampling_interval from the current
  // sample. In cases where sample tasks were unable to be executed, such as
  // during system suspend or bad system-wide jank, we may have missed some
  // samples. The right thing to do for those cases is to skip the missed
  // samples, since the rest of the system also wasn't executing.

  // Ensure that the next sample time is at least half a sampling interval
  // away. This causes the second sample after resume to be taken between 0.5
  // and 1.5 samples after the first, or 1 sample interval on average. The delay
  // also serves to provide a grace period in the normal sampling case where the
  // current sample may be taken slightly later than its scheduled time.
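  //
  // A worked example (hypothetical numbers): with a 100 ms sampling interval,
  // if the current sample was scheduled at time T but |now| is T + 250 ms
  // (e.g. after a suspend), the earliest allowed next sample time is
  // T + 300 ms. That is ceil(300 / 100) = 3 intervals past the scheduled
  // time, so the next sample is scheduled at T + 300 ms; the samples that
  // would have fallen at T + 100 ms and T + 200 ms are skipped.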
  const TimeTicks earliest_next_sample_time = now + sampling_interval / 2;

  const TimeDelta minimum_time_delta_to_next_sample =
      earliest_next_sample_time - scheduled_current_sample_time;

  // The minimum number of sampling intervals required to get from the scheduled
  // current sample time to the earliest next sample time.
  const int64_t required_sampling_intervals = static_cast<int64_t>(
      std::ceil(minimum_time_delta_to_next_sample / sampling_interval));
  return scheduled_current_sample_time +
         required_sampling_intervals * sampling_interval;
}

}  // namespace

// StackSamplingProfiler::SamplingThread --------------------------------------

class StackSamplingProfiler::SamplingThread : public Thread {
 public:
  class TestPeer {
   public:
    // Reset the existing sampler. This will unfortunately create the object
    // unnecessarily if it doesn't already exist but there's no way around that.
    static void Reset();

    // Disables inherent idle-shutdown behavior.
    static void DisableIdleShutdown();

    // Begins an idle shutdown as if the idle-timer had expired and waits for
    // it to execute. Since the timer would have only been started at a time
    // when the sampling thread actually was idle, this must be called only
    // when it is known that there are no active sampling threads. If
    // |simulate_intervening_add| is true then, when executed, the shutdown
    // task will believe that a new collection has been added since it was
    // posted.
    static void ShutdownAssumingIdle(bool simulate_intervening_add);

   private:
    // Calls the sampling thread's ShutdownTask and then signals an event.
    static void ShutdownTaskAndSignalEvent(SamplingThread* sampler,
                                           int add_events,
                                           WaitableEvent* event);
  };

  struct CollectionContext {
    CollectionContext(PlatformThreadId thread_id,
                      const SamplingParams& params,
                      WaitableEvent* finished,
                      std::unique_ptr<StackSampler> sampler)
        : collection_id(next_collection_id.GetNext()),
          thread_id(thread_id),
          params(params),
          finished(finished),
          sampler(std::move(sampler)) {}
    ~CollectionContext() = default;

    // An identifier for this collection, used to uniquely identify the
    // collection to outside interests.
    const int collection_id;
    const PlatformThreadId thread_id;  // Thread id of the sampled thread.

    const SamplingParams params;    // Information about how to sample.
    const raw_ptr<WaitableEvent>
        finished;  // Signaled when all sampling complete.

    // Platform-specific module that does the actual sampling.
    std::unique_ptr<StackSampler> sampler;

    // The absolute time for the next sample.
    TimeTicks next_sample_time;

    // The time that a profile was started, for calculating the total duration.
    TimeTicks profile_start_time;

    // Counter that indicates the current sample position along the acquisition.
    int sample_count = 0;

    // Whether stop has been requested.
    bool stopping = false;

    // Sequence number for generating new collection ids.
    static AtomicSequenceNumber next_collection_id;
  };

  // Gets the single instance of this class.
  static SamplingThread* GetInstance();

  SamplingThread(const SamplingThread&) = delete;
  SamplingThread& operator=(const SamplingThread&) = delete;

  // Adds a new CollectionContext to the thread. This can be called externally
  // from any thread. This returns a collection id that can later be used to
  // stop the sampling.
  int Add(std::unique_ptr<CollectionContext> collection);

  // Adds an auxiliary unwinder to be used for the collection, to handle
  // additional, non-native-code unwind scenarios.
  void AddAuxUnwinder(int collection_id, std::unique_ptr<Unwinder> unwinder);

  // Applies the metadata to already recorded samples in all collections.
  void ApplyMetadataToPastSamples(base::TimeTicks period_start,
                                  base::TimeTicks period_end,
                                  uint64_t name_hash,
                                  std::optional<int64_t> key,
                                  int64_t value,
                                  std::optional<PlatformThreadId> thread_id);

  // Adds the metadata as profile metadata. Profile metadata stores metadata
  // global to the profile.
  void AddProfileMetadata(uint64_t name_hash,
                          std::optional<int64_t> key,
                          int64_t value,
                          std::optional<PlatformThreadId> thread_id);

  // Removes an active collection based on its collection id, forcing it to run
  // its callback if any data has been collected. This can be called externally
  // from any thread.
  void Remove(int collection_id);

 private:
  friend struct DefaultSingletonTraits<SamplingThread>;

  // The different states in which the sampling-thread can be.
  enum ThreadExecutionState {
    // The thread is not running because it has never been started. It will be
    // started when a sampling request is received.
    NOT_STARTED,

    // The thread is running and processing tasks. This is the state when any
    // sampling requests are active and during the "idle" period afterward
    // before the thread is stopped.
    RUNNING,

    // Once all sampling requests have finished and the "idle" period has
    // expired, the thread will be set to this state and its shutdown
    // initiated. A call to Stop() must be made to ensure the previous thread
    // has completely exited before calling Start() and moving back to the
    // RUNNING state.
    EXITING,
  };
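
  // A summary of the transitions implied above (informational only):
  // NOT_STARTED -> RUNNING on the first Add(); RUNNING -> EXITING when
  // ShutdownTask() runs after the idle timeout; EXITING -> RUNNING when a
  // later Add() calls Stop() and then Start() again.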

  SamplingThread();
  ~SamplingThread() override;

  // Get task runner that is usable from the outside.
  scoped_refptr<SingleThreadTaskRunner> GetOrCreateTaskRunnerForAdd();
  scoped_refptr<SingleThreadTaskRunner> GetTaskRunner(
      ThreadExecutionState* out_state);

  // Get task runner that is usable from the sampling thread itself.
  scoped_refptr<SingleThreadTaskRunner> GetTaskRunnerOnSamplingThread();

  // Finishes a collection. The collection's |finished| waitable event will be
  // signaled. The |collection| should already have been removed from
  // |active_collections_| by the caller, as this is needed to avoid flakiness
  // in unit tests.
  void FinishCollection(std::unique_ptr<CollectionContext> collection);

  // Check if the sampling thread is idle and begin a shutdown if it is.
  void ScheduleShutdownIfIdle();

  // These methods are tasks that get posted to the internal message queue.
  void AddCollectionTask(std::unique_ptr<CollectionContext> collection);
  void AddAuxUnwinderTask(int collection_id,
                          std::unique_ptr<Unwinder> unwinder);
  void ApplyMetadataToPastSamplesTask(
      base::TimeTicks period_start,
      base::TimeTicks period_end,
      uint64_t name_hash,
      std::optional<int64_t> key,
      int64_t value,
      std::optional<PlatformThreadId> thread_id);
  void AddProfileMetadataTask(uint64_t name_hash,
                              std::optional<int64_t> key,
                              int64_t value,
                              std::optional<PlatformThreadId> thread_id);
  void RemoveCollectionTask(int collection_id);
  void ScheduleCollectionStop(int collection_id);
  void RecordSampleTask(int collection_id);
  void ShutdownTask(int add_events);

  // Thread:
  void CleanUp() override;

  // A stack-buffer used by the sampler for its work. This buffer is re-used
  // across multiple sampler objects since their execution is serialized on the
  // sampling thread.
  std::unique_ptr<StackBuffer> stack_buffer_;

  // A map of collection ids to collection contexts. Because this class is a
  // singleton that is never destroyed, context objects will never be destructed
  // except by explicit action. Thus, it's acceptable to pass unretained
  // pointers to these objects when posting tasks.
  std::map<int, std::unique_ptr<CollectionContext>> active_collections_;

  // State maintained about the current execution (or non-execution) of
  // the thread. This state must always be accessed while holding the
  // lock. A copy of the task-runner is maintained here for use by any
  // calling thread; this is necessary because Thread's accessor for it is
  // not itself thread-safe. The lock is also used to order calls to the
  // Thread API (Start, Stop, StopSoon, & DetachFromSequence) so that
  // multiple threads may make those calls.
  Lock thread_execution_state_lock_;  // Protects all thread_execution_state_*
  ThreadExecutionState thread_execution_state_
      GUARDED_BY(thread_execution_state_lock_) = NOT_STARTED;
  scoped_refptr<SingleThreadTaskRunner> thread_execution_state_task_runner_
      GUARDED_BY(thread_execution_state_lock_);
  bool thread_execution_state_disable_idle_shutdown_for_testing_
      GUARDED_BY(thread_execution_state_lock_) = false;

  // A counter that notes adds of new collection requests. It is incremented
  // when changes occur so that delayed shutdown tasks are able to detect if
  // something new has happened while they were waiting. Like all
  // "execution_state" vars, this must be accessed while holding
  // |thread_execution_state_lock_|.
  int thread_execution_state_add_events_
      GUARDED_BY(thread_execution_state_lock_) = 0;
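  // (For example: a ShutdownTask posted when this count was N will abort
  // itself if, by the time it runs, an intervening Add() has raised the
  // count to N + 1; see ShutdownTask() below.)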
};

// static
void StackSamplingProfiler::SamplingThread::TestPeer::Reset() {
  SamplingThread* sampler = SamplingThread::GetInstance();

  ThreadExecutionState state;
  {
    AutoLock lock(sampler->thread_execution_state_lock_);
    state = sampler->thread_execution_state_;
    DCHECK(sampler->active_collections_.empty());
  }

  // Stop the thread and wait for it to exit. This has to be done by the
  // thread itself because it has taken ownership of its own lifetime.
  if (state == RUNNING) {
    ShutdownAssumingIdle(false);
    state = EXITING;
  }
  // Make sure thread is cleaned up since state will be reset to NOT_STARTED.
  if (state == EXITING)
    sampler->Stop();

  // Reset internal variables to the just-initialized state.
  {
    AutoLock lock(sampler->thread_execution_state_lock_);
    sampler->thread_execution_state_ = NOT_STARTED;
    sampler->thread_execution_state_task_runner_ = nullptr;
    sampler->thread_execution_state_disable_idle_shutdown_for_testing_ = false;
    sampler->thread_execution_state_add_events_ = 0;
  }
}

// static
void StackSamplingProfiler::SamplingThread::TestPeer::DisableIdleShutdown() {
  SamplingThread* sampler = SamplingThread::GetInstance();

  {
    AutoLock lock(sampler->thread_execution_state_lock_);
    sampler->thread_execution_state_disable_idle_shutdown_for_testing_ = true;
  }
}

// static
void StackSamplingProfiler::SamplingThread::TestPeer::ShutdownAssumingIdle(
    bool simulate_intervening_add) {
  SamplingThread* sampler = SamplingThread::GetInstance();

  ThreadExecutionState state;
  scoped_refptr<SingleThreadTaskRunner> task_runner =
      sampler->GetTaskRunner(&state);
  DCHECK_EQ(RUNNING, state);
  DCHECK(task_runner);

  int add_events;
  {
    AutoLock lock(sampler->thread_execution_state_lock_);
    add_events = sampler->thread_execution_state_add_events_;
    if (simulate_intervening_add)
      ++sampler->thread_execution_state_add_events_;
  }

  WaitableEvent executed(WaitableEvent::ResetPolicy::MANUAL,
                         WaitableEvent::InitialState::NOT_SIGNALED);
  // PostTaskAndReply won't work because thread and associated message-loop may
  // be shut down.
  task_runner->PostTask(
      FROM_HERE, BindOnce(&ShutdownTaskAndSignalEvent, Unretained(sampler),
                          add_events, Unretained(&executed)));
  executed.Wait();
}

// static
void StackSamplingProfiler::SamplingThread::TestPeer::
    ShutdownTaskAndSignalEvent(SamplingThread* sampler,
                               int add_events,
                               WaitableEvent* event) {
  sampler->ShutdownTask(add_events);
  event->Signal();
}

AtomicSequenceNumber StackSamplingProfiler::SamplingThread::CollectionContext::
    next_collection_id;

StackSamplingProfiler::SamplingThread::SamplingThread()
    : Thread("StackSamplingProfiler") {}

StackSamplingProfiler::SamplingThread::~SamplingThread() = default;

StackSamplingProfiler::SamplingThread*
StackSamplingProfiler::SamplingThread::GetInstance() {
  return Singleton<SamplingThread, LeakySingletonTraits<SamplingThread>>::get();
}

int StackSamplingProfiler::SamplingThread::Add(
    std::unique_ptr<CollectionContext> collection) {
  // This is not to be run on the sampling thread.

  int collection_id = collection->collection_id;
  scoped_refptr<SingleThreadTaskRunner> task_runner =
      GetOrCreateTaskRunnerForAdd();

  task_runner->PostTask(
      FROM_HERE, BindOnce(&SamplingThread::AddCollectionTask, Unretained(this),
                          std::move(collection)));

  return collection_id;
}

void StackSamplingProfiler::SamplingThread::AddAuxUnwinder(
    int collection_id,
    std::unique_ptr<Unwinder> unwinder) {
  ThreadExecutionState state;
  scoped_refptr<SingleThreadTaskRunner> task_runner = GetTaskRunner(&state);
  if (state != RUNNING)
    return;
  DCHECK(task_runner);
  task_runner->PostTask(
      FROM_HERE, BindOnce(&SamplingThread::AddAuxUnwinderTask, Unretained(this),
                          collection_id, std::move(unwinder)));
}

void StackSamplingProfiler::SamplingThread::ApplyMetadataToPastSamples(
    base::TimeTicks period_start,
    base::TimeTicks period_end,
    uint64_t name_hash,
    std::optional<int64_t> key,
    int64_t value,
    std::optional<PlatformThreadId> thread_id) {
  ThreadExecutionState state;
  scoped_refptr<SingleThreadTaskRunner> task_runner = GetTaskRunner(&state);
  if (state != RUNNING)
    return;
  DCHECK(task_runner);
  task_runner->PostTask(
      FROM_HERE, BindOnce(&SamplingThread::ApplyMetadataToPastSamplesTask,
                          Unretained(this), period_start, period_end, name_hash,
                          key, value, thread_id));
}

void StackSamplingProfiler::SamplingThread::AddProfileMetadata(
    uint64_t name_hash,
    std::optional<int64_t> key,
    int64_t value,
    std::optional<PlatformThreadId> thread_id) {
  ThreadExecutionState state;
  scoped_refptr<SingleThreadTaskRunner> task_runner = GetTaskRunner(&state);
  if (state != RUNNING) {
    return;
  }
  DCHECK(task_runner);
  task_runner->PostTask(
      FROM_HERE, BindOnce(&SamplingThread::AddProfileMetadataTask,
                          Unretained(this), name_hash, key, value, thread_id));
}

void StackSamplingProfiler::SamplingThread::Remove(int collection_id) {
  // This is not to be run on the sampling thread.

  ThreadExecutionState state;
  scoped_refptr<SingleThreadTaskRunner> task_runner = GetTaskRunner(&state);
  if (state != RUNNING)
    return;
  DCHECK(task_runner);

  // This can fail if the thread were to exit between acquisition of the task
  // runner above and the call below. In that case, however, everything has
  // stopped so there's no need to try to stop it.
  task_runner->PostTask(FROM_HERE,
                        BindOnce(&SamplingThread::ScheduleCollectionStop,
                                 Unretained(this), collection_id));
}

scoped_refptr<SingleThreadTaskRunner>
StackSamplingProfiler::SamplingThread::GetOrCreateTaskRunnerForAdd() {
  AutoLock lock(thread_execution_state_lock_);

  // The increment of the "add events" count is why this method must only be
  // called from Add().
  ++thread_execution_state_add_events_;

  if (thread_execution_state_ == RUNNING) {
    DCHECK(thread_execution_state_task_runner_);
    // This shouldn't be called from the sampling thread as it's inefficient.
    // Use GetTaskRunnerOnSamplingThread() instead.
    DCHECK_NE(GetThreadId(), PlatformThread::CurrentId());
    return thread_execution_state_task_runner_;
  }

  if (thread_execution_state_ == EXITING) {
    // StopSoon() was previously called to shut down the thread
    // asynchronously. Stop() must now be called before calling Start() again
    // to reset the thread state.
    //
    // We must allow blocking here to satisfy the Thread implementation, but in
    // practice the Stop() call is unlikely to actually block. For this to
    // happen a new profiling request would have to be made within the narrow
    // window between StopSoon() and thread exit following the end of the 60
    // second idle period.
    ScopedAllowThreadRecallForStackSamplingProfiler allow_thread_join;
    Stop();
  }

  DCHECK(!stack_buffer_);
  stack_buffer_ = StackSampler::CreateStackBuffer();

  // The thread is not running. Start it and get its task runner. The task
  // runner has to be saved for future use because, though it can be used from
  // any thread, it can be acquired via task_runner() only on the created
  // thread or the thread that creates it (i.e. this thread); SamplingThread
  // works around that thread-safety restriction by gating access to the saved
  // copy with |thread_execution_state_lock_|.
  Start();
  thread_execution_state_ = RUNNING;
  thread_execution_state_task_runner_ = Thread::task_runner();

  // Detach the sampling thread from the "sequence" (i.e. thread) that
  // started it so that it can be self-managed or stopped by another thread.
  DetachFromSequence();

  return thread_execution_state_task_runner_;
}

scoped_refptr<SingleThreadTaskRunner>
StackSamplingProfiler::SamplingThread::GetTaskRunner(
    ThreadExecutionState* out_state) {
  AutoLock lock(thread_execution_state_lock_);
  if (out_state)
    *out_state = thread_execution_state_;
  if (thread_execution_state_ == RUNNING) {
    // This shouldn't be called from the sampling thread as it's inefficient.
    // Use GetTaskRunnerOnSamplingThread() instead.
    DCHECK_NE(GetThreadId(), PlatformThread::CurrentId());
    DCHECK(thread_execution_state_task_runner_);
  } else {
    DCHECK(!thread_execution_state_task_runner_);
  }

  return thread_execution_state_task_runner_;
}

scoped_refptr<SingleThreadTaskRunner>
StackSamplingProfiler::SamplingThread::GetTaskRunnerOnSamplingThread() {
  // This should be called only from the sampling thread as it has limited
  // accessibility.
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());

  return Thread::task_runner();
}

void StackSamplingProfiler::SamplingThread::FinishCollection(
    std::unique_ptr<CollectionContext> collection) {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
  DCHECK_EQ(0u, active_collections_.count(collection->collection_id));

  TimeDelta profile_duration = TimeTicks::Now() -
                               collection->profile_start_time +
                               collection->params.sampling_interval;

  collection->sampler->GetStackUnwindData()
      ->profile_builder()
      ->OnProfileCompleted(profile_duration,
                           collection->params.sampling_interval);

  // Signal that this collection is finished.
  WaitableEvent* collection_finished = collection->finished;
  // Ensure the collection is destroyed before signaling, so that it may
  // not outlive StackSamplingProfiler.
  collection.reset();
  collection_finished->Signal();

  ScheduleShutdownIfIdle();
}

void StackSamplingProfiler::SamplingThread::ScheduleShutdownIfIdle() {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());

  if (!active_collections_.empty())
    return;

  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cpu_profiler"),
               "StackSamplingProfiler::SamplingThread::ScheduleShutdownIfIdle");

  int add_events;
  {
    AutoLock lock(thread_execution_state_lock_);
    if (thread_execution_state_disable_idle_shutdown_for_testing_)
      return;
    add_events = thread_execution_state_add_events_;
  }

  GetTaskRunnerOnSamplingThread()->PostDelayedTask(
      FROM_HERE,
      BindOnce(&SamplingThread::ShutdownTask, Unretained(this), add_events),
      Seconds(60));
}

void StackSamplingProfiler::SamplingThread::AddAuxUnwinderTask(
    int collection_id,
    std::unique_ptr<Unwinder> unwinder) {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());

  auto loc = active_collections_.find(collection_id);
  if (loc == active_collections_.end())
    return;

  loc->second->sampler->AddAuxUnwinder(std::move(unwinder));
}

void StackSamplingProfiler::SamplingThread::ApplyMetadataToPastSamplesTask(
    base::TimeTicks period_start,
    base::TimeTicks period_end,
    uint64_t name_hash,
    std::optional<int64_t> key,
    int64_t value,
    std::optional<PlatformThreadId> thread_id) {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
  MetadataRecorder::Item item(name_hash, key, thread_id, value);
  for (auto& id_collection_pair : active_collections_) {
    if (thread_id && id_collection_pair.second->thread_id != thread_id)
      continue;
    id_collection_pair.second->sampler->GetStackUnwindData()
        ->profile_builder()
        ->ApplyMetadataRetrospectively(period_start, period_end, item);
  }
}

void StackSamplingProfiler::SamplingThread::AddProfileMetadataTask(
    uint64_t name_hash,
    std::optional<int64_t> key,
    int64_t value,
    std::optional<PlatformThreadId> thread_id) {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
  MetadataRecorder::Item item(name_hash, key, thread_id, value);
  for (auto& id_collection_pair : active_collections_) {
    if (thread_id && id_collection_pair.second->thread_id != thread_id) {
      continue;
    }
    id_collection_pair.second->sampler->GetStackUnwindData()
        ->profile_builder()
        ->AddProfileMetadata(item);
  }
}

void StackSamplingProfiler::SamplingThread::AddCollectionTask(
    std::unique_ptr<CollectionContext> collection) {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());

  const int collection_id = collection->collection_id;
  const TimeDelta initial_delay = collection->params.initial_delay;

  collection->sampler->Initialize();

  active_collections_.insert(
      std::make_pair(collection_id, std::move(collection)));

  GetTaskRunnerOnSamplingThread()->PostDelayedTask(
      FROM_HERE,
      BindOnce(&SamplingThread::RecordSampleTask, Unretained(this),
               collection_id),
      initial_delay);

  // Another increment of "add events" serves to invalidate any pending
  // shutdown tasks that may have been initiated between the Add() and this
  // task running.
  {
    AutoLock lock(thread_execution_state_lock_);
    ++thread_execution_state_add_events_;
  }
}

void StackSamplingProfiler::SamplingThread::RemoveCollectionTask(
    int collection_id) {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());

  auto found = active_collections_.find(collection_id);
  if (found == active_collections_.end())
    return;

  // Remove |collection| from |active_collections_|.
  std::unique_ptr<CollectionContext> collection = std::move(found->second);
  size_t count = active_collections_.erase(collection_id);
  DCHECK_EQ(1U, count);

  FinishCollection(std::move(collection));
}

void StackSamplingProfiler::SamplingThread::ScheduleCollectionStop(
    int collection_id) {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());

  auto found = active_collections_.find(collection_id);
  if (found == active_collections_.end()) {
    return;
  }

  CollectionContext* collection = found->second.get();
  collection->stopping = true;
  collection->sampler->Stop(BindOnce(&SamplingThread::RemoveCollectionTask,
                                     Unretained(this), collection_id));
}

void StackSamplingProfiler::SamplingThread::RecordSampleTask(
    int collection_id) {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());

  auto found = active_collections_.find(collection_id);

  // The collection won't be found if it has been stopped.
  if (found == active_collections_.end()) {
    return;
  }

  CollectionContext* collection = found->second.get();

  // If we are in the process of stopping, don't collect a stack trace, as
  // that would cause further jobs to be scheduled.
  if (collection->stopping) {
    return;
  }

  // If this is the first sample, the collection params need to be filled.
  if (collection->sample_count == 0) {
    collection->profile_start_time = TimeTicks::Now();
    collection->next_sample_time = TimeTicks::Now();
  }

  bool more_collections_remaining =
      ++collection->sample_count < collection->params.samples_per_profile;
  // Record a single sample.
  collection->sampler->RecordStackFrames(
      stack_buffer_.get(), collection->thread_id,
      more_collections_remaining
          ? DoNothing()
          : BindOnce(&SamplingThread::RemoveCollectionTask, Unretained(this),
                     collection_id));
  // Schedule the next sample recording if there is one.
  if (more_collections_remaining) {
    collection->next_sample_time = GetNextSampleTimeImpl(
        collection->next_sample_time, collection->params.sampling_interval,
        TimeTicks::Now());
    bool success = GetTaskRunnerOnSamplingThread()->PostDelayedTask(
        FROM_HERE,
        BindOnce(&SamplingThread::RecordSampleTask, Unretained(this),
                 collection_id),
        std::max(collection->next_sample_time - TimeTicks::Now(), TimeDelta()));
    DCHECK(success);
    return;
  }
}

void StackSamplingProfiler::SamplingThread::ShutdownTask(int add_events) {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());

  // Holding this lock ensures that any attempt to start another job will
  // get postponed until |thread_execution_state_| is updated, thus eliminating
  // the race in starting a new thread while the previous one is exiting.
  AutoLock lock(thread_execution_state_lock_);

  // If the current count of creation requests doesn't match the passed count
  // then other tasks have been created since this was posted. Abort shutdown.
  if (thread_execution_state_add_events_ != add_events)
    return;

  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cpu_profiler"),
               "StackSamplingProfiler::SamplingThread::ShutdownTask");

  // There can be no new AddCollectionTasks at this point because creating
  // those always increments "add events". There may be other requests, like
  // Remove, but it's okay to schedule the thread to stop once they've been
  // executed (i.e. "soon").
  DCHECK(active_collections_.empty());
  StopSoon();

  // StopSoon will have set the owning sequence (again) so it must be detached
  // (again) in order for Stop/Start to be called (again) should more work
  // come in. Holding the |thread_execution_state_lock_| ensures the necessary
  // happens-after with regard to this detach and future Thread API calls.
  DetachFromSequence();

  // Set the thread_state variable so the thread will be restarted when new
  // work comes in. Remove the |thread_execution_state_task_runner_| to avoid
  // confusion.
  thread_execution_state_ = EXITING;
  thread_execution_state_task_runner_ = nullptr;
  stack_buffer_.reset();
}

void StackSamplingProfiler::SamplingThread::CleanUp() {
  DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());

  // There should be no collections remaining when the thread stops.
  DCHECK(active_collections_.empty());

  // Let the parent clean up.
  Thread::CleanUp();
}

// StackSamplingProfiler ------------------------------------------------------

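// Illustrative usage (a sketch, not part of the implementation; assumes the
// trailing constructor arguments |record_sample_callback| and |test_delegate|
// can be defaulted, per stack_sampling_profiler.h):
//
//   StackSamplingProfiler profiler(thread_token, params,
//                                  std::move(profile_builder),
//                                  std::move(core_unwinders_factory));
//   profiler.Start();  // Sampling proceeds on the shared SamplingThread.
//   ...
//   profiler.Stop();   // Optional; the destructor also stops sampling and
//                      // waits for the collection to finish.
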
// static
void StackSamplingProfiler::TestPeer::Reset() {
  SamplingThread::TestPeer::Reset();
}

// static
bool StackSamplingProfiler::TestPeer::IsSamplingThreadRunning() {
  return SamplingThread::GetInstance()->IsRunning();
}

// static
void StackSamplingProfiler::TestPeer::DisableIdleShutdown() {
  SamplingThread::TestPeer::DisableIdleShutdown();
}

// static
void StackSamplingProfiler::TestPeer::PerformSamplingThreadIdleShutdown(
    bool simulate_intervening_start) {
  SamplingThread::TestPeer::ShutdownAssumingIdle(simulate_intervening_start);
}

// static
TimeTicks StackSamplingProfiler::TestPeer::GetNextSampleTime(
    TimeTicks scheduled_current_sample_time,
    TimeDelta sampling_interval,
    TimeTicks now) {
  return GetNextSampleTimeImpl(scheduled_current_sample_time, sampling_interval,
                               now);
}

// static
// The profiler is currently supported for Windows x64, macOS, iOS 64-bit,
// Android ARM32 and ARM64, and ChromeOS x64 and ARM64.
bool StackSamplingProfiler::IsSupportedForCurrentPlatform() {
#if (BUILDFLAG(IS_WIN) && defined(ARCH_CPU_X86_64)) || BUILDFLAG(IS_MAC) || \
    (BUILDFLAG(IS_IOS) && defined(ARCH_CPU_64_BITS)) ||                     \
    (BUILDFLAG(IS_ANDROID) &&                                               \
     ((defined(ARCH_CPU_ARMEL) && BUILDFLAG(ENABLE_ARM_CFI_TABLE)) ||       \
      (defined(ARCH_CPU_ARM64)))) ||                                        \
    (BUILDFLAG(IS_CHROMEOS) &&                                              \
     (defined(ARCH_CPU_X86_64) || defined(ARCH_CPU_ARM64)))
#if BUILDFLAG(IS_WIN)
  // Do not start the profiler when Application Verifier is in use; running them
  // simultaneously can cause crashes and has no known use case.
  if (GetModuleHandleA(base::win::kApplicationVerifierDllName))
    return false;
  // Checks if Trend Micro DLLs are loaded in process, so we can disable the
  // profiler to avoid hitting their performance bug. See
  // https://crbug.com/1018291 and https://crbug.com/1113832.
  if (GetModuleHandleA("tmmon64.dll") || GetModuleHandleA("tmmonmgr64.dll"))
    return false;
#endif  // BUILDFLAG(IS_WIN)
  return true;
#else
  return false;
#endif
}

StackSamplingProfiler::StackSamplingProfiler(
    SamplingProfilerThreadToken thread_token,
    const SamplingParams& params,
    std::unique_ptr<ProfileBuilder> profile_builder,
    UnwindersFactory core_unwinders_factory,
    RepeatingClosure record_sample_callback,
    StackSamplerTestDelegate* test_delegate)
    : thread_token_(thread_token),
      params_(params),
      sampler_(StackSampler::Create(
          thread_token,
          std::make_unique<StackUnwindData>(std::move(profile_builder)),
          std::move(core_unwinders_factory),
          std::move(record_sample_callback),
          test_delegate)),
      // The event starts "signaled" so code knows it's safe to start the
      // thread, and "manual" so that it can be waited on in multiple places.
      profiling_inactive_(kResetPolicy, WaitableEvent::InitialState::SIGNALED),
      profiler_id_(kNullProfilerId) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cpu_profiler"),
               "StackSamplingProfiler::StackSamplingProfiler");
}

StackSamplingProfiler::~StackSamplingProfiler() {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cpu_profiler"),
               "StackSamplingProfiler::~StackSamplingProfiler");

  // Stop returns immediately but the shutdown runs asynchronously. There is a
  // non-zero probability that one more sample will be taken after this call
  // returns.
  Stop();

  // The behavior of sampling a thread that has exited is undefined and could
  // cause Bad Things(tm) to occur. The safety model provided by this class is
  // that an instance of this object is expected to live at least as long as
  // the thread it is sampling. However, because the sampling is performed
  // asynchronously by the SamplingThread, there is no way to guarantee this
  // is true without waiting for it to signal that it has finished.
  //
  // The wait time should, at most, be only as long as it takes to collect one
  // sample (~200us) or none at all if sampling has already completed.
  ScopedAllowBaseSyncPrimitivesOutsideBlockingScope allow_wait;
  profiling_inactive_.Wait();
}

void StackSamplingProfiler::Start() {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cpu_profiler"),
               "StackSamplingProfiler::Start");

  // Multiple calls to Start() for a single StackSamplingProfiler object are
  // not allowed. If `sampler_` is nullptr, then Start() has been called
  // already or sampling isn't supported on the current platform.
  if (!sampler_) {
    return;
  }

  // The IsSignaled() check below requires that the WaitableEvent be manually
  // reset, to avoid resetting the event in IsSignaled() itself.
  static_assert(kResetPolicy == WaitableEvent::ResetPolicy::MANUAL,
                "The reset policy must be set to MANUAL");

  // If a previous profiling phase is still winding down, wait for it to
  // complete. We can't use task posting for this coordination because the
  // thread owning the profiler may not have a message loop.
  if (!profiling_inactive_.IsSignaled())
    profiling_inactive_.Wait();
  profiling_inactive_.Reset();

  DCHECK_EQ(kNullProfilerId, profiler_id_);
  profiler_id_ = SamplingThread::GetInstance()->Add(
      std::make_unique<SamplingThread::CollectionContext>(
          thread_token_.id, params_, &profiling_inactive_,
          std::move(sampler_)));
  DCHECK_NE(kNullProfilerId, profiler_id_);

  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("cpu_profiler"),
               "StackSamplingProfiler::Started", "profiler_id", profiler_id_);
}

void StackSamplingProfiler::Stop() {
  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("cpu_profiler"),
               "StackSamplingProfiler::Stop", "profiler_id", profiler_id_);

  SamplingThread::GetInstance()->Remove(profiler_id_);
  profiler_id_ = kNullProfilerId;
}

void StackSamplingProfiler::AddAuxUnwinder(std::unique_ptr<Unwinder> unwinder) {
  if (profiler_id_ == kNullProfilerId) {
    // We haven't started sampling, so we can add |unwinder| to the sampler
    // directly.
    if (sampler_)
      sampler_->AddAuxUnwinder(std::move(unwinder));
    return;
  }

  SamplingThread::GetInstance()->AddAuxUnwinder(profiler_id_,
                                                std::move(unwinder));
}

// static
void StackSamplingProfiler::ApplyMetadataToPastSamples(
    base::TimeTicks period_start,
    base::TimeTicks period_end,
    uint64_t name_hash,
    std::optional<int64_t> key,
    int64_t value,
    std::optional<PlatformThreadId> thread_id) {
  SamplingThread::GetInstance()->ApplyMetadataToPastSamples(
      period_start, period_end, name_hash, key, value, thread_id);
}

// static
void StackSamplingProfiler::AddProfileMetadata(
    uint64_t name_hash,
    int64_t key,
    int64_t value,
    std::optional<PlatformThreadId> thread_id) {
  SamplingThread::GetInstance()->AddProfileMetadata(name_hash, key, value,
                                                    thread_id);
}

}  // namespace base