// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/task/sequence_manager/thread_controller.h"

#include <atomic>

#include "base/check.h"
#include "base/feature_list.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_base.h"
#include "base/notreached.h"
#include "base/strings/string_util.h"
#include "base/time/tick_clock.h"
#include "base/trace_event/base_tracing.h"
namespace base {
namespace sequence_manager {
namespace internal {

namespace {
// Controls whether sample metadata is recorded in this class. Enabled by
// default to ensure no data is lost.
BASE_FEATURE(kThreadControllerSetsProfilerMetadata,
             "ThreadControllerSetsProfilerMetadata",
             base::FEATURE_ENABLED_BY_DEFAULT);

// Thread-safe copy, updated once the feature list is available. This defaults
// to true to make sure that no metadata is lost on clients that need to
// record. This leads to some overreporting before feature list initialization
// on other clients, but that's still much better than the previous situation,
// which was reporting all the time.
std::atomic<bool> g_thread_controller_sets_profiler_metadata{true};
}  // namespace

ThreadController::ThreadController(const TickClock* time_source)
    : associated_thread_(AssociatedThreadId::CreateUnbound()),
      time_source_(time_source) {}

ThreadController::~ThreadController() = default;

void ThreadController::SetTickClock(const TickClock* clock) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  time_source_ = clock;
}

ThreadController::RunLevelTracker::RunLevelTracker(
    const ThreadController& outer)
    : outer_(outer) {}

ThreadController::RunLevelTracker::~RunLevelTracker() {
  DCHECK_CALLED_ON_VALID_THREAD(outer_->associated_thread_->thread_checker);

  // There shouldn't be any remaining |run_levels_| by the time this unwinds.
  DCHECK_EQ(run_levels_.size(), 0u);
}

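// Snapshots the feature state into the atomic above once the FeatureList is
// available. A relaxed store/load pair appears sufficient here (an
// assumption worth noting): the flag flips at most once, at feature-list
// initialization, and no ordering with other memory is required by readers.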
// static
void ThreadController::InitializeFeatures() {
  g_thread_controller_sets_profiler_metadata.store(
      base::FeatureList::IsEnabled(kThreadControllerSetsProfilerMetadata),
      std::memory_order_relaxed);
}

bool ThreadController::RunLevelTracker::RunLevel::ShouldRecordSampleMetadata() {
  return g_thread_controller_sets_profiler_metadata.load(
      std::memory_order_relaxed);
}

void ThreadController::EnableMessagePumpTimeKeeperMetrics(
    const char* thread_name) {
  // MessagePump runs too fast; a low-res clock would result in noisy metrics.
  if (!base::TimeTicks::IsHighResolution())
    return;

  run_level_tracker_.EnableTimeKeeperMetrics(thread_name);
}

void ThreadController::RunLevelTracker::EnableTimeKeeperMetrics(
    const char* thread_name) {
  time_keeper_.EnableRecording(thread_name);
}

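// Sets up the "Scheduling.MessagePumpTimeKeeper.<thread_name>" linear
// histogram with one bucket per Phase and, when base tracing is compiled in,
// the perfetto track on which the per-phase trace events below are emitted.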
void ThreadController::RunLevelTracker::TimeKeeper::EnableRecording(
    const char* thread_name) {
  DCHECK(!histogram_);
  histogram_ = LinearHistogram::FactoryGet(
      JoinString({"Scheduling.MessagePumpTimeKeeper", thread_name}, "."), 1,
      Phase::kLastPhase, Phase::kLastPhase + 1,
      base::HistogramBase::kUmaTargetedHistogramFlag);

#if BUILDFLAG(ENABLE_BASE_TRACING)
  perfetto_track_.emplace(
      reinterpret_cast<uint64_t>(this),
      // TODO(crbug.com/1006541): Replace with ThreadTrack::Current() after SDK
      // migration.
      // In the non-SDK version, ThreadTrack::Current() returns a different
      // track id on some platforms (for example Mac OS), which results in
      // async tracks not being associated with their thread.
      perfetto::ThreadTrack::ForThread(base::PlatformThread::CurrentId()));
  // TODO(crbug.com/1006541): Use the Perfetto library to name this track.
  // auto desc = perfetto_track_->Serialize();
  // desc.set_name(JoinString({"MessagePumpPhases", thread_name}, " "));
  // perfetto::internal::TrackEventDataSource::SetTrackDescriptor(
  //     *perfetto_track_, desc);
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)
}

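// Illustrative walk through the run-level state machine for a single,
// non-nested run level (states are from the State enum; each transition is
// driven by one of the On*() callbacks below):
//
//   OnRunLoopStarted(kIdle)  => run level pushed, state kIdle
//   OnWorkStarted()          => kIdle -> kRunningWorkItem (records a wake-up)
//   OnWorkEnded()            => kRunningWorkItem -> kInBetweenWorkItems
//   OnIdle()                 => kInBetweenWorkItems -> kIdle
//   OnRunLoopEnded()         => run level popped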
void ThreadController::RunLevelTracker::OnRunLoopStarted(State initial_state,
                                                         LazyNow& lazy_now) {
  DCHECK_CALLED_ON_VALID_THREAD(outer_->associated_thread_->thread_checker);

  const bool is_nested = !run_levels_.empty();
  run_levels_.emplace(initial_state, is_nested, time_keeper_, lazy_now
#if BUILDFLAG(ENABLE_BASE_TRACING)
                      ,
                      terminating_wakeup_lambda_
#endif
  );

  // In unit tests, RunLoop::Run() acts as the initial wake-up.
  if (!is_nested && initial_state != kIdle)
    time_keeper_.RecordWakeUp(lazy_now);
}

void ThreadController::RunLevelTracker::OnRunLoopEnded() {
  DCHECK_CALLED_ON_VALID_THREAD(outer_->associated_thread_->thread_checker);
  // Normally this occurs while kIdle or kInBetweenWorkItems, but it can also
  // occur while kRunningWorkItem in rare situations where the owning
  // ThreadController is deleted from within a task. Ref.
  // SequenceManagerWithTaskRunnerTest.DeleteSequenceManagerInsideATask. Thus
  // we can't assert anything about the current state other than that it must
  // be exiting an existing RunLevel.
  DCHECK(!run_levels_.empty());
  LazyNow exit_lazy_now(outer_->time_source_);
  run_levels_.top().set_exit_lazy_now(&exit_lazy_now);
  run_levels_.pop();
}

void ThreadController::RunLevelTracker::OnWorkStarted(LazyNow& lazy_now) {
  DCHECK_CALLED_ON_VALID_THREAD(outer_->associated_thread_->thread_checker);
  // Ignore work outside the main run loop.
  // The only practical case where this would happen is if a native loop is
  // spun outside the main run loop (e.g. system dialog during startup). We
  // cannot support this because we are not guaranteed to be able to observe
  // its exit (like we would inside an application task, which is at least
  // guaranteed to itself notify us when it ends). Some
  // ThreadControllerWithMessagePumpTest cases also drive ThreadController
  // outside a RunLoop and hit this.
  if (run_levels_.empty())
    return;

  // Already running a work item? => #work-in-work-implies-nested
  if (run_levels_.top().state() == kRunningWorkItem) {
    run_levels_.emplace(kRunningWorkItem, /*nested=*/true, time_keeper_,
                        lazy_now
#if BUILDFLAG(ENABLE_BASE_TRACING)
                        ,
                        terminating_wakeup_lambda_
#endif
    );
  } else {
    if (run_levels_.top().state() == kIdle) {
      time_keeper_.RecordWakeUp(lazy_now);
    } else {
      time_keeper_.RecordEndOfPhase(kPumpOverhead, lazy_now);
    }

    // Going from kIdle or kInBetweenWorkItems to kRunningWorkItem.
    run_levels_.top().UpdateState(kRunningWorkItem);
  }
}

void ThreadController::RunLevelTracker::OnApplicationTaskSelected(
    TimeTicks queue_time,
    LazyNow& lazy_now) {
  DCHECK_CALLED_ON_VALID_THREAD(outer_->associated_thread_->thread_checker);
  // As in OnWorkStarted(). Early native loops can result in
  // ThreadController::DoWork because the lack of a top-level RunLoop means
  // `task_execution_allowed` wasn't consumed.
  if (run_levels_.empty())
    return;

  // OnWorkStarted() is expected to precede OnApplicationTaskSelected().
  DCHECK_EQ(run_levels_.top().state(), kRunningWorkItem);

  time_keeper_.OnApplicationTaskSelected(queue_time, lazy_now);
}

void ThreadController::RunLevelTracker::OnWorkEnded(LazyNow& lazy_now,
                                                    int run_level_depth) {
  DCHECK_CALLED_ON_VALID_THREAD(outer_->associated_thread_->thread_checker);
  if (run_levels_.empty())
    return;

  // #done-work-at-lower-runlevel-implies-done-nested
  if (run_level_depth != static_cast<int>(num_run_levels())) {
    DCHECK_EQ(run_level_depth + 1, static_cast<int>(num_run_levels()));
    run_levels_.top().set_exit_lazy_now(&lazy_now);
    run_levels_.pop();
  } else {
    time_keeper_.RecordEndOfPhase(kWorkItem, lazy_now);
  }

  // Whether we exited a nested run-level or not: the current run-level is now
  // transitioning from kRunningWorkItem to kInBetweenWorkItems.
  DCHECK_EQ(run_levels_.top().state(), kRunningWorkItem);
  run_levels_.top().UpdateState(kInBetweenWorkItems);
}

void ThreadController::RunLevelTracker::OnIdle(LazyNow& lazy_now) {
  DCHECK_CALLED_ON_VALID_THREAD(outer_->associated_thread_->thread_checker);
  if (run_levels_.empty())
    return;

  DCHECK_NE(run_levels_.top().state(), kRunningWorkItem);
  time_keeper_.RecordEndOfPhase(kIdleWork, lazy_now);
  run_levels_.top().UpdateState(kIdle);
}

void ThreadController::RunLevelTracker::RecordScheduleWork() {
  // Matching TerminatingFlow is found at
  // ThreadController::RunLevelTracker::RunLevel::UpdateState().
  if (outer_->associated_thread_->IsBoundToCurrentThread()) {
    TRACE_EVENT_INSTANT("wakeup.flow", "ScheduleWorkToSelf");
  } else {
    TRACE_EVENT_INSTANT("wakeup.flow", "ScheduleWork",
                        perfetto::Flow::FromPointer(this));
  }
}

// static
void ThreadController::RunLevelTracker::SetTraceObserverForTesting(
    TraceObserverForTesting* trace_observer_for_testing) {
  DCHECK_NE(!!trace_observer_for_testing_, !!trace_observer_for_testing);
  trace_observer_for_testing_ = trace_observer_for_testing;
}

// static
ThreadController::RunLevelTracker::TraceObserverForTesting*
    ThreadController::RunLevelTracker::trace_observer_for_testing_ = nullptr;

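// A RunLevel is pushed for each RunLoop, and for each work item that starts
// while another is already running (#work-in-work-implies-nested). A nested
// RunLevel suspends the parent's current work-item phase on entry
// (kWorkItemSuspendedOnNested) and attributes its entire lifetime to the
// kNested phase on exit.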
ThreadController::RunLevelTracker::RunLevel::RunLevel(
    State initial_state,
    bool is_nested,
    TimeKeeper& time_keeper,
    LazyNow& lazy_now
#if BUILDFLAG(ENABLE_BASE_TRACING)
    ,
    TerminatingFlowLambda& terminating_wakeup_flow_lambda
#endif
    )
    : is_nested_(is_nested),
      time_keeper_(time_keeper),
      thread_controller_sample_metadata_("ThreadController active",
                                         base::SampleMetadataScope::kThread)
#if BUILDFLAG(ENABLE_BASE_TRACING)
      ,
      terminating_wakeup_flow_lambda_(terminating_wakeup_flow_lambda)
#endif
{
  if (is_nested_) {
    // Stop the current kWorkItem phase now; it will resume after the kNested
    // phase ends.
    time_keeper_->RecordEndOfPhase(kWorkItemSuspendedOnNested, lazy_now);
  }
  UpdateState(initial_state);
}

ThreadController::RunLevelTracker::RunLevel::~RunLevel() {
  if (!was_moved_) {
    DCHECK(exit_lazy_now_);
    UpdateState(kIdle);
    if (is_nested_) {
      // Attribute the entire time in this nested RunLevel to the kNested
      // phase. If this wasn't the last nested RunLevel, this is ignored and
      // will be applied on the final pop().
      time_keeper_->RecordEndOfPhase(kNested, *exit_lazy_now_);

      if (ShouldRecordSampleMetadata()) {
        // Intentionally ordered after UpdateState(kIdle); reinstantiates
        // thread_controller_sample_metadata_ when yielding back to a parent
        // RunLevel (which is active by definition, as it is currently running
        // this one).
        thread_controller_sample_metadata_.Set(
            static_cast<int64_t>(++thread_controller_active_id_));
      }
    }
  }
}

ThreadController::RunLevelTracker::RunLevel::RunLevel(RunLevel&& other) =
    default;

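// Keeps the "ThreadController active" trace event and the profiler sample
// metadata in sync with this run level's active/idle transitions: a
// TRACE_EVENT_BEGIN when leaving kIdle and a TRACE_EVENT_END when returning
// to it. Transitions between two active states are no-ops here.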
void ThreadController::RunLevelTracker::RunLevel::UpdateState(State new_state) {
  // The only state that can be redeclared is idle; anything else should be a
  // transition.
  DCHECK(state_ != new_state || new_state == kIdle)
      << state_ << "," << new_state;

  const bool was_active = state_ != kIdle;
  const bool is_active = new_state != kIdle;

  state_ = new_state;
  if (was_active == is_active)
    return;

  // Change of state.
  if (is_active) {
    // Flow emission is found at
    // ThreadController::RunLevelTracker::RecordScheduleWork().
    TRACE_EVENT_BEGIN("base", "ThreadController active",
                      *terminating_wakeup_flow_lambda_);

    if (ShouldRecordSampleMetadata()) {
      // Overriding the annotation from the previous RunLevel is intentional.
      // Only the top RunLevel is ever updated, which holds the relevant state.
      thread_controller_sample_metadata_.Set(
          static_cast<int64_t>(++thread_controller_active_id_));
    }
  } else {
    if (ShouldRecordSampleMetadata()) {
      thread_controller_sample_metadata_.Remove();
    }
    TRACE_EVENT_END("base");
    // TODO(crbug.com/1021571): Remove this once fixed.
    PERFETTO_INTERNAL_ADD_EMPTY_EVENT();
  }

  if (trace_observer_for_testing_) {
    if (is_active)
      trace_observer_for_testing_->OnThreadControllerActiveBegin();
    else
      trace_observer_for_testing_->OnThreadControllerActiveEnd();
  }
}

ThreadController::RunLevelTracker::TimeKeeper::TimeKeeper(
    const RunLevelTracker& outer)
    : outer_(outer) {}

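// Stamps the wake-up time so that subsequent phases are measured from it. The
// preceding kScheduled span is only attributed later, in
// OnApplicationTaskSelected(), once a task's queue time is known.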
void ThreadController::RunLevelTracker::TimeKeeper::RecordWakeUp(
    LazyNow& lazy_now) {
  if (!ShouldRecordNow(ShouldRecordReqs::kOnWakeUp))
    return;

  // Phase::kScheduled will be accounted against `last_wakeup_` in
  // OnApplicationTaskSelected(), if there's an application task in this work
  // cycle.
  last_wakeup_ = lazy_now.Now();
  // Account the next phase starting from now.
  last_phase_end_ = last_wakeup_;

#if BUILDFLAG(ENABLE_BASE_TRACING)
  // Emit the END of the kScheduled phase right away; this avoids incorrect
  // ordering when kScheduled is later emitted and its END matches the BEGIN of
  // an already emitted phase (tracing's sort is stable and would keep the late
  // END for kScheduled after the earlier BEGIN of the next phase):
  // crbug.com/1333460. As we just woke up, there are no events active at this
  // point (we don't record MessagePumpPhases while nested). In the absence of
  // a kScheduled phase, this unmatched END will be ignored.
  TRACE_EVENT_END(TRACE_DISABLED_BY_DEFAULT("base"), *perfetto_track_,
                  last_wakeup_);
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)
}

void ThreadController::RunLevelTracker::TimeKeeper::OnApplicationTaskSelected(
    TimeTicks queue_time,
    LazyNow& lazy_now) {
  if (!ShouldRecordNow())
    return;

  if (!last_wakeup_.is_null()) {
    // `queue_time` can be null on threads that did not
    // `SetAddQueueTimeToTasks(true)`. `queue_time` can also be ahead of
    // `last_wakeup_` in racy cases where the first Chrome task is enqueued
    // while the pump was already awake (e.g. for native work). Consider the
    // kScheduled phase nonexistent in that case.
    if (!queue_time.is_null() && queue_time < last_wakeup_) {
      if (!last_sleep_.is_null() && queue_time < last_sleep_) {
        // Avoid overlapping kScheduled and kIdleWork phases when work is
        // scheduled while going to sleep.
        queue_time = last_sleep_;
      }
      RecordTimeInPhase(kScheduled, queue_time, last_wakeup_);
#if BUILDFLAG(ENABLE_BASE_TRACING)
      // Match the END event which was already emitted by RecordWakeUp().
      TRACE_EVENT_BEGIN(TRACE_DISABLED_BY_DEFAULT("base"),
                        perfetto::StaticString(PhaseToEventName(kScheduled)),
                        *perfetto_track_, queue_time);
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)
    }
    last_wakeup_ = TimeTicks();
  }
  RecordEndOfPhase(kSelectingApplicationTask, lazy_now);
  current_work_item_is_native_ = false;
}

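// Closes the phase that began at `last_phase_end_`, attributing the elapsed
// time to `phase` (after resolving the generic kWorkItem and
// kWorkItemSuspendedOnNested into kApplicationTask or kNativeWork), and emits
// the matching begin/end pair on the perfetto track when tracing is enabled.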
void ThreadController::RunLevelTracker::TimeKeeper::RecordEndOfPhase(
    Phase phase,
    LazyNow& lazy_now) {
  if (!ShouldRecordNow(phase == kNested ? ShouldRecordReqs::kOnEndNested
                                        : ShouldRecordReqs::kRegular)) {
    return;
  }

  if (phase == kWorkItem && !current_work_item_is_native_) {
    phase = kApplicationTask;
    // Back to assuming future work is native until OnApplicationTaskSelected()
    // is invoked.
    current_work_item_is_native_ = true;
  } else if (phase == kWorkItemSuspendedOnNested) {
    // kWorkItemSuspendedOnNested temporarily marks the end of time allocated
    // to the current work item. It is reported as a separate phase to skip the
    // above `current_work_item_is_native_ = true`, which assumes the work item
    // is truly complete.
    phase = current_work_item_is_native_ ? kNativeWork : kApplicationTask;
  }

  const TimeTicks phase_end = lazy_now.Now();
  RecordTimeInPhase(phase, last_phase_end_, phase_end);

#if BUILDFLAG(ENABLE_BASE_TRACING)
  // Ugly hack to name our `perfetto_track_`.
  bool is_tracing_enabled = false;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("base"),
                                     &is_tracing_enabled);
  if (is_tracing_enabled) {
    if (!was_tracing_enabled_) {
      // The first event name on the track hackily names the track...
      // TODO(crbug.com/1006541): Use the Perfetto library to properly name
      // this track in EnableRecording() above.
      TRACE_EVENT_INSTANT(TRACE_DISABLED_BY_DEFAULT("base"),
                          "MessagePumpPhases", *perfetto_track_,
                          last_phase_end_ - Seconds(1));
    }

    const char* event_name = PhaseToEventName(phase);
    TRACE_EVENT_BEGIN(TRACE_DISABLED_BY_DEFAULT("base"),
                      perfetto::StaticString(event_name), *perfetto_track_,
                      last_phase_end_);
    TRACE_EVENT_END(TRACE_DISABLED_BY_DEFAULT("base"), *perfetto_track_,
                    phase_end);
  }
  was_tracing_enabled_ = is_tracing_enabled;
#endif  // BUILDFLAG(ENABLE_BASE_TRACING)

  last_phase_end_ = phase_end;
}

bool ThreadController::RunLevelTracker::TimeKeeper::ShouldRecordNow(
    ShouldRecordReqs reqs) {
  DCHECK_CALLED_ON_VALID_THREAD(
      outer_->outer_->associated_thread_->thread_checker);
  // Recording is technically enabled once `histogram_` is set, but
  // `last_phase_end_` will be null until the next RecordWakeUp() in the work
  // cycle in which `histogram_` is enabled. Only start recording from there.
  // Ignore any nested phases. `reqs` may indicate exceptions to this.
  //
  // TODO(crbug.com/1329717): In a follow-up, we could probably always be
  // tracking the phases of the pump and merely ignore the reporting if
  // `histogram_` isn't set.
  switch (reqs) {
    case ShouldRecordReqs::kRegular:
      return histogram_ && !last_phase_end_.is_null() &&
             outer_->run_levels_.size() == 1;
    case ShouldRecordReqs::kOnWakeUp:
      return histogram_ && outer_->run_levels_.size() == 1;
    case ShouldRecordReqs::kOnEndNested:
      return histogram_ && !last_phase_end_.is_null() &&
             outer_->run_levels_.size() <= 2;
  }
}

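// Accounting sketch with illustrative numbers: attributing 60ms and then 70ms
// to the same phase leaves deltas_[phase] at 60ms after the first call (below
// the 100ms report threshold), then the second call brings it to 130ms,
// reports AddCount(phase, 130) in one-millisecond units, and carries any
// sub-millisecond remainder forward.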
void ThreadController::RunLevelTracker::TimeKeeper::RecordTimeInPhase(
    Phase phase,
    TimeTicks phase_begin,
    TimeTicks phase_end) {
  DCHECK(ShouldRecordNow(phase == kNested ? ShouldRecordReqs::kOnEndNested
                                          : ShouldRecordReqs::kRegular));

  // Report a phase only when at least 100ms has been attributed to it.
  static constexpr auto kReportInterval = Milliseconds(100);

  // Above 30s in a single phase, assume suspend-resume and ignore the report.
  static constexpr auto kSkippedDelta = Seconds(30);

  const auto delta = phase_end - phase_begin;
  DCHECK(!delta.is_negative()) << delta;
  if (delta >= kSkippedDelta)
    return;

  deltas_[phase] += delta;
  if (deltas_[phase] >= kReportInterval) {
    const int count = deltas_[phase] / Milliseconds(1);
    histogram_->AddCount(phase, count);
    deltas_[phase] -= Milliseconds(count);
  }

  if (phase == kIdleWork)
    last_sleep_ = phase_end;

  if (outer_->trace_observer_for_testing_)
    outer_->trace_observer_for_testing_->OnPhaseRecorded(phase);
}

// static
const char* ThreadController::RunLevelTracker::TimeKeeper::PhaseToEventName(
    Phase phase) {
  switch (phase) {
    case kScheduled:
      return "Scheduled";
    case kPumpOverhead:
      return "PumpOverhead";
    case kNativeWork:
      return "NativeTask";
    case kSelectingApplicationTask:
      return "SelectingApplicationTask";
    case kApplicationTask:
      return "ApplicationTask";
    case kIdleWork:
      return "IdleWork";
    case kNested:
      return "Nested";
    case kWorkItemSuspendedOnNested:
      // kWorkItemSuspendedOnNested should be transformed into kNativeWork or
      // kApplicationTask before this point.
      NOTREACHED();
      return "";
  }
}

}  // namespace internal
}  // namespace sequence_manager
}  // namespace base