// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TASK_THREAD_POOL_THREAD_GROUP_H_
#define BASE_TASK_THREAD_POOL_THREAD_GROUP_H_

#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <vector>

#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/dcheck_is_on.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/raw_ptr_exclusion.h"
#include "base/memory/stack_allocated.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/waitable_event.h"
#include "base/task/common/checked_lock.h"
#include "base/task/thread_pool/priority_queue.h"
#include "base/task/thread_pool/task.h"
#include "base/task/thread_pool/task_source.h"
#include "base/task/thread_pool/tracked_ref.h"
#include "base/task/thread_pool/worker_thread.h"
#include "build/build_config.h"
#include "third_party/abseil-cpp/absl/container/inlined_vector.h"

#if BUILDFLAG(IS_WIN)
#include "base/win/scoped_windows_thread_environment.h"
#endif

namespace base {

class WorkerThreadObserver;

namespace internal {

class TaskTracker;

// Interface and base implementation for a thread group. A thread group is a
// subset of the threads in the thread pool (see GetThreadGroupForTraits() for
// thread group selection logic when posting tasks and creating task runners).
//
// This class is thread-safe.
class BASE_EXPORT ThreadGroup {
 public:
  // Delegate interface for ThreadGroup.
  class BASE_EXPORT Delegate {
   public:
    virtual ~Delegate() = default;

    // Invoked when a TaskSource with |traits| is non-empty after the
    // ThreadGroup has run a task from it. The implementation must return the
    // thread group in which the TaskSource should be reenqueued.
    virtual ThreadGroup* GetThreadGroupForTraits(const TaskTraits& traits) = 0;
  };
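
  // A minimal sketch, not part of this file, of what a Delegate implementation
  // might look like; the two-group routing and the |foreground_group_| /
  // |background_group_| members are hypothetical:
  //
  //   class ExampleDelegate : public ThreadGroup::Delegate {
  //    public:
  //     ThreadGroup* GetThreadGroupForTraits(const TaskTraits& traits) override {
  //       // Route BEST_EFFORT work to a dedicated background group.
  //       return traits.priority() == TaskPriority::BEST_EFFORT
  //                  ? background_group_.get()
  //                  : foreground_group_.get();
  //     }
  //
  //    private:
  //     std::unique_ptr<ThreadGroup> foreground_group_;
  //     std::unique_ptr<ThreadGroup> background_group_;
  //   };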

  enum class WorkerEnvironment {
    // No special worker environment required.
    NONE,
#if BUILDFLAG(IS_WIN)
    // Initialize a COM MTA on the worker.
    COM_MTA,
#endif  // BUILDFLAG(IS_WIN)
  };

  ThreadGroup(const ThreadGroup&) = delete;
  ThreadGroup& operator=(const ThreadGroup&) = delete;
  virtual ~ThreadGroup();

  // Creates threads, allowing existing and future tasks to run. The thread
  // group runs at most `max_tasks` / `max_best_effort_tasks` unblocked tasks
  // with any / BEST_EFFORT priority concurrently. It reclaims unused threads
  // after `suggested_reclaim_time`. It uses `service_thread_task_runner` to
  // monitor for blocked tasks and to set up FileDescriptorWatcher on worker
  // threads; `service_thread_task_runner` must refer to a Thread with
  // MessagePumpType::IO. If specified, it notifies `worker_thread_observer`
  // when a worker enters and exits its main function (the observer must not be
  // destroyed before JoinForTesting() has returned). `worker_environment`
  // specifies the environment in which tasks are executed.
  // `may_block_threshold` is the timeout after which a task in a MAY_BLOCK
  // ScopedBlockingCall is considered blocked (the thread group will choose an
  // appropriate value if none is specified).
  // `synchronous_thread_start_for_testing` is true if this ThreadGroup should
  // synchronously wait for OnMainEntry() after starting each worker. Can only
  // be called once. CHECKs on failure.
  virtual void Start(
      size_t max_tasks,
      size_t max_best_effort_tasks,
      TimeDelta suggested_reclaim_time,
      scoped_refptr<SingleThreadTaskRunner> service_thread_task_runner,
      WorkerThreadObserver* worker_thread_observer,
      WorkerEnvironment worker_environment,
      bool synchronous_thread_start_for_testing,
      std::optional<TimeDelta> may_block_threshold) = 0;
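
  // A hedged usage sketch (all argument values below are illustrative, and
  // |io_thread_task_runner| is an assumed MessagePumpType::IO runner, not a
  // name defined in this file):
  //
  //   group->Start(/*max_tasks=*/4,
  //                /*max_best_effort_tasks=*/2,
  //                /*suggested_reclaim_time=*/Seconds(30),
  //                io_thread_task_runner,
  //                /*worker_thread_observer=*/nullptr,
  //                ThreadGroup::WorkerEnvironment::NONE,
  //                /*synchronous_thread_start_for_testing=*/false,
  //                /*may_block_threshold=*/std::nullopt);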

  // Registers the thread group in TLS.
  void BindToCurrentThread();

  // Resets the thread group in TLS.
  void UnbindFromCurrentThread();

  // Returns true if the thread group is registered in TLS.
  bool IsBoundToCurrentThread() const;
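
  // A sketch of the bind/unbind pairing on a worker's main thread; this
  // pattern is inferred from the comments above, not prescribed by this
  // header, and WorkerMain() is a hypothetical name:
  //
  //   void WorkerMain(ThreadGroup* group) {
  //     group->BindToCurrentThread();
  //     DCHECK(group->IsBoundToCurrentThread());
  //     // ... run tasks ...
  //     group->UnbindFromCurrentThread();
  //   }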

  // Sets a new maximum number of concurrent tasks, subject to adjustments for
  // blocking tasks.
  void SetMaxTasks(size_t max_tasks);

  // Resets the maximum number of concurrent tasks to the default provided in
  // the constructor, subject to adjustments for blocking tasks.
  void ResetMaxTasks();

  // Removes |task_source| from |priority_queue_|. Returns a
  // RegisteredTaskSource that evaluates to true if successful, or false if
  // |task_source| is not currently in |priority_queue_|, such as when a worker
  // is running a task from it.
  RegisteredTaskSource RemoveTaskSource(const TaskSource& task_source);

  // Updates the position of the TaskSource in |transaction| in this
  // ThreadGroup's PriorityQueue based on the TaskSource's current traits.
  //
  // Implementations should instantiate a concrete ScopedCommandsExecutor and
  // invoke UpdateSortKeyImpl(), as sketched below.
  virtual void UpdateSortKey(TaskSource::Transaction transaction) = 0;

  // Pushes the TaskSource in |transaction_with_task_source| into this
  // ThreadGroup's PriorityQueue and wakes up workers as appropriate.
  //
  // Implementations should instantiate a concrete ScopedCommandsExecutor and
  // invoke PushTaskSourceAndWakeUpWorkersImpl().
  virtual void PushTaskSourceAndWakeUpWorkers(
      RegisteredTaskSourceAndTransaction transaction_with_task_source) = 0;
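
  // A minimal sketch of the pattern the two comments above describe, assuming
  // a derived class ThreadGroupImpl with a concrete ScopedCommandsExecutor
  // (both names are illustrative):
  //
  //   void ThreadGroupImpl::UpdateSortKey(TaskSource::Transaction transaction) {
  //     ScopedCommandsExecutor executor(this);
  //     UpdateSortKeyImpl(&executor, std::move(transaction));
  //   }  // |executor| flushes its commands here, after locks are released.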

  // Moves all task sources from this ThreadGroup's PriorityQueue to the
  // |destination_thread_group|'s.
  void HandoffAllTaskSourcesToOtherThreadGroup(
      ThreadGroup* destination_thread_group);
  // Moves all task sources, except the ones with TaskPriority::USER_BLOCKING,
  // from this ThreadGroup's PriorityQueue to the |destination_thread_group|'s.
  void HandoffNonUserBlockingTaskSourcesToOtherThreadGroup(
      ThreadGroup* destination_thread_group);

  // Returns true if a task with |sort_key| running in this thread group should
  // return ASAP, either because its priority is not allowed to run or because
  // work of higher priority is pending. Thread-safe, but may return an outdated
  // result (if a task unnecessarily yields due to this, it will simply be
  // re-scheduled).
  bool ShouldYield(TaskSourceSortKey sort_key);
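
  // A hedged sketch of how a long-running task might cooperate with
  // ShouldYield(); |group|, |sort_key|, and the work-loop helpers are
  // illustrative names, not APIs defined here:
  //
  //   while (HasMoreWork()) {
  //     DoSomeWork();
  //     if (group->ShouldYield(sort_key))
  //       return;  // Re-scheduled later; higher-priority work runs first.
  //   }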

  // Prevents new tasks from starting to run and waits for currently running
  // tasks to complete their execution. It is guaranteed that no thread will do
  // work on behalf of this ThreadGroup after this returns. It is invalid to
  // post a task once this is called. TaskTracker::Flush() can be called before
  // this to complete existing tasks, which might otherwise post a task during
  // JoinForTesting(). This can only be called once.
  virtual void JoinForTesting() = 0;

  // Returns the maximum number of non-blocked tasks that can run concurrently
  // in this ThreadGroup.
  //
  // TODO(fdoray): Remove this method. https://crbug.com/687264
  virtual size_t GetMaxConcurrentNonBlockedTasksDeprecated() const;

  // Wakes up workers as appropriate for the new CanRunPolicy policy. Must be
  // called after an update to CanRunPolicy in TaskTracker.
  virtual void DidUpdateCanRunPolicy() = 0;

  virtual void OnShutdownStarted() = 0;

  // Returns true if a thread group is registered in TLS. Used by diagnostic
  // code to check whether it's inside a ThreadPool task.
  static bool CurrentThreadHasGroup();

  // Returns |max_tasks_|/|max_best_effort_tasks_|.
  size_t GetMaxTasksForTesting() const;
  size_t GetMaxBestEffortTasksForTesting() const;

  // Waits until at least |n| workers are idle. Workers are disallowed from
  // cleaning up during this call, but tests using a custom
  // |suggested_reclaim_time_| must invoke this promptly after unblocking the
  // waited-upon workers: a worker that has already detached by the time this
  // is invoked will never make it onto the idle set, and this call will hang.
  void WaitForWorkersIdleForTesting(size_t n);

  // Waits until at least |n| workers are idle.
  void WaitForWorkersIdleLockRequiredForTesting(size_t n)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Waits until all workers are idle.
  void WaitForAllWorkersIdleForTesting();

  // Waits until |n| workers have cleaned up (gone through
  // WorkerThread::Delegate::OnMainExit()) since the last call to
  // WaitForWorkersCleanedUpForTesting() (or Start() if that wasn't called yet).
  void WaitForWorkersCleanedUpForTesting(size_t n);

  // Returns the number of workers in this thread group.
  size_t NumberOfWorkersForTesting() const;
  // Returns the number of workers that are idle (i.e. not running tasks).
  size_t NumberOfIdleWorkersForTesting() const;
  // Returns the number of workers that are idle (i.e. not running tasks).
  virtual size_t NumberOfIdleWorkersLockRequiredForTesting() const
      EXCLUSIVE_LOCKS_REQUIRED(lock_) = 0;

  class ThreadGroupWorkerDelegate;

 protected:
  ThreadGroup(std::string_view histogram_label,
              std::string_view thread_group_label,
              ThreadType thread_type_hint,
              TrackedRef<TaskTracker> task_tracker,
              TrackedRef<Delegate> delegate);

  void StartImpl(
      size_t max_tasks,
      size_t max_best_effort_tasks,
      TimeDelta suggested_reclaim_time,
      scoped_refptr<SingleThreadTaskRunner> service_thread_task_runner,
      WorkerThreadObserver* worker_thread_observer,
      WorkerEnvironment worker_environment,
      bool synchronous_thread_start_for_testing = false,
      std::optional<TimeDelta> may_block_threshold =
          std::optional<TimeDelta>());

  // Derived classes must implement a ScopedCommandsExecutor that derives from
  // this to perform operations at the end of a scope, when all locks have been
  // released.
  class BaseScopedCommandsExecutor {
   public:
    BaseScopedCommandsExecutor(const BaseScopedCommandsExecutor&) = delete;
    BaseScopedCommandsExecutor& operator=(const BaseScopedCommandsExecutor&) =
        delete;
    virtual ~BaseScopedCommandsExecutor();

    void ScheduleStart(scoped_refptr<WorkerThread> worker);
    void ScheduleAdjustMaxTasks();
    void ScheduleReleaseTaskSource(RegisteredTaskSource task_source);

   protected:
    explicit BaseScopedCommandsExecutor(ThreadGroup* outer);

    // RAW_PTR_EXCLUSION: Performance: visible in the sampling profiler and
    // stack-scoped; also a back-pointer to the owning object.
    RAW_PTR_EXCLUSION ThreadGroup* outer_ = nullptr;

    // Performs BaseScopedCommandsExecutor-related tasks; must be called in this
    // class' destructor.
    void Flush();

    std::vector<RegisteredTaskSource> task_sources_to_release_;
    absl::InlinedVector<scoped_refptr<WorkerThread>, 2> workers_to_start_;
    bool must_schedule_adjust_max_tasks_ = false;
  };
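
  // A sketch, under assumptions, of a derived executor; ScopedCommandsExecutor
  // and ThreadGroupImpl are illustrative names. The key property is that
  // Flush() runs in the destructor, after the caller has released |lock_|:
  //
  //   class ScopedCommandsExecutor : public BaseScopedCommandsExecutor {
  //    public:
  //     explicit ScopedCommandsExecutor(ThreadGroupImpl* outer)
  //         : BaseScopedCommandsExecutor(outer) {}
  //     ~ScopedCommandsExecutor() override { Flush(); }
  //   };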

  // Allows a task source to be pushed to a ThreadGroup's PriorityQueue at the
  // end of a scope, when all locks have been released.
  class ScopedReenqueueExecutor {
    STACK_ALLOCATED();

   public:
    ScopedReenqueueExecutor();
    ScopedReenqueueExecutor(const ScopedReenqueueExecutor&) = delete;
    ScopedReenqueueExecutor& operator=(const ScopedReenqueueExecutor&) = delete;
    ~ScopedReenqueueExecutor();

    // Schedules |transaction_with_task_source| to be enqueued in
    // |destination_thread_group| when this executor goes out of scope.
    void SchedulePushTaskSourceAndWakeUpWorkers(
        RegisteredTaskSourceAndTransaction transaction_with_task_source,
        ThreadGroup* destination_thread_group);

   private:
    // A RegisteredTaskSourceAndTransaction and the thread group in which it
    // should be enqueued.
    std::optional<RegisteredTaskSourceAndTransaction>
        transaction_with_task_source_;
    ThreadGroup* destination_thread_group_ = nullptr;
  };

  ThreadGroup(TrackedRef<TaskTracker> task_tracker,
              TrackedRef<Delegate> delegate);

#if BUILDFLAG(IS_WIN)
  static std::unique_ptr<win::ScopedWindowsThreadEnvironment>
  GetScopedWindowsThreadEnvironment(WorkerEnvironment environment);
#endif

  const TrackedRef<TaskTracker> task_tracker_;
  const TrackedRef<Delegate> delegate_;

  // Returns the number of additional workers required to run all queued
  // BEST_EFFORT task sources allowed to run by the current CanRunPolicy.
  size_t GetNumAdditionalWorkersForBestEffortTaskSourcesLockRequired() const
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Returns the number of additional workers required to run all queued
  // USER_VISIBLE/USER_BLOCKING task sources allowed to run by the current
  // CanRunPolicy.
  size_t GetNumAdditionalWorkersForForegroundTaskSourcesLockRequired() const
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Ensures that there are enough workers to run queued task sources.
  // |executor| is forwarded from the one received in
  // PushTaskSourceAndWakeUpWorkersImpl().
  virtual void EnsureEnoughWorkersLockRequired(
      BaseScopedCommandsExecutor* executor) EXCLUSIVE_LOCKS_REQUIRED(lock_) = 0;

  // Reenqueues a |transaction_with_task_source| from which a Task just ran in
  // the current ThreadGroup into the appropriate ThreadGroup.
  void ReEnqueueTaskSourceLockRequired(
      BaseScopedCommandsExecutor* workers_executor,
      ScopedReenqueueExecutor* reenqueue_executor,
      RegisteredTaskSourceAndTransaction transaction_with_task_source)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Returns the next task source from |priority_queue_| if permitted to run and
  // pops |priority_queue_| if the task source returned no longer needs to be
  // queued (reached its maximum concurrency). Otherwise returns nullptr and
  // pops |priority_queue_| so this can be called again.
  RegisteredTaskSource TakeRegisteredTaskSource(
      BaseScopedCommandsExecutor* executor) EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Must be invoked by implementations of the corresponding non-Impl() methods.
  void UpdateSortKeyImpl(BaseScopedCommandsExecutor* executor,
                         TaskSource::Transaction transaction);
  void PushTaskSourceAndWakeUpWorkersImpl(
      BaseScopedCommandsExecutor* executor,
      RegisteredTaskSourceAndTransaction transaction_with_task_source);

  // Returns the desired number of awake workers, given current workload and
  // concurrency limits.
  size_t GetDesiredNumAwakeWorkersLockRequired() const
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Enqueues all task sources from `new_priority_queue` into this thread group.
  void EnqueueAllTaskSources(PriorityQueue* new_priority_queue);

  // Returns the threshold after which the max tasks is increased to compensate
  // for a worker that is within a MAY_BLOCK ScopedBlockingCall.
  TimeDelta may_block_threshold_for_testing() const {
    return after_start().may_block_threshold;
  }

  // Interval at which the service thread checks for workers in this thread
  // group that have been in a MAY_BLOCK ScopedBlockingCall for more than
  // may_block_threshold().
  TimeDelta blocked_workers_poll_period_for_testing() const {
    return after_start().blocked_workers_poll_period;
  }

  // Schedules AdjustMaxTasks() if required.
  void MaybeScheduleAdjustMaxTasksLockRequired(
      BaseScopedCommandsExecutor* executor) EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Starts calling AdjustMaxTasks() periodically on
  // |service_thread_task_runner_|.
  virtual void ScheduleAdjustMaxTasks() = 0;

  // Examines the list of WorkerThreads and increments |max_tasks_| for each
  // worker that has been within the scope of a MAY_BLOCK ScopedBlockingCall for
  // more than may_block_threshold(). Reschedules a call if necessary.
  virtual void AdjustMaxTasks() = 0;

  // Returns true if AdjustMaxTasks() should periodically be called on
  // |service_thread_task_runner_|.
  bool ShouldPeriodicallyAdjustMaxTasksLockRequired()
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Updates the minimum priority allowed to run, below which tasks should
  // yield. This should be called whenever |num_running_tasks_| or |max_tasks|
  // changes, or when a new task is added to |priority_queue_|.
  void UpdateMinAllowedPriorityLockRequired() EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Increments/decrements the number of tasks of |priority| that are currently
  // running in this thread group. Must be invoked before/after running a task.
  void DecrementTasksRunningLockRequired(TaskPriority priority)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);
  void IncrementTasksRunningLockRequired(TaskPriority priority)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Increments/decrements the number of [best effort] tasks that can run in
  // this thread group.
  void DecrementMaxTasksLockRequired() EXCLUSIVE_LOCKS_REQUIRED(lock_);
  void IncrementMaxTasksLockRequired() EXCLUSIVE_LOCKS_REQUIRED(lock_);
  void DecrementMaxBestEffortTasksLockRequired()
      EXCLUSIVE_LOCKS_REQUIRED(lock_);
  void IncrementMaxBestEffortTasksLockRequired()
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Values set at Start() and never modified afterwards.
  struct InitializedInStart {
    InitializedInStart();
    ~InitializedInStart();

#if DCHECK_IS_ON()
    // Set after all members of this struct are set.
    bool initialized = false;
#endif

    // Initial value of |max_tasks_|.
    size_t initial_max_tasks = 0;

    // Suggested reclaim time for workers.
    TimeDelta suggested_reclaim_time;

    // Environment to be initialized per worker.
    WorkerEnvironment worker_environment = WorkerEnvironment::NONE;

    scoped_refptr<SingleThreadTaskRunner> service_thread_task_runner;

    // Optional observer notified when a worker enters and exits its main
    // function.
    raw_ptr<WorkerThreadObserver> worker_thread_observer = nullptr;

    // Threshold after which the max tasks is increased to compensate for a
    // worker that is within a MAY_BLOCK ScopedBlockingCall.
    TimeDelta may_block_threshold;

    // The period between calls to AdjustMaxTasks() when the thread group is at
    // capacity.
    TimeDelta blocked_workers_poll_period;
  } initialized_in_start_;

  InitializedInStart& in_start() LIFETIME_BOUND {
#if DCHECK_IS_ON()
    DCHECK(!initialized_in_start_.initialized);
#endif
    return initialized_in_start_;
  }
  const InitializedInStart& after_start() const LIFETIME_BOUND {
#if DCHECK_IS_ON()
    DCHECK(initialized_in_start_.initialized);
#endif
    return initialized_in_start_;
  }
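
  // A hedged sketch of the intended write-once pattern: members are written
  // through in_start() during Start(), then read through after_start(). The
  // StartImpl() body below is illustrative, not the actual implementation:
  //
  //   void ThreadGroup::StartImpl(...) {
  //     in_start().suggested_reclaim_time = suggested_reclaim_time;
  //     ...
  //   #if DCHECK_IS_ON()
  //     in_start().initialized = true;
  //   #endif
  //   }
  //   // Later, on any thread:
  //   TimeDelta reclaim = after_start().suggested_reclaim_time;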

  // Synchronizes accesses to all members of this class which are neither const,
  // atomic, nor immutable after start. Since this lock is a bottleneck to post
  // and schedule work, only simple data structure manipulations are allowed
  // within its scope (no thread creation or wake up).
  mutable CheckedLock lock_{};

  bool disable_fair_scheduling_ GUARDED_BY(lock_){false};

  // PriorityQueue from which all threads of this ThreadGroup get work.
  PriorityQueue priority_queue_ GUARDED_BY(lock_);

  struct YieldSortKey {
    TaskPriority priority;
    uint8_t worker_count;
  };
  // Sort key which compares greater than or equal to any other sort key.
  static constexpr YieldSortKey kMaxYieldSortKey = {TaskPriority::BEST_EFFORT,
                                                    0U};

  // When the thread group is at or above capacity and has pending work, this is
  // set to contain the priority and worker count of the next TaskSource to
  // schedule, or kMaxYieldSortKey otherwise. This is used to decide whether a
  // TaskSource should yield. Once ShouldYield() returns true, it is reset to
  // kMaxYieldSortKey to prevent additional unnecessary yielding. This is
  // expected to be always kept up-to-date by derived classes when |lock_| is
  // released. It is annotated as GUARDED_BY(lock_) because it is always updated
  // under the lock (to avoid races with other state during the update), but it
  // is nonetheless always safe to read without the lock (since it's atomic).
  std::atomic<YieldSortKey> max_allowed_sort_key_ GUARDED_BY(lock_){
      kMaxYieldSortKey};
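
  // A sketch of the lock-free read the comment above allows; whether
  // ShouldYield() reads it exactly this way is an assumption. TS_UNCHECKED_READ
  // silences the thread-safety analyzer for the lockless access:
  //
  //   const YieldSortKey allowed =
  //       TS_UNCHECKED_READ(max_allowed_sort_key_).load(std::memory_order_relaxed);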

  const std::string histogram_label_;
  const std::string thread_group_label_;
  const ThreadType thread_type_hint_;

  // Sequence number assigned to the next worker created by this thread group.
  size_t worker_sequence_num_ GUARDED_BY(lock_) = 0;

  bool shutdown_started_ GUARDED_BY(lock_) = false;

  // Maximum number of tasks of any priority / BEST_EFFORT priority that can run
  // concurrently in this thread group currently, excluding adjustment for
  // blocking tasks.
  size_t baseline_max_tasks_ GUARDED_BY(lock_) = 0;
  // Same as `baseline_max_tasks_`, but including adjustment for blocking tasks.
  size_t max_tasks_ GUARDED_BY(lock_) = 0;
  size_t max_best_effort_tasks_ GUARDED_BY(lock_) = 0;

  // Number of tasks of any priority / BEST_EFFORT priority that are currently
  // running in this thread group.
  size_t num_running_tasks_ GUARDED_BY(lock_) = 0;
  size_t num_running_best_effort_tasks_ GUARDED_BY(lock_) = 0;

  // Number of workers running a task of any priority / BEST_EFFORT priority
  // that are within the scope of a MAY_BLOCK ScopedBlockingCall but haven't
  // caused a max tasks increase yet.
  int num_unresolved_may_block_ GUARDED_BY(lock_) = 0;
  int num_unresolved_best_effort_may_block_ GUARDED_BY(lock_) = 0;

  // Signaled when a worker is added to the idle workers set.
  ConditionVariable idle_workers_set_cv_for_testing_ GUARDED_BY(lock_);

  // Whether an AdjustMaxTasks() task was posted to the service thread.
  bool adjust_max_tasks_posted_ GUARDED_BY(lock_) = false;

  // Indicates to the delegates that workers are not permitted to clean up.
  bool worker_cleanup_disallowed_for_testing_ GUARDED_BY(lock_) = false;

  // Counts the number of workers cleaned up (gone through
  // WorkerThreadDelegateImpl::OnMainExit()) since the last call to
  // WaitForWorkersCleanedUpForTesting() (or Start() if that wasn't called yet).
  // |some_workers_cleaned_up_for_testing_| is true if this was ever
  // incremented. Tests with a custom |suggested_reclaim_time_| can wait on a
  // specific number of workers being cleaned up via
  // WaitForWorkersCleanedUpForTesting().
  size_t num_workers_cleaned_up_for_testing_ GUARDED_BY(lock_) = 0;
#if DCHECK_IS_ON()
  bool some_workers_cleaned_up_for_testing_ GUARDED_BY(lock_) = false;
#endif

  // Signaled, if non-null, when |num_workers_cleaned_up_for_testing_| is
  // incremented.
  std::optional<ConditionVariable> num_workers_cleaned_up_for_testing_cv_
      GUARDED_BY(lock_);

  // All workers owned by this thread group.
  std::vector<scoped_refptr<WorkerThread>> workers_ GUARDED_BY(lock_);

  // nullopt unless |synchronous_thread_start_for_testing| was true at
  // construction. In that case, it's signaled each time
  // WorkerThreadDelegateImpl::OnMainEntry() completes.
  std::optional<WaitableEvent> worker_started_for_testing_;

  // Set at the start of JoinForTesting().
  bool join_for_testing_started_ GUARDED_BY(lock_) = false;
};

}  // namespace internal
}  // namespace base

#endif  // BASE_TASK_THREAD_POOL_THREAD_GROUP_H_