// Copyright 2019 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TASK_SEQUENCE_MANAGER_ATOMIC_FLAG_SET_H_
#define BASE_TASK_SEQUENCE_MANAGER_ATOMIC_FLAG_SET_H_

#include <array>
#include <atomic>
#include <memory>

#include "base/base_export.h"
#include "base/functional/callback.h"
#include "base/memory/raw_ptr.h"
#include "base/task/sequence_manager/associated_thread_id.h"

namespace base::sequence_manager::internal {

// This class maintains a set of AtomicFlags which can be activated or
// deactivated at any time by any thread. When a flag is created, a callback
// is specified, and the RunActiveCallbacks method can be invoked to fire
// callbacks for all active flags. Creating, releasing, or destroying an
// AtomicFlag must be done on the associated thread, as must calling
// RunActiveCallbacks. This class is thread-affine.
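//
// Example usage (an illustrative sketch; |associated_thread| and DoWork()
// are placeholders, not part of the original header):
//
//   AtomicFlagSet set(associated_thread);
//   AtomicFlagSet::AtomicFlag flag =
//       set.AddFlag(BindRepeating([] { DoWork(); }));
//
//   flag.SetActive(true);        // Any thread.
//   set.RunActiveCallbacks();    // Associated thread; runs DoWork() and
//                                // atomically resets the flag to inactive.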
class BASE_EXPORT AtomicFlagSet {
 protected:
  struct Group;

 public:
  explicit AtomicFlagSet(
      scoped_refptr<const AssociatedThreadId> associated_thread);
  AtomicFlagSet(const AtomicFlagSet&) = delete;
  AtomicFlagSet& operator=(const AtomicFlagSet&) = delete;
  // AtomicFlags need to be released (or deleted) before this can be deleted.
  ~AtomicFlagSet();

  // This class is thread-affine; in addition, SetActive can be called
  // concurrently from any thread.
  class BASE_EXPORT AtomicFlag {
   public:
    AtomicFlag();

    // Automatically releases the AtomicFlag.
    ~AtomicFlag();

    AtomicFlag(const AtomicFlag&) = delete;
    AtomicFlag(AtomicFlag&& other);

    // Can be called on any thread. Marks the flag as active or inactive,
    // which controls whether RunActiveCallbacks() will fire the associated
    // callback. In the absence of external synchronization, the value set by
    // this call might not immediately be visible to a thread calling
    // RunActiveCallbacks(); the only guarantee is that a value set by this
    // call will eventually be visible to other threads due to cache
    // coherency. Release / acquire semantics are used on the underlying
    // atomic operations, so if RunActiveCallbacks() sees the value set by a
    // call to SetActive(), it will also see the memory changes that happened
    // prior to that SetActive() call.
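    //
    // For example (illustrative sketch; |shared_data|, |flag| and |set| are
    // placeholders):
    //
    //   // Thread A:
    //   shared_data = 42;      // Plain write.
    //   flag.SetActive(true);  // Release.
    //
    //   // Associated thread:
    //   set.RunActiveCallbacks();  // Acquire; if the callback fires, it is
    //                              // guaranteed to see shared_data == 42.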
    void SetActive(bool active);

    // Releases the flag. Must be called on the associated thread. SetActive
    // can't be called after this.
    void ReleaseAtomicFlag();

   private:
    friend AtomicFlagSet;

    AtomicFlag(AtomicFlagSet* outer, Group* element, size_t flag_bit);

    raw_ptr<AtomicFlagSet, DanglingUntriaged> outer_ = nullptr;
    raw_ptr<Group> group_ = nullptr;  // Null when AtomicFlag is invalid.
    size_t flag_bit_ = 0;  // This is 1 << index of this flag within the group.
  };

  // Adds a new flag to the set. The |callback| will be fired by
  // RunActiveCallbacks if the flag is active. Must be called on the associated
  // thread.
  AtomicFlag AddFlag(RepeatingClosure callback);

  // Runs the registered callback for all flags marked as active and atomically
  // resets all flags to inactive. Must be called on the associated thread.
  void RunActiveCallbacks() const;

 protected:
  Group* GetAllocListForTesting() const { return alloc_list_head_.get(); }

  Group* GetPartiallyFreeListForTesting() const {
    return partially_free_list_head_;
  }

  // Wraps a single std::atomic<size_t> which is shared by a number of
  // AtomicFlags, with one bit per flag.
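  //
  // Since kNumFlags is sizeof(size_t) * 8, a Group therefore holds up to 64
  // flags on a typical 64-bit platform (32 on a 32-bit one).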
  struct BASE_EXPORT Group {
    Group();
    Group(const Group&) = delete;
    Group& operator=(const Group&) = delete;
    ~Group();

    static constexpr int kNumFlags = sizeof(size_t) * 8;

    std::atomic<size_t> flags = {0};
    size_t allocated_flags = 0;
    std::array<RepeatingClosure, kNumFlags> flag_callbacks;
    raw_ptr<Group> prev = nullptr;
    std::unique_ptr<Group> next;
    raw_ptr<Group> partially_free_list_prev = nullptr;
    raw_ptr<Group> partially_free_list_next = nullptr;

    bool IsFull() const;

    bool IsEmpty() const;

    // Returns the index of the first unallocated flag. Must not be called
    // when all flags are allocated.
    size_t FindFirstUnallocatedFlag() const;

    // Computes the index into |flag_callbacks| based on the number of leading
    // zero bits in |flag|.
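    //
    // E.g. (illustrative; see the .cc file for the exact mapping): on a
    // 64-bit platform, a |flag| of size_t{1} << 61 has two leading zero
    // bits, and that leading-zero count determines the slot in
    // |flag_callbacks|.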
    static size_t IndexOfFirstFlagSet(size_t flag);
  };

 private:
  void AddToAllocList(std::unique_ptr<Group> element);

  // This deletes |element|.
  void RemoveFromAllocList(Group* element);

  void AddToPartiallyFreeList(Group* element);

  // This does not delete |element|.
  void RemoveFromPartiallyFreeList(Group* element);

  const scoped_refptr<const AssociatedThreadId> associated_thread_;
  std::unique_ptr<Group> alloc_list_head_;
  raw_ptr<Group> partially_free_list_head_ = nullptr;
};

}  // namespace base::sequence_manager::internal

#endif  // BASE_TASK_SEQUENCE_MANAGER_ATOMIC_FLAG_SET_H_