// Copyright 2019 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/task/sequence_manager/atomic_flag_set.h"

#include <bit>
#include <utility>

#include "base/check_op.h"
#include "base/functional/callback.h"

namespace base::sequence_manager::internal {

AtomicFlagSet::AtomicFlagSet(
    scoped_refptr<const AssociatedThreadId> associated_thread)
    : associated_thread_(std::move(associated_thread)) {}

AtomicFlagSet::~AtomicFlagSet() {
  DCHECK(!alloc_list_head_);
  DCHECK(!partially_free_list_head_);
}

AtomicFlagSet::AtomicFlag::AtomicFlag() = default;

AtomicFlagSet::AtomicFlag::~AtomicFlag() {
  ReleaseAtomicFlag();
}

AtomicFlagSet::AtomicFlag::AtomicFlag(AtomicFlagSet* outer,
                                      Group* element,
                                      size_t flag_bit)
    : outer_(outer), group_(element), flag_bit_(flag_bit) {}

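// Moving leaves |other| detached (null |outer_| and |group_|) so that its
// destructor does not release the flag a second time.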
AtomicFlagSet::AtomicFlag::AtomicFlag(AtomicFlag&& other)
    : outer_(other.outer_), group_(other.group_), flag_bit_(other.flag_bit_) {
  other.outer_ = nullptr;
  other.group_ = nullptr;
}

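// Unlike the other AtomicFlag methods, SetActive() only touches the atomic
// |flags| word, so it may be called from any thread; hence no thread checker
// here.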
void AtomicFlagSet::AtomicFlag::SetActive(bool active) {
  DCHECK(group_);
  if (active) {
    // Release semantics are required to ensure that all memory accesses made
    // on this thread happen-before any others done on the thread running the
    // active callbacks.
    group_->flags.fetch_or(flag_bit_, std::memory_order_release);
  } else {
    // No operation is being performed based on the bit *not* being set (i.e.
    // state of other memory is irrelevant); hence no memory order is required
    // when unsetting it.
    group_->flags.fetch_and(~flag_bit_, std::memory_order_relaxed);
  }
}

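// Returns this flag's slot (callback and bit) to the set; must be called on
// the associated thread.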
void AtomicFlagSet::AtomicFlag::ReleaseAtomicFlag() {
  if (!group_)
    return;

  DCHECK_CALLED_ON_VALID_THREAD(outer_->associated_thread_->thread_checker);
  SetActive(false);

  // If |group_| was full then add it to the partially free list, since it is
  // about to gain a free slot.
  if (group_->IsFull())
    outer_->AddToPartiallyFreeList(group_);

  size_t index = Group::IndexOfFirstFlagSet(flag_bit_);
  DCHECK(!group_->flag_callbacks[index].is_null());
  group_->flag_callbacks[index] = RepeatingClosure();
  group_->allocated_flags &= ~flag_bit_;

  // If |group_| has become empty, delete it.
  if (group_->IsEmpty()) {
    auto ptr = group_.ExtractAsDangling();
    outer_->RemoveFromPartiallyFreeList(ptr);
    outer_->RemoveFromAllocList(ptr);
  }

  outer_ = nullptr;
  group_ = nullptr;
}

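// Claims the first free bit in a group from the partially free list,
// allocating a fresh Group when every existing one is full.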
AtomicFlagSet::AtomicFlag AtomicFlagSet::AddFlag(RepeatingClosure callback) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  // Allocate a new Group if needed.
  if (!partially_free_list_head_) {
    AddToAllocList(std::make_unique<Group>());
    AddToPartiallyFreeList(alloc_list_head_.get());
  }

  DCHECK(partially_free_list_head_);
  Group* group = partially_free_list_head_;
  size_t first_unoccupied_index = group->FindFirstUnallocatedFlag();
  DCHECK(!group->flag_callbacks[first_unoccupied_index]);
  group->flag_callbacks[first_unoccupied_index] = std::move(callback);

  size_t flag_bit = size_t{1} << first_unoccupied_index;
  group->allocated_flags |= flag_bit;

  if (group->IsFull())
    RemoveFromPartiallyFreeList(group);

  return AtomicFlag(this, group, flag_bit);
}

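// Atomically claims and clears each group's active flags in a single
// exchange, then runs the callback registered for every bit that was set.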
void AtomicFlagSet::RunActiveCallbacks() const {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  for (Group* iter = alloc_list_head_.get(); iter; iter = iter->next.get()) {
    // Acquire semantics are required to guarantee that all memory side-effects
    // made by other threads that were allowed to perform operations are
    // synchronized with this thread before it returns from this method.
    size_t active_flags = std::atomic_exchange_explicit(
        &iter->flags, size_t{0}, std::memory_order_acquire);
    // This is O(number of bits set).
    while (active_flags) {
      size_t index = Group::IndexOfFirstFlagSet(active_flags);
      // Clear the flag.
      active_flags ^= size_t{1} << index;
      iter->flag_callbacks[index].Run();
    }
  }
}

AtomicFlagSet::Group::Group() = default;

AtomicFlagSet::Group::~Group() {
  DCHECK_EQ(allocated_flags, 0u);
  DCHECK(!partially_free_list_prev);
  DCHECK(!partially_free_list_next);
}

bool AtomicFlagSet::Group::IsFull() const {
  return (~allocated_flags) == 0u;
}

bool AtomicFlagSet::Group::IsEmpty() const {
  return allocated_flags == 0u;
}

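// Returns the index of the lowest clear bit in |allocated_flags|; the group
// must not be full.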
size_t AtomicFlagSet::Group::FindFirstUnallocatedFlag() const {
  size_t unallocated_flags = ~allocated_flags;
  DCHECK_NE(unallocated_flags, 0u);
  size_t index = IndexOfFirstFlagSet(unallocated_flags);
  DCHECK_LT(index, static_cast<size_t>(kNumFlags));
  return index;
}

// static
size_t AtomicFlagSet::Group::IndexOfFirstFlagSet(size_t flag) {
  DCHECK_NE(flag, 0u);
  // std::countr_zero() returns a non-negative int, so the cast is safe.
  return static_cast<size_t>(std::countr_zero(flag));
}

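// Pushes |group| onto the head of the intrusive doubly linked alloc list,
// which owns the groups through their |next| pointers.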
void AtomicFlagSet::AddToAllocList(std::unique_ptr<Group> group) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  if (alloc_list_head_)
    alloc_list_head_->prev = group.get();

  group->next = std::move(alloc_list_head_);
  alloc_list_head_ = std::move(group);
}

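// Unlinks |group| from the alloc list. Because the list owns its nodes,
// moving |group->next| out drops the last reference to |group| and deletes
// it.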
void AtomicFlagSet::RemoveFromAllocList(Group* group) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  if (group->next)
    group->next->prev = group->prev;

  if (group->prev) {
    group->prev->next = std::move(group->next);
  } else {
    alloc_list_head_ = std::move(group->next);
  }
}

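// Pushes |group| onto the head of the (non-owning) partially free list, which
// links every group that still has at least one unallocated bit.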
void AtomicFlagSet::AddToPartiallyFreeList(Group* group) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  DCHECK_NE(partially_free_list_head_, group);
  DCHECK(!group->partially_free_list_prev);
  DCHECK(!group->partially_free_list_next);
  if (partially_free_list_head_)
    partially_free_list_head_->partially_free_list_prev = group;

  group->partially_free_list_next = partially_free_list_head_;
  partially_free_list_head_ = group;
}

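// Unlinks |group| from the partially free list and resets its link pointers,
// e.g. because its last free bit was just allocated.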
void AtomicFlagSet::RemoveFromPartiallyFreeList(Group* group) {
  DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
  DCHECK(partially_free_list_head_);
  // Check |group| is in the list.
  DCHECK(partially_free_list_head_ == group || group->partially_free_list_prev);
  if (group->partially_free_list_next) {
    group->partially_free_list_next->partially_free_list_prev =
        group->partially_free_list_prev;
  }

  if (group->partially_free_list_prev) {
    group->partially_free_list_prev->partially_free_list_next =
        group->partially_free_list_next;
  } else {
    partially_free_list_head_ = group->partially_free_list_next;
  }

  group->partially_free_list_prev = nullptr;
  group->partially_free_list_next = nullptr;
}

}  // namespace base::sequence_manager::internal