// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_RACEFUL_WORKLIST_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_RACEFUL_WORKLIST_H_

#include <algorithm>
#include <atomic>
#include <vector>

#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/rand_util.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/starscan/metadata_allocator.h"

namespace partition_alloc::internal {

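// A worklist that several threads may visit at the same time without locking.
// The per-node flags are read and written with relaxed atomics, so two
// threads can occasionally claim the same node; visitation is "raceful" and
// callers must tolerate an item being visited more than once.
//
// Illustrative usage sketch (not part of the original header; uintptr_t is an
// arbitrary element type picked for the example):
//
//   RacefulWorklist<uintptr_t> worklist;
//   worklist.Push(0x1000u);
//   worklist.Push(0x2000u);
//   // On each visiting thread:
//   RacefulWorklist<uintptr_t>::RandomizedView view(worklist);
//   view.Visit([](uintptr_t address) {
//     // May be invoked more than once for the same item.
//   });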
template <typename T>
class RacefulWorklist {
  struct Node {
    explicit Node(const T& value) : value(value) {}
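    // std::atomic is neither copyable nor movable, so Node provides its own
    // copy constructor that copies the flag values; copies happen when the
    // backing std::vector below reallocates.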
    Node(const Node& other)
        : value(other.value),
          is_being_visited(
              other.is_being_visited.load(std::memory_order_relaxed)),
          is_visited(other.is_visited.load(std::memory_order_relaxed)) {}

    T value;
    std::atomic<bool> is_being_visited{false};
    std::atomic<bool> is_visited{false};
  };
  using Underlying = std::vector<Node, MetadataAllocator<Node>>;

 public:
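  // A view over the worklist that visits nodes starting at a random offset
  // and wrapping around. The random starting point is presumably meant to
  // let concurrently visiting threads begin in different parts of the list.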
  class RandomizedView {
   public:
    explicit RandomizedView(RacefulWorklist& worklist)
        : worklist_(worklist), offset_(0) {
      if (worklist.data_.size() > 0)
        offset_ = static_cast<size_t>(
            internal::base::RandGenerator(worklist.data_.size()));
    }

    RandomizedView(const RandomizedView&) = delete;
    const RandomizedView& operator=(const RandomizedView&) = delete;

    template <typename Function>
    void Visit(Function f);

   private:
    RacefulWorklist& worklist_;
    size_t offset_;
  };

  RacefulWorklist() = default;

  RacefulWorklist(const RacefulWorklist&) = delete;
  RacefulWorklist& operator=(const RacefulWorklist&) = delete;

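  // Appends items to the worklist. Pushing is not synchronized with
  // concurrent visitation, so callers are presumably expected to finish
  // pushing before any RandomizedView starts visiting.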
  void Push(const T& t) { data_.push_back(Node(t)); }

  template <typename It>
  void Push(It begin, It end) {
    std::transform(begin, end, std::back_inserter(data_),
                   [](const T& t) { return Node(t); });
  }

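  // Visits every value in order, without reading or updating the per-node
  // flags. The name suggests this is only meant for use when no other thread
  // is concurrently visiting or pushing (an assumption; nothing enforces it).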
  template <typename Function>
  void VisitNonConcurrently(Function) const;

 private:
  Underlying data_;
  std::atomic<bool> fully_visited_{false};
};

template <typename T>
template <typename Function>
void RacefulWorklist<T>::VisitNonConcurrently(Function f) const {
  for (const auto& t : data_)
    f(t.value);
}

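// Visits the worklist in three passes: nodes at and after the random offset,
// then nodes before it, and finally nodes that were skipped earlier because
// another thread was already visiting them. The last pass deliberately does
// not skip contended nodes, so this thread keeps making progress even if the
// other visitor is slow (see the in-line comments below).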
template <typename T>
template <typename Function>
void RacefulWorklist<T>::RandomizedView::Visit(Function f) {
  auto& data = worklist_.data_;
  std::vector<typename Underlying::iterator,
              MetadataAllocator<typename Underlying::iterator>>
      to_revisit;

  // To avoid iterating the worklist, first do a quick check of whether it has
  // already been fully visited.
  if (worklist_.fully_visited_.load(std::memory_order_acquire))
    return;

  const auto offset_it = std::next(data.begin(), offset_);

  // First, visit items starting from the offset.
  for (auto it = offset_it; it != data.end(); ++it) {
    if (it->is_visited.load(std::memory_order_relaxed))
      continue;
    if (it->is_being_visited.load(std::memory_order_relaxed)) {
      to_revisit.push_back(it);
      continue;
    }
    it->is_being_visited.store(true, std::memory_order_relaxed);
    f(it->value);
    it->is_visited.store(true, std::memory_order_relaxed);
  }

  // Then, visit items before the offset.
  for (auto it = data.begin(); it != offset_it; ++it) {
    if (it->is_visited.load(std::memory_order_relaxed))
      continue;
    if (it->is_being_visited.load(std::memory_order_relaxed)) {
      to_revisit.push_back(it);
      continue;
    }
    it->is_being_visited.store(true, std::memory_order_relaxed);
    f(it->value);
    it->is_visited.store(true, std::memory_order_relaxed);
  }

  // Finally, racefully visit items that were scanned by some other thread.
  for (auto it : to_revisit) {
    if (PA_LIKELY(it->is_visited.load(std::memory_order_relaxed)))
      continue;
    // Don't bail out here if the item is being visited by another thread.
    // This is helpful to guarantee forward progress if the other thread
    // is making slow progress.
    it->is_being_visited.store(true, std::memory_order_relaxed);
    f(it->value);
    it->is_visited.store(true, std::memory_order_relaxed);
  }

  worklist_.fully_visited_.store(true, std::memory_order_release);
}

}  // namespace partition_alloc::internal

#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_RACEFUL_WORKLIST_H_