/*
 * Copyright (c) 2025 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef COMMON_COMPONENTS_MUTATOR_SATB_BUFFER_H
#define COMMON_COMPONENTS_MUTATOR_SATB_BUFFER_H

#include "common_components/common/page_pool.h"
#include "common_components/common/mark_work_stack.h"
#include "common_components/log/log.h"

namespace common {
// Snapshot-at-the-beginning (SATB) buffer.
// Mainly used to buffer the old values of fields overwritten by mutator writes.
class SatbBuffer {
public:
    static constexpr size_t INITIAL_PAGES = 64;    // 64 pages of initial satb buffer
    static constexpr size_t CACHE_LINE_ALIGN = 64; // on most hardware platforms, the cache line is 64-byte aligned
    static SatbBuffer& Instance() noexcept;
    class TreapNode {
        friend class SatbBuffer;

    public:
        TreapNode() : top_(container_), next_(nullptr) {}
        ~TreapNode() = default;
        bool IsEmpty() const { return reinterpret_cast<size_t>(top_) == reinterpret_cast<size_t>(container_); }
        bool IsFull() const
        {
            static_assert((sizeof(TreapNode) % sizeof(BaseObject**)) == 0, "Satb node must be aligned");
            return top_ == &container_[CONTAINER_CAPACITY_];
        }
        void Clear()
        {
            size_t size = reinterpret_cast<uintptr_t>(top_) - reinterpret_cast<uintptr_t>(container_);
            LOGF_CHECK((memset_s(container_, sizeof(container_), 0, size) == EOK)) << "memset fail\n";
            top_ = container_;
        }
        void Push(const BaseObject* obj)
        {
            std::lock_guard<std::mutex> lg(syncLock_);
            *top_ = const_cast<BaseObject*>(obj);
            top_++;
        }
        template<typename T>
        void GetObjects(T& stack)
        {
            ASSERT_LOGF(top_ <= &container_[CONTAINER_CAPACITY_], "invalid node");
            std::lock_guard<std::mutex> lg(syncLock_);
            BaseObject** head = container_;
            while (head != top_) {
                stack.push_back(*head);
                head++;
            }
            Clear();
        }

    private:
#if defined(_WIN64)
        static constexpr size_t CONTAINER_CAPACITY_ = 69;
#elif defined(__aarch64__)
        static constexpr size_t CONTAINER_CAPACITY_ = 64;
#else
        static constexpr size_t CONTAINER_CAPACITY_ = 65;
#endif
        std::mutex syncLock_;
        BaseObject** top_;
        TreapNode* next_;
        BaseObject* container_[CONTAINER_CAPACITY_] = { nullptr };
    };
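    // Illustrative sketch (not part of this header): a node records overwritten
    // field values and is later drained into any container with push_back.
    //   TreapNode* node = ...;                   // acquired from SatbBuffer
    //   node->Push(oldValue);                    // record an overwritten field
    //   std::vector<BaseObject*> snapshot;
    //   node->GetObjects(snapshot);              // drain the node; this also clears it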

    static constexpr size_t NODE_SIZE_ = sizeof(TreapNode);
    struct Page {
        Page(Page* n, size_t bytes) : next_(n), length_(bytes) {}
        Page* next_ = nullptr;
        size_t length_ = 0;
    };

    // There is no need to use LL/SC to avoid the ABA problem, because nodes are all unique.
    template<typename T>
    class LockFreeList {
        friend class SatbBuffer;

    public:
        LockFreeList() : head_(nullptr) {}
        ~LockFreeList() = default;

        void Reset() { head_ = nullptr; }

        void Push(T* n)
        {
            T* old = head_.load(std::memory_order_relaxed);
            do {
                n->next_ = old;
            } while (!head_.compare_exchange_weak(old, n, std::memory_order_release, std::memory_order_relaxed));
        }

        T* Pop()
        {
            T* old = head_.load(std::memory_order_relaxed);
            do {
                if (old == nullptr) {
                    return nullptr;
                }
            } while (!head_.compare_exchange_weak(old, old->next_, std::memory_order_release,
                                                  std::memory_order_relaxed));
            old->next_ = nullptr;
            return old;
        }

        T* PopAll()
        {
            T* old = head_.load(std::memory_order_relaxed);
            while (!head_.compare_exchange_weak(old, nullptr, std::memory_order_release, std::memory_order_relaxed)) {
            }
            return old;
        }

    private:
        std::atomic<T*> head_;
    };
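    // Note: SatbBuffer itself links its lists with LockedList below; this
    // lock-free variant assumes T exposes a next_ pointer, as TreapNode and
    // Page do. Illustrative sketch only:
    //   LockFreeList<TreapNode> list;
    //   list.Push(node);                         // CAS-based, safe across threads
    //   TreapNode* n = list.Pop();               // nullptr when the list is empty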

    template<typename T>
    class LockedList {
        friend class SatbBuffer;

    public:
        LockedList() : head_(nullptr) {}
        ~LockedList() = default;

        void Reset()
        {
            std::lock_guard<std::mutex> lg(safeLock_);
            head_ = nullptr;
        }

        void Push(T* n)
        {
            std::lock_guard<std::mutex> lg(safeLock_);
            n->next_ = head_;
            head_ = n;
        }

        T* Pop()
        {
            std::lock_guard<std::mutex> lg(safeLock_);
            if (head_ == nullptr) {
                return nullptr;
            }
            T* old = head_;
            T* next = head_->next_;
            head_ = next;
            return old;
        }

        T* PopAll()
        {
            std::lock_guard<std::mutex> lg(safeLock_);
            T* old = head_;
            head_ = nullptr;
            return old;
        }

    private:
        std::mutex safeLock_;
        T* head_;
    };

    void EnsureGoodNode(TreapNode*& node)
    {
        if (node == nullptr) {
            node = freeNodes_.Pop();
        } else if (node->IsFull()) {
            // the current node is full; retire it and take a fresh one
            retiredNodes_.Push(node);
            node = freeNodes_.Pop();
        } else {
            // not null and still has free slots
            return;
        }
        if (node == nullptr) {
            // there are no free nodes left in the freeNodes_ list
            Page* page = GetPages(COMMON_PAGE_SIZE);
            TreapNode* list = ConstructFreeNodeList(page, COMMON_PAGE_SIZE);
            if (list == nullptr) {
                return;
            }
            node = list;
            TreapNode* cur = list->next_;
            node->next_ = nullptr;
            LOGF_CHECK(node->IsEmpty()) << "got a non-empty node from a new page";
            while (cur != nullptr) {
                TreapNode* next = cur->next_;
                freeNodes_.Push(cur);
                cur = next;
            }
        } else {
            LOGF_CHECK(node->IsEmpty()) << "got a non-empty node from the free list";
        }
    }
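    // Illustrative sketch of a mutator-side write barrier (the caller, its
    // cached node pointer, and oldValue are assumptions, not part of this header):
    //   if (SatbBuffer::Instance().ShouldEnqueue(oldValue)) {
    //       SatbBuffer::Instance().EnsureGoodNode(node); // node: mutator-cached TreapNode*
    //       if (node != nullptr) {
    //           node->Push(oldValue);            // snapshot the overwritten field
    //       }
    //   }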
    bool ShouldEnqueue(const BaseObject* obj);

    // must not be called concurrently with other threads
    void Init()
    {
        TreapNode* head = retiredNodes_.PopAll();
        while (head != nullptr) {
            TreapNode* oldHead = head;
            head = head->next_;
            oldHead->Clear();
            freeNodes_.Push(oldHead);
        }

        if (freeNodes_.head_ == nullptr) {
            size_t initialBytes = INITIAL_PAGES * COMMON_PAGE_SIZE;
            Page* page = GetPages(initialBytes);
            TreapNode* list = ConstructFreeNodeList(page, initialBytes);
            freeNodes_.head_ = list;
        }
    }

    void RetireNode(TreapNode* node) noexcept { retiredNodes_.Push(node); }

    void Fini() { ReclaimALLPages(); }

    template<typename T>
    void GetRetiredObjects(T& stack)
    {
        TreapNode* head = retiredNodes_.PopAll();
        while (head != nullptr) {
            head->GetObjects(stack);
            TreapNode* oldHead = head;
            head = head->next_;
            oldHead->Clear();
            freeNodes_.Push(oldHead);
        }
    }
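    // Illustrative sketch (hypothetical collector-side use): drain all retired
    // nodes into a mark stack at the start of a marking pass.
    //   std::vector<BaseObject*> markStack;
    //   SatbBuffer::Instance().GetRetiredObjects(markStack);
    //   // markStack now holds the buffered old values for re-marking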

    template<typename T>
    void TryFetchOneRetiredNode(T& stack)
    {
        TreapNode* node = retiredNodes_.Pop();
        if (!node) {
            return;
        }
        node->GetObjects(stack);
        node->Clear();
        freeNodes_.Push(node);
    }

    void ClearBuffer()
    {
        TreapNode* head = retiredNodes_.PopAll();
        while (head != nullptr) {
            TreapNode* oldHead = head;
            head = head->next_;
            oldHead->Clear();
            freeNodes_.Push(oldHead);
        }
    }

    // may be invoked only when no mutator still points to any node
    void ReclaimALLPages()
    {
        freeNodes_.Reset();
        retiredNodes_.Reset();
        Page* list = arena_.PopAll();
        if (list == nullptr) {
            return;
        }
        while (list != nullptr) {
            Page* next = list->next_;
            PagePool::Instance().ReturnPage(reinterpret_cast<uint8_t*>(list), list->length_);
            list = next;
        }
    }
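    // Illustrative lifecycle sketch (hypothetical call sites):
    //   SatbBuffer::Instance().Init();           // before concurrent marking starts
    //   ... mutators record old values, collector drains retired nodes ...
    //   SatbBuffer::Instance().Fini();           // after marking: return all pages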

private:
    Page* GetPages(size_t bytes)
    {
        Page* page = new (PagePool::Instance().GetPage(bytes)) Page(nullptr, bytes);
        page->next_ = nullptr;
        arena_.Push(page);
        return page;
    }

    TreapNode* ConstructFreeNodeList(const Page* page, size_t bytes) const
    {
        HeapAddress start = reinterpret_cast<HeapAddress>(page) + RoundUp(sizeof(Page), CACHE_LINE_ALIGN);
        HeapAddress end = reinterpret_cast<HeapAddress>(page) + bytes;
        TreapNode* cur = nullptr;
        TreapNode* head = nullptr;
        while (start <= (end - NODE_SIZE_)) {
            TreapNode* node = new (reinterpret_cast<void*>(start)) TreapNode();
            if (cur == nullptr) {
                cur = node;
                head = node;
            } else {
                cur->next_ = node;
                cur = node;
            }
            start += NODE_SIZE_;
        }
        return head;
    }
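    // Sizing note (illustrative, assuming the 4K pages the arena comment below
    // mentions): each page donates its space after a cache-line-aligned Page
    // header, so roughly (4096 - CACHE_LINE_ALIGN) / sizeof(TreapNode) nodes fit per page.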

    LockedList<Page> arena_;        // arena of allocatable pages; the first allocation is 64 * 4K = 256K, the rest are 4K each
    // free nodes; mutators acquire nodes from this list to record the old values of overwritten fields
    LockedList<TreapNode> freeNodes_;
    LockedList<TreapNode> retiredNodes_; // nodes filled by mutators, ready for scanning
};
} // namespace common

#endif // COMMON_COMPONENTS_MUTATOR_SATB_BUFFER_H