1 /**
2 * Copyright (c) 2021-2024 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "runtime/arch/memory_helpers.h"
17 #include "runtime/include/managed_thread.h"
18 #include "runtime/mem/gc/gc_barrier_set.h"
19 #include "libpandabase/mem/gc_barrier.h"
20 #include "libpandabase/mem/mem.h"
21 #include "runtime/include/object_header.h"
22 #include "runtime/include/panda_vm.h"
23 #include "runtime/mem/rem_set.h"
24 #include "runtime/mem/gc/heap-space-misc/crossing_map.h"
25 #include <atomic>
26
27 namespace ark::mem {
28
// Defaulted out of line (rather than in the header) so the destructor — and,
// if the class is polymorphic, its vtable — is emitted in this translation unit.
GCBarrierSet::~GCBarrierSet() = default;
30
CheckPostBarrier(CardTable * cardTable,const void * objAddr,bool checkCardTable=true)31 bool CheckPostBarrier(CardTable *cardTable, const void *objAddr, bool checkCardTable = true)
32 {
33 if constexpr (PANDA_CROSSING_MAP_MANAGE_CROSSED_BORDER) {
34 return true;
35 }
36
37 // check that obj_addr must be object header
38 ASSERT(IsAddressInObjectsHeap(objAddr));
39 [[maybe_unused]] auto *object = reinterpret_cast<const ObjectHeader *>(objAddr);
40 ASSERT(IsAddressInObjectsHeap(object->ClassAddr<BaseClass>()));
41
42 // we need to check that card related by object header must be young/marked/processed.
43 // doesn't for G1, because card_table is processed concurrently, so it can be cleared before we enter here
44 bool res = true;
45 if (checkCardTable) {
46 res = !cardTable->IsClear(ToUintPtr(objAddr));
47 }
48 return res;
49 }
50
PreSATBBarrier(ObjectHeader * preVal)51 static void PreSATBBarrier(ObjectHeader *preVal)
52 {
53 if (preVal != nullptr) {
54 LOG(DEBUG, GC) << "GC PreSATBBarrier pre val -> new val:" << preVal;
55 auto preBuff = static_cast<PandaVector<ObjectHeader *> *>(ManagedThread::GetCurrent()->GetPreBuff());
56 ASSERT(preBuff != nullptr);
57 ValidateObject(RootType::SATB_BUFFER, preVal);
58 preBuff->push_back(preVal);
59 }
60 }
61
PostIntergenerationalBarrier(const void * minAddr,uint8_t * cardTableAddr,uint8_t cardBits,uint8_t dirtyCardValue,const void * objFieldAddr)62 void PostIntergenerationalBarrier(const void *minAddr, uint8_t *cardTableAddr, uint8_t cardBits, uint8_t dirtyCardValue,
63 const void *objFieldAddr)
64 {
65 size_t cardIndex = (ToUintPtr(objFieldAddr) - ToUintPtr(minAddr)) >> cardBits;
66 auto *cardAddr = static_cast<std::atomic_uint8_t *>(ToVoidPtr(ToUintPtr(cardTableAddr) + cardIndex));
67 // Atomic with relaxed order reason: data race with card_addr with no synchronization or ordering constraints
68 // imposed on other reads or writes
69 cardAddr->store(dirtyCardValue, std::memory_order_relaxed);
70 }
71
GetBarrierOperand(BarrierPosition barrierPosition,std::string_view name)72 BarrierOperand GCBarrierSet::GetBarrierOperand(BarrierPosition barrierPosition, std::string_view name)
73 {
74 if (barrierPosition == BarrierPosition::BARRIER_POSITION_PRE) {
75 if (UNLIKELY(preOperands_.find(name.data()) == preOperands_.end())) {
76 LOG(FATAL, GC) << "Operand " << name << " not found for pre barrier";
77 }
78 return preOperands_.at(name.data());
79 }
80 if (UNLIKELY(postOperands_.find(name.data()) == postOperands_.end())) {
81 LOG(FATAL, GC) << "Operand " << name << " not found for post barrier";
82 }
83 return postOperands_.at(name.data());
84 }
85
/// @brief Convenience wrapper: look up a post-barrier operand by name.
/// Fatal-logs (via GetBarrierOperand) when the operand is not registered.
BarrierOperand GCBarrierSet::GetPostBarrierOperand(std::string_view name)
{
    return GetBarrierOperand(BarrierPosition::BARRIER_POSITION_POST, name);
}
90
// Generational GC uses no pre-write barrier; deliberately a no-op.
void GCGenBarrierSet::PreBarrier([[maybe_unused]] void *preValAddr) {}
92
// Post barrier for a single reference store: dirties the card covering the
// object header. offset/storedValAddr are unused — the card is dirtied per
// object, not per field.
void GCGenBarrierSet::PostBarrier(const void *objAddr, [[maybe_unused]] size_t offset,
                                  [[maybe_unused]] void *storedValAddr)
{
    LOG(DEBUG, GC) << "GC PostBarrier: write to " << std::hex << objAddr << " value " << storedValAddr;
    PostIntergenerationalBarrier(minAddr_, cardTableAddr_, cardBits_, dirtyCardValue_, objAddr);
    ASSERT(CheckPostBarrier(cardTable_, objAddr));
}
100
// Post barrier for a bulk store (offset/count unused): dirties the card of the
// object header, same as the single-store variant.
void GCGenBarrierSet::PostBarrier(const void *objAddr, [[maybe_unused]] size_t offset, [[maybe_unused]] size_t count)
{
    // NOTE: We can improve an implementation here
    // because now we consider every field as an object reference field.
    // Maybe, it will be better to check it, but there can be possible performance degradation.
    PostIntergenerationalBarrier(minAddr_, cardTableAddr_, cardBits_, dirtyCardValue_, objAddr);
    ASSERT(CheckPostBarrier(cardTable_, objAddr));
}
109
// Returns true while the SATB pre-write barrier is active, signalled by a
// non-null pre-WRB entrypoint on the current thread.
bool GCG1BarrierSet::IsPreBarrierEnabled()
{
    // No data race because G1GC sets this flag on pause
    return Thread::GetCurrent()->GetPreWrbEntrypoint() != nullptr;
}
115
// SATB pre-write barrier: records the about-to-be-overwritten reference via
// PreSATBBarrier. Must only be called while the barrier is enabled — the
// ASSERT checks the pre-WRB entrypoint is set.
void GCG1BarrierSet::PreBarrier(void *preValAddr)
{
    LOG_IF(preValAddr != nullptr, DEBUG, GC) << "GC PreBarrier: with pre-value " << preValAddr;
    ASSERT(Thread::GetCurrent()->GetPreWrbEntrypoint() != nullptr);

    PreSATBBarrier(reinterpret_cast<ObjectHeader *>(preValAddr));
}
123
// G1 post-write barrier for a single reference store (obj.field = storedVal).
// Marks and enqueues the card covering the written field when the store
// creates an inter-region reference, so the GC can update remembered sets.
void GCG1BarrierSet::PostBarrier(const void *objAddr, size_t offset, void *storedValAddr)
{
    // Storing null creates no inter-region reference.
    if (storedValAddr == nullptr) {
        return;
    }

    LOG(DEBUG, GC) << "GC PostBarrier: write to " << std::hex << objAddr << " value " << storedValAddr;

    // Reference stays within one region — nothing to record.
    if (ark::mem::IsSameRegion(objAddr, storedValAddr, regionSizeBitsCount_)) {
        return;
    }

    auto *card = cardTable_->GetCardPtr(ToUintPtr(objAddr) + offset);
    // Young cards are skipped — presumably young regions are always fully
    // scanned so their cards never need dirtying (NOTE(review): confirm).
    if (card->IsYoung()) {
        return;
    }

    // StoreLoad barrier is required to guarantee order of previous reference store and card load
    arch::StoreLoadBarrier();

    // Read the card once: the status decides marking, the hotness (from the
    // same read) decides whether to enqueue.
    auto cardValue = card->GetCard();
    auto cardStatus = CardTable::Card::GetStatus(cardValue);
    if (!CardTable::Card::IsMarked(cardStatus)) {
        LOG(DEBUG, GC) << "GC Interregion barrier write to " << objAddr << " value " << storedValAddr;
        card->Mark();
        if (!CardTable::Card::IsHot(cardValue)) {
            Enqueue(card);
        }
    }
    // checkCardTable=false: G1 processes cards concurrently, so the card may
    // already be cleared here.
    ASSERT(CheckPostBarrier(cardTable_, objAddr, false));
}
155
// G1 post-write barrier for a bulk store covering `count` bytes starting at
// objAddr + offset (e.g. array copies): invalidates every card in the range.
void GCG1BarrierSet::PostBarrier(const void *objAddr, size_t offset, size_t count)
{
    // Force post inter-region barrier
    auto firstAddr = ToUintPtr(objAddr) + offset;
    auto *beginCard = cardTable_->GetCardPtr(firstAddr);
    // Inclusive end: the card of the last written byte.
    auto *lastCard = cardTable_->GetCardPtr(firstAddr + count - 1);
    if (beginCard->IsYoung()) {
        // Check one card only because cards from beginCard to lastCard belong to the same region
        return;
    }
    // StoreLoad barrier is required to guarantee order of previous reference store and card load
    arch::StoreLoadBarrier();
    Invalidate(beginCard, lastCard);
    // checkCardTable=false: G1 processes cards concurrently (see single-store variant).
    ASSERT(CheckPostBarrier(cardTable_, objAddr, false));
    // NOTE: We can improve an implementation here
    // because now we consider every field as an object reference field.
    // Maybe, it will be better to check it, but there can be possible performance degradation.
}
174
// Marks every card in the inclusive range [begin, last] and enqueues those
// that were neither already marked nor hot. Callers must have excluded young
// cards beforehand (asserted below).
void GCG1BarrierSet::Invalidate(CardTable::CardPtr begin, CardTable::CardPtr last)
{
    LOG(DEBUG, GC) << "GC Interregion barrier write for memory range from " << cardTable_->GetCardStartAddress(begin)
                   << " to " << cardTable_->GetCardEndAddress(last);
    // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
    for (auto *card = begin; card <= last; ++card) {
        // Read the card once: status decides marking, hotness (from the same
        // read) decides enqueueing.
        auto cardValue = card->GetCard();
        auto cardStatus = CardTable::Card::GetStatus(cardValue);
        ASSERT(!CardTable::Card::IsYoung(cardStatus));
        if (!CardTable::Card::IsMarked(cardStatus)) {
            card->Mark();
            if (!CardTable::Card::IsHot(cardValue)) {
                Enqueue(card);
            }
        }
    }
}
192
Enqueue(CardTable::CardPtr card)193 void GCG1BarrierSet::Enqueue(CardTable::CardPtr card)
194 {
195 auto *thread = ManagedThread::GetCurrent();
196 if (thread == nullptr) { // slow path via shared-queue for VM threads: gc/compiler/etc
197 os::memory::LockHolder lock(*queueLock_);
198 updatedRefsQueue_->push_back(card);
199 } else {
200 // general fast-path for mutators
201 ASSERT(thread->GetPreBuff() != nullptr); // write barrier cant be called after Terminate
202 auto *buffer = thread->GetG1PostBarrierBuffer();
203 ASSERT(buffer != nullptr);
204 // try to push it twice
205 for (size_t i = 0; i < 2U; i++) {
206 bool success = buffer->TryPush(card);
207 if (success) {
208 return;
209 }
210 }
211 // After 2 unsuccessfull pushing, we see that current buffer still full
212 // so, reuse shared buffer
213 os::memory::LockHolder lock(*queueLock_);
214 updatedRefsQueue_->push_back(card);
215 }
216 }
217 } // namespace ark::mem
218