/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "runtime/include/managed_thread.h"
#include "runtime/mem/gc/gc_barrier_set.h"
#include "libpandabase/mem/gc_barrier.h"
#include "libpandabase/mem/mem.h"
#include "runtime/include/object_header.h"
#include "runtime/include/panda_vm.h"
#include "runtime/mem/rem_set.h"
#include "runtime/mem/gc/heap-space-misc/crossing_map.h"
#include <atomic>

namespace panda::mem {

GCBarrierSet::~GCBarrierSet() = default;

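// Debug helper used in the ASSERTs below: checks that objAddr looks like a valid object header and,
// when checkCardTable is true, that the card covering it has not been cleared after the post-barrier.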
bool CheckPostBarrier(CardTable *cardTable, const void *objAddr, bool checkCardTable = true)
{
    if constexpr (PANDA_CROSSING_MAP_MANAGE_CROSSED_BORDER) {
        return true;
    }

    // Check that objAddr points to an object header
    ASSERT(IsAddressInObjectsHeap(objAddr));
    [[maybe_unused]] auto *object = reinterpret_cast<const ObjectHeader *>(objAddr);
    ASSERT(IsAddressInObjectsHeap(object->ClassAddr<BaseClass>()));

    // The card covering the object header must be young/marked/processed.
    // This does not hold for G1, because the card table is processed concurrently,
    // so the card can be cleared before we get here
    bool res = true;
    if (checkCardTable) {
        res = !cardTable->GetCardPtr(ToUintPtr(objAddr))->IsClear();
    }
    return res;
}

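// SATB (snapshot-at-the-beginning) pre-barrier: records the overwritten reference in the current
// thread's pre-buffer so that the concurrent marker does not lose it.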
static void PreSATBBarrier(ObjectHeader *preVal)
{
    if (preVal != nullptr) {
        LOG(DEBUG, GC) << "GC PreSATBBarrier pre val -> new val:" << preVal;
        auto preBuff = static_cast<PandaVector<ObjectHeader *> *>(ManagedThread::GetCurrent()->GetPreBuff());
        ASSERT(preBuff != nullptr);
        ValidateObject(RootType::SATB_BUFFER, preVal);
        preBuff->push_back(preVal);
    }
}

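// Intergenerational post-barrier: computes the card covering objFieldAddr and marks it dirty
// so that the generational GC rescans it for old-to-young references.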
void PostIntergenerationalBarrier(const void *minAddr, uint8_t *cardTableAddr, uint8_t cardBits, uint8_t dirtyCardValue,
                                  const void *objFieldAddr)
{
    size_t cardIndex = (ToUintPtr(objFieldAddr) - ToUintPtr(minAddr)) >> cardBits;
    auto *cardAddr = static_cast<std::atomic_uint8_t *>(ToVoidPtr(ToUintPtr(cardTableAddr) + cardIndex));
    // Atomic with relaxed order reason: data race on cardAddr with no synchronization or ordering constraints
    // imposed on other reads or writes
    cardAddr->store(dirtyCardValue, std::memory_order_relaxed);
}

BarrierOperand GCBarrierSet::GetBarrierOperand(BarrierPosition barrierPosition, std::string_view name)
{
    if (barrierPosition == BarrierPosition::BARRIER_POSITION_PRE) {
        if (UNLIKELY(preOperands_.find(name.data()) == preOperands_.end())) {
            LOG(FATAL, GC) << "Operand " << name << " not found for pre barrier";
        }
        return preOperands_.at(name.data());
    }
    if (UNLIKELY(postOperands_.find(name.data()) == postOperands_.end())) {
        LOG(FATAL, GC) << "Operand " << name << " not found for post barrier";
    }
    return postOperands_.at(name.data());
}

BarrierOperand GCBarrierSet::GetPostBarrierOperand(std::string_view name)
{
    return GetBarrierOperand(BarrierPosition::BARRIER_POSITION_POST, name);
}

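// The generational GC does not use a pre-barrier, so this is a no-op.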
void GCGenBarrierSet::PreBarrier([[maybe_unused]] void *preValAddr) {}

void GCGenBarrierSet::PostBarrier(const void *objAddr, [[maybe_unused]] size_t offset,
                                  [[maybe_unused]] void *storedValAddr)
{
    LOG(DEBUG, GC) << "GC PostBarrier: write to " << std::hex << objAddr << " value " << storedValAddr;
    PostIntergenerationalBarrier(minAddr_, cardTableAddr_, cardBits_, dirtyCardValue_, objAddr);
    ASSERT(CheckPostBarrier(cardTable_, objAddr));
}

void GCGenBarrierSet::PostBarrier(const void *objAddr, [[maybe_unused]] size_t offset, [[maybe_unused]] size_t count)
{
    // NOTE: This implementation could be improved: every field is currently treated
    // as an object reference field. Checking the actual field types might help,
    // but could also degrade performance.
    PostIntergenerationalBarrier(minAddr_, cardTableAddr_, cardBits_, dirtyCardValue_, objAddr);
    ASSERT(CheckPostBarrier(cardTable_, objAddr));
}

bool GCG1BarrierSet::IsPreBarrierEnabled()
{
    // No data race because G1GC sets this flag on pause
    return Thread::GetCurrent()->GetPreWrbEntrypoint() != nullptr;
}

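// G1 SATB pre-barrier: expected to be called only while concurrent marking is active,
// i.e. while the pre-WRB entrypoint is set.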
void GCG1BarrierSet::PreBarrier(void *preValAddr)
{
    LOG_IF(preValAddr != nullptr, DEBUG, GC) << "GC PreBarrier: with pre-value " << preValAddr;
    ASSERT(Thread::GetCurrent()->GetPreWrbEntrypoint() != nullptr);

    PreSATBBarrier(reinterpret_cast<ObjectHeader *>(preValAddr));
}

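// G1 post-barrier for a single reference store: if the stored reference crosses regions and the
// card is neither young nor already marked, mark the card and enqueue it for remembered-set update.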
void GCG1BarrierSet::PostBarrier(const void *objAddr, size_t offset, void *storedValAddr)
{
    LOG_IF(storedValAddr != nullptr, DEBUG, GC)
        << "GC PostBarrier: write to " << std::hex << objAddr << " value " << storedValAddr;
    if (storedValAddr != nullptr) {
        // Mark and enqueue the card if this is a cross-region reference
        auto *card = cardTable_->GetCardPtr(ToUintPtr(objAddr) + offset);
        if (!card->IsYoung() && !card->IsMarked() &&
            !panda::mem::IsSameRegion(objAddr, storedValAddr, regionSizeBitsCount_)) {
            LOG(DEBUG, GC) << "GC Interregion barrier write to " << objAddr << " value " << storedValAddr;
            card->Mark();
            Enqueue(card);
        }
    }
    ASSERT(storedValAddr == nullptr || panda::mem::IsSameRegion(objAddr, storedValAddr, regionSizeBitsCount_) ||
           CheckPostBarrier(cardTable_, objAddr, false));
}

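// G1 post-barrier for a bulk write covering `count` bytes starting at objAddr + offset (e.g. an array
// copy): conservatively dirties every card covering the written range.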
void GCG1BarrierSet::PostBarrier(const void *objAddr, size_t offset, size_t count)
{
    // Force the post inter-region barrier for the whole written range
    auto firstAddr = ToUintPtr(objAddr) + offset;
    auto lastAddr = firstAddr + count - 1;
    Invalidate(firstAddr, lastAddr);
    ASSERT(CheckPostBarrier(cardTable_, objAddr, false));
    // NOTE: This implementation could be improved: every field is currently treated
    // as an object reference field. Checking the actual field types might help,
    // but could also degrade performance.
}

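// Marks and enqueues every card covering the [begin, last] address range,
// skipping cards that are young or already marked.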
void GCG1BarrierSet::Invalidate(uintptr_t begin, uintptr_t last)
{
    LOG(DEBUG, GC) << "GC Interregion barrier write for memory range from " << ToVoidPtr(begin) << " to "
                   << ToVoidPtr(last);
    auto *beginCard = cardTable_->GetCardPtr(begin);
    auto *lastCard = cardTable_->GetCardPtr(last);
    // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
    for (auto *card = beginCard; card <= lastCard; ++card) {
        if (!card->IsYoung() && !card->IsMarked()) {
            card->Mark();
            Enqueue(card);
        }
    }
}

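// Hands a dirty card over to the GC: mutator threads push it to their thread-local G1 post-barrier
// buffer, while non-managed threads (and mutators whose buffer is full) fall back to the shared
// updated-refs queue protected by queueLock_.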
void GCG1BarrierSet::Enqueue(CardTable::CardPtr card)
{
    auto *thread = ManagedThread::GetCurrent();
    if (thread == nullptr) {  // Slow path via the shared queue for VM threads: GC, compiler, etc.
        os::memory::LockHolder lock(*queueLock_);
        updatedRefsQueue_->push_back(card);
    } else {
        // General fast path for mutators
        ASSERT(thread->GetPreBuff() != nullptr);  // the write barrier can't be called after Terminate
        auto *buffer = thread->GetG1PostBarrierBuffer();
        ASSERT(buffer != nullptr);
        // Try to push the card twice
        for (size_t i = 0; i < 2U; i++) {
            bool success = buffer->TryPush(card);
            if (success) {
                return;
            }
        }
        // After two unsuccessful pushes the current buffer is still full,
        // so fall back to the shared queue
        os::memory::LockHolder lock(*queueLock_);
        updatedRefsQueue_->push_back(card);
    }
}
}  // namespace panda::mem